Merge remote-tracking branch 'upstream/3.4' into merge-3.4

commit 19926e2979
@@ -29,84 +29,10 @@ endif()
 
 # ======================
 
-macro(ocv_ie_find_extra_libraries find_prefix find_suffix)
-  file(GLOB libraries "${INF_ENGINE_LIB_DIRS}/${find_prefix}inference_engine*${find_suffix}")
-  foreach(full_path IN LISTS libraries)
-    get_filename_component(library "${full_path}" NAME_WE)
-    string(REPLACE "${find_prefix}" "" library "${library}")
-    if(library STREQUAL "inference_engine" OR library STREQUAL "inference_engined")
-      # skip
-    else()
-      add_library(${library} UNKNOWN IMPORTED)
-      set_target_properties(${library} PROPERTIES
-          IMPORTED_LOCATION "${full_path}")
-      list(APPEND custom_libraries ${library})
-    endif()
-  endforeach()
-endmacro()
-
-function(add_custom_ie_build _inc _lib _lib_rel _lib_dbg _msg)
-  if(NOT _inc OR NOT (_lib OR _lib_rel OR _lib_dbg))
-    return()
-  endif()
-  if(NOT _lib)
-    if(_lib_rel)
-      set(_lib "${_lib_rel}")
-    else()
-      set(_lib "${_lib_dbg}")
-    endif()
-  endif()
-  add_library(inference_engine UNKNOWN IMPORTED)
-  set_target_properties(inference_engine PROPERTIES
-    IMPORTED_LOCATION "${_lib}"
-    IMPORTED_IMPLIB_RELEASE "${_lib_rel}"
-    IMPORTED_IMPLIB_DEBUG "${_lib_dbg}"
-    INTERFACE_INCLUDE_DIRECTORIES "${_inc}"
-  )
-
-  set(custom_libraries "")
-  set(__prefixes "${CMAKE_FIND_LIBRARY_PREFIXES}")
-  if(NOT __prefixes)
-    set(__prefixes "_empty_")
-  endif()
-  foreach(find_prefix ${__prefixes})
-    if(find_prefix STREQUAL "_empty_")  # foreach doesn't iterate over empty elements
-      set(find_prefix "")
-    endif()
-    if(NOT DEFINED INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES)  # allow custom override
-      set(INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES})
-      if(APPLE)
-        ocv_list_filterout(INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES "^.so$")  # skip plugins (can't be linked)
-      endif()
-    endif()
-    foreach(find_suffix ${INFERENCE_ENGINE_FIND_LIBRARY_SUFFIXES})
-      ocv_ie_find_extra_libraries("${find_prefix}" "${find_suffix}")
-    endforeach()
-    if(NOT CMAKE_FIND_LIBRARY_SUFFIXES)
-      ocv_ie_find_extra_libraries("${find_prefix}" "")
-    endif()
-  endforeach()
-
-  if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
-    find_library(INF_ENGINE_OMP_LIBRARY iomp5 PATHS "${INF_ENGINE_OMP_DIR}" NO_DEFAULT_PATH)
-    if(NOT INF_ENGINE_OMP_LIBRARY)
-      message(WARNING "OpenMP for IE have not been found. Set INF_ENGINE_OMP_DIR variable if you experience build errors.")
-    endif()
-  endif()
-  if(EXISTS "${INF_ENGINE_OMP_LIBRARY}")
-    set_target_properties(inference_engine PROPERTIES IMPORTED_LINK_INTERFACE_LIBRARIES "${INF_ENGINE_OMP_LIBRARY}")
-  endif()
-  set(INF_ENGINE_VERSION "Unknown" CACHE STRING "")
-  set(INF_ENGINE_TARGET "inference_engine;${custom_libraries}" PARENT_SCOPE)
-  message(STATUS "Detected InferenceEngine: ${_msg}")
-endfunction()
-
-# ======================
-
 find_package(InferenceEngine QUIET)
 if(InferenceEngine_FOUND)
   set(INF_ENGINE_TARGET ${InferenceEngine_LIBRARIES})
   set(INF_ENGINE_VERSION "${InferenceEngine_VERSION}" CACHE STRING "")
   message(STATUS "Detected InferenceEngine: cmake package (${InferenceEngine_VERSION})")
 endif()
 
@@ -124,38 +50,6 @@ elseif(DEFINED INF_ENGINE_RELEASE)
 endif()
 set(INF_ENGINE_RELEASE "${INF_ENGINE_RELEASE_INIT}" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
 
-if(NOT INF_ENGINE_TARGET AND INF_ENGINE_LIB_DIRS AND INF_ENGINE_INCLUDE_DIRS)
-  find_path(ie_custom_inc "inference_engine.hpp" PATHS "${INF_ENGINE_INCLUDE_DIRS}" NO_DEFAULT_PATH)
-  if(CMAKE_BUILD_TYPE STREQUAL "Debug")
-    find_library(ie_custom_lib_dbg "inference_engined" PATHS "${INF_ENGINE_LIB_DIRS}" NO_DEFAULT_PATH)  # Win32 and MacOSX
-  endif()
-  find_library(ie_custom_lib "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}" NO_DEFAULT_PATH)
-  find_library(ie_custom_lib_rel "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}/Release" NO_DEFAULT_PATH)
-  find_library(ie_custom_lib_dbg "inference_engine" PATHS "${INF_ENGINE_LIB_DIRS}/Debug" NO_DEFAULT_PATH)
-  add_custom_ie_build("${ie_custom_inc}" "${ie_custom_lib}" "${ie_custom_lib_rel}" "${ie_custom_lib_dbg}" "INF_ENGINE_{INCLUDE,LIB}_DIRS")
-endif()
-
-set(_loc "$ENV{INTEL_OPENVINO_DIR}")
-if(NOT _loc AND DEFINED ENV{INTEL_CVSDK_DIR})
-  set(_loc "$ENV{INTEL_CVSDK_DIR}")  # OpenVINO 2018.x
-endif()
-if(NOT INF_ENGINE_TARGET AND _loc)
-  if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
-    set(INF_ENGINE_PLATFORM_DEFAULT "ubuntu_16.04")
-  else()
-    set(INF_ENGINE_PLATFORM_DEFAULT "")
-  endif()
-  set(INF_ENGINE_PLATFORM "${INF_ENGINE_PLATFORM_DEFAULT}" CACHE STRING "InferenceEngine platform (library dir)")
-  find_path(ie_custom_env_inc "inference_engine.hpp" PATHS "${_loc}/deployment_tools/inference_engine/include" NO_DEFAULT_PATH)
-  if(CMAKE_BUILD_TYPE STREQUAL "Debug")
-    find_library(ie_custom_env_lib_dbg "inference_engined" PATHS "${_loc}/deployment_tools/inference_engine/lib/${INF_ENGINE_PLATFORM}/intel64" NO_DEFAULT_PATH)
-  endif()
-  find_library(ie_custom_env_lib "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/${INF_ENGINE_PLATFORM}/intel64" NO_DEFAULT_PATH)
-  find_library(ie_custom_env_lib_rel "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/intel64/Release" NO_DEFAULT_PATH)
-  find_library(ie_custom_env_lib_dbg "inference_engine" PATHS "${_loc}/deployment_tools/inference_engine/lib/intel64/Debug" NO_DEFAULT_PATH)
-  add_custom_ie_build("${ie_custom_env_inc}" "${ie_custom_env_lib}" "${ie_custom_env_lib_rel}" "${ie_custom_env_lib_dbg}" "OpenVINO (${_loc})")
-endif()
-
 set(tgts)
 set(defs)
 
@@ -309,8 +309,8 @@ public:
         READ        = 0, //!< value, open the file for reading
         WRITE       = 1, //!< value, open the file for writing
         APPEND      = 2, //!< value, open the file for appending
-        MEMORY      = 4, //!< flag, read data from source or write data to the internal buffer (which is
-                         //!< returned by FileStorage::release)
+        MEMORY      = 4, /**< flag, read data from source or write data to the internal buffer (which is
+                              returned by FileStorage::release) */
         FORMAT_MASK = (7<<3), //!< mask for format flags
         FORMAT_AUTO = 0,      //!< flag, auto format
         FORMAT_XML  = (1<<3), //!< flag, XML format
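
Note on the rewritten MEMORY comment: the flag pairs READ/WRITE with an in-memory buffer instead of a file, and the buffer mentioned above is what release hands back. A minimal round-trip sketch (file name and key are illustrative only):

#include <opencv2/core.hpp>
#include <iostream>
#include <string>

int main()
{
    // WRITE | MEMORY: the "file name" only selects the format (YAML here).
    cv::FileStorage out("buffer.yml", cv::FileStorage::WRITE | cv::FileStorage::MEMORY);
    out << "answer" << 42;
    std::string buf = out.releaseAndGetString();  // the internal buffer from the comment above

    // READ | MEMORY: parse straight from the string.
    cv::FileStorage in(buf, cv::FileStorage::READ | cv::FileStorage::MEMORY);
    int answer = 0;
    in["answer"] >> answer;
    std::cout << answer << std::endl;  // prints 42
    return 0;
}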
@@ -64,6 +64,16 @@
 #define HAL_LU_SMALL_MATRIX_THRESH 100
 #define HAL_CHOLESKY_SMALL_MATRIX_THRESH 100
 
+#if defined(__clang__) && defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+#define CV_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) \
+    __msan_unpoison(address, size)
+#endif
+#endif
+#ifndef CV_ANNOTATE_MEMORY_IS_INITIALIZED
+#define CV_ANNOTATE_MEMORY_IS_INITIALIZED(address, size) do { } while(0)
+#endif
+
 //lapack stores matrices in column-major order so transposing is needed everywhere
 template <typename fptype> static inline void
 transpose_square_inplace(fptype *src, size_t src_ld, size_t m)
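
The block added above defines the annotation once per translation unit: under clang with MemorySanitizer it forwards to __msan_unpoison(), everywhere else it compiles away. A standalone sketch of the same pattern, assuming a hypothetical uninstrumented routine that fills a buffer:

#if defined(__clang__) && defined(__has_feature)
#if __has_feature(memory_sanitizer)
#include <sanitizer/msan_interface.h>
#define ANNOTATE_INITIALIZED(address, size) __msan_unpoison(address, size)
#endif
#endif
#ifndef ANNOTATE_INITIALIZED
#define ANNOTATE_INITIALIZED(address, size) do { } while (0)  // no-op outside MSAN builds
#endif

extern "C" void fortran_fill(double* buf, int n);  // hypothetical external (uninstrumented) routine

void compute(double* buf, int n)
{
    fortran_fill(buf, n);                           // MSAN cannot see these writes
    ANNOTATE_INITIALIZED(buf, n * sizeof(double));  // so declare the buffer initialized
}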
@@ -239,20 +249,16 @@ lapack_SVD(fptype* a, size_t a_step, fptype *w, fptype* u, size_t u_step, fptype
     else if(typeid(fptype) == typeid(double))
         OCV_LAPACK_FUNC(dgesdd)(mode, &m, &n, (double*)a, &lda, (double*)w, (double*)u, &ldu, (double*)vt, &ldv, (double*)buffer, &lwork, iworkBuf, info);
 
-#if defined(__clang__) && defined(__has_feature)
-#if __has_feature(memory_sanitizer)
     // Make sure MSAN sees the memory as having been written.
     // MSAN does not think it has been written because a different language was called.
-    __msan_unpoison(a, a_step * n);
+    CV_ANNOTATE_MEMORY_IS_INITIALIZED(a, a_step * n);
-    __msan_unpoison(buffer, sizeof(fptype) * (lwork + 1));
+    CV_ANNOTATE_MEMORY_IS_INITIALIZED(buffer, sizeof(fptype) * (lwork + 1));
     if (u)
-        __msan_unpoison(u, u_step * m);
+        CV_ANNOTATE_MEMORY_IS_INITIALIZED(u, u_step * m);
     if (vt)
-        __msan_unpoison(vt, v_step * n);
+        CV_ANNOTATE_MEMORY_IS_INITIALIZED(vt, v_step * n);
     if (w)
-        __msan_unpoison(w, sizeof(fptype) * std::min(m, n));
+        CV_ANNOTATE_MEMORY_IS_INITIALIZED(w, sizeof(fptype) * std::min(m, n));
-#endif // __has_feature(memory_sanitizer)
-#endif // defined(__clang__) && defined(__has_feature)
 
     if(!(flags & CV_HAL_SVD_NO_UV))
         transpose_square_inplace(vt, ldv, n);
@@ -357,6 +363,7 @@ lapack_QR(fptype* a, size_t a_step, int m, int n, int k, fptype* b, size_t b_ste
             dgeqrf_(&m, &n, (double*)tmpA, &ldtmpA, (double*)dst, (double*)buffer, &lwork, info);
     }
 
+    CV_ANNOTATE_MEMORY_IS_INITIALIZED(info, sizeof(int));
     if (m == n)
         transpose_square_inplace(a, lda, m);
     else
@@ -165,7 +165,12 @@ elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
 endif()
 
 set(dnn_runtime_libs "")
-if(TARGET ocv.3rdparty.openvino)
+
+ocv_option(OPENCV_DNN_OPENVINO "Build with OpenVINO support (2021.4+)" (TARGET ocv.3rdparty.openvino))
+if(TARGET ocv.3rdparty.openvino AND OPENCV_DNN_OPENVINO)
+  if(NOT HAVE_OPENVINO AND NOT HAVE_NGRAPH)
+    message(FATAL_ERROR "DNN: Inference Engine is not supported without enabled 'nGraph'. Check build configuration.")
+  endif()
   list(APPEND dnn_runtime_libs ocv.3rdparty.openvino)
 endif()
 
@@ -302,8 +302,6 @@ CV__DNN_INLINE_NS_BEGIN
          */
         virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs);
 
-        virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs);
-
         virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs, const std::vector<Ptr<BackendNode> >& nodes);
 
         virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs);
@@ -15,14 +15,18 @@ CV__DNN_INLINE_NS_BEGIN
 
 
 /* Values for 'OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE' parameter */
+/// @deprecated
 #define CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API "NN_BUILDER"
+/// @deprecated
 #define CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH "NGRAPH"
 
 /** @brief Returns Inference Engine internal backend API.
  *
  * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
  *
- * Default value is controlled through `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable).
+ * `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable) is ignored since 4.6.0.
+ *
+ * @deprecated
  */
 CV_EXPORTS_W cv::String getInferenceEngineBackendType();
 
@@ -31,6 +35,8 @@ CV_EXPORTS_W cv::String getInferenceEngineBackendType();
 * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
 *
 * @returns previous value of internal backend API
+ *
+ * @deprecated
 */
 CV_EXPORTS_W cv::String setInferenceEngineBackendType(const cv::String& newBackendType);
 
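
With the runtime switch documented as ignored, backend choice is expressed per network in code. A sketch, assuming "model.xml"/"model.bin" are an OpenVINO IR pair on disk:

#include <opencv2/dnn.hpp>

cv::dnn::Net loadWithOpenVINO()
{
    cv::dnn::Net net = cv::dnn::readNetFromModelOptimizer("model.xml", "model.bin");
    // DNN_BACKEND_INFERENCE_ENGINE now always resolves to the nGraph backend.
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);
    return net;
}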
@@ -177,48 +177,29 @@ private:
 
 #ifdef HAVE_INF_ENGINE
         if (checkIETarget(DNN_TARGET_CPU)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_CPU));
-#endif
 #ifdef HAVE_DNN_NGRAPH
             backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_CPU));
 #endif
         }
         if (checkIETarget(DNN_TARGET_MYRIAD)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_MYRIAD));
-#endif
 #ifdef HAVE_DNN_NGRAPH
             backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_MYRIAD));
 #endif
         }
         if (checkIETarget(DNN_TARGET_HDDL)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_HDDL));
-#endif
 #ifdef HAVE_DNN_NGRAPH
             backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_HDDL));
 #endif
         }
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-        if (checkIETarget(DNN_TARGET_FPGA))
-            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_FPGA));
-#endif
 #ifdef HAVE_OPENCL
         if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
         {
             if (checkIETarget(DNN_TARGET_OPENCL)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL));
-#endif
 #ifdef HAVE_DNN_NGRAPH
                 backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL));
 #endif
             }
             if (checkIETarget(DNN_TARGET_OPENCL_FP16)) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL_FP16));
-#endif
 #ifdef HAVE_DNN_NGRAPH
                 backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL_FP16));
 #endif
@@ -273,7 +254,7 @@ std::vector<Target> getAvailableTargets(Backend be)
        be = (Backend)PARAM_DNN_BACKEND_DEFAULT;
 #ifdef HAVE_INF_ENGINE
    if (be == DNN_BACKEND_INFERENCE_ENGINE)
-        be = getInferenceEngineBackendTypeParam();
+        be = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
 #endif
 
    std::vector<Target> result;
|
|||||||
|
|
||||||
virtual bool supportBackend(int backendId) CV_OVERRIDE
|
virtual bool supportBackend(int backendId) CV_OVERRIDE
|
||||||
{
|
{
|
||||||
return backendId == DNN_BACKEND_OPENCV ||
|
return backendId == DNN_BACKEND_OPENCV;
|
||||||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && inputsData.size() == 1);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
|
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
|
||||||
@ -829,39 +809,6 @@ struct DataLayer : public Layer
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
|
|
||||||
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
|
|
||||||
{
|
|
||||||
CV_CheckEQ(inputsData.size(), (size_t)1, "");
|
|
||||||
CV_CheckEQ(inputsData[0].dims, 4, "");
|
|
||||||
const size_t numChannels = inputsData[0].size[1];
|
|
||||||
CV_Assert(numChannels <= 4);
|
|
||||||
|
|
||||||
// Scale
|
|
||||||
InferenceEngine::TensorDesc td(InferenceEngine::Precision::FP32, {numChannels},
|
|
||||||
InferenceEngine::Layout::C);
|
|
||||||
auto weights = InferenceEngine::make_shared_blob<float>(td);
|
|
||||||
weights->allocate();
|
|
||||||
|
|
||||||
float* weight_buf = weights->buffer().as<float*>();
|
|
||||||
std::fill(weight_buf, weight_buf + numChannels, scaleFactors[0]);
|
|
||||||
|
|
||||||
// Mean subtraction
|
|
||||||
auto biases = InferenceEngine::make_shared_blob<float>(td);
|
|
||||||
biases->allocate();
|
|
||||||
float* bias_buf = biases->buffer().as<float*>();
|
|
||||||
|
|
||||||
for (int i = 0; i < numChannels; ++i)
|
|
||||||
{
|
|
||||||
bias_buf[i] = -means[0][i] * scaleFactors[0];
|
|
||||||
}
|
|
||||||
|
|
||||||
InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
|
|
||||||
addConstantData("weights", weights, ieLayer);
|
|
||||||
addConstantData("biases", biases, ieLayer);
|
|
||||||
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
|
|
||||||
}
|
|
||||||
#endif // HAVE_DNN_IE_NN_BUILDER_2019
|
|
||||||
|
|
||||||
std::vector<String> outNames;
|
std::vector<String> outNames;
|
||||||
std::vector<MatShape> shapes;
|
std::vector<MatShape> shapes;
|
||||||
@ -1119,18 +1066,14 @@ static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
|
|||||||
}
|
}
|
||||||
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
|
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
|
||||||
{
|
{
|
||||||
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
|
CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
|
||||||
return Ptr<BackendWrapper>(new InfEngineBackendWrapper(targetId, m));
|
|
||||||
#else
|
|
||||||
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
|
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
|
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
|
||||||
{
|
{
|
||||||
#ifdef HAVE_DNN_NGRAPH
|
#ifdef HAVE_DNN_NGRAPH
|
||||||
return Ptr<BackendWrapper>(new NgraphBackendWrapper(targetId, m));
|
return Ptr<BackendWrapper>(new NgraphBackendWrapper(targetId, m));
|
||||||
#else
|
#else
|
||||||
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
|
CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of OpenVINO / Inference Engine + nGraph");
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
else if (backendId == DNN_BACKEND_WEBNN)
|
else if (backendId == DNN_BACKEND_WEBNN)
|
||||||
@ -1278,7 +1221,7 @@ struct Net::Impl : public detail::NetImplBase
|
|||||||
}
|
}
|
||||||
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
|
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
|
||||||
{
|
{
|
||||||
return wrapMat(preferableBackend, preferableTarget, host);
|
CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
|
||||||
}
|
}
|
||||||
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
|
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
|
||||||
{
|
{
|
||||||
@ -1407,7 +1350,7 @@ struct Net::Impl : public detail::NetImplBase
|
|||||||
preferableBackend = (Backend)PARAM_DNN_BACKEND_DEFAULT;
|
preferableBackend = (Backend)PARAM_DNN_BACKEND_DEFAULT;
|
||||||
#ifdef HAVE_INF_ENGINE
|
#ifdef HAVE_INF_ENGINE
|
||||||
if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
|
if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
|
||||||
preferableBackend = getInferenceEngineBackendTypeParam();
|
preferableBackend = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH; // = getInferenceEngineBackendTypeParam();
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
CV_Assert(preferableBackend != DNN_BACKEND_OPENCV ||
|
CV_Assert(preferableBackend != DNN_BACKEND_OPENCV ||
|
||||||
@ -1418,8 +1361,7 @@ struct Net::Impl : public detail::NetImplBase
|
|||||||
preferableTarget == DNN_TARGET_CPU ||
|
preferableTarget == DNN_TARGET_CPU ||
|
||||||
preferableTarget == DNN_TARGET_OPENCL);
|
preferableTarget == DNN_TARGET_OPENCL);
|
||||||
#ifdef HAVE_INF_ENGINE
|
#ifdef HAVE_INF_ENGINE
|
||||||
if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
|
if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
|
||||||
preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
|
|
||||||
{
|
{
|
||||||
CV_Assert(
|
CV_Assert(
|
||||||
(preferableTarget == DNN_TARGET_CPU && (!isArmComputePlugin() || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) ||
|
(preferableTarget == DNN_TARGET_CPU && (!isArmComputePlugin() || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) ||
|
||||||
@ -1721,14 +1663,6 @@ struct Net::Impl : public detail::NetImplBase
|
|||||||
}
|
}
|
||||||
else if (preferableBackend == DNN_BACKEND_HALIDE)
|
else if (preferableBackend == DNN_BACKEND_HALIDE)
|
||||||
initHalideBackend();
|
initHalideBackend();
|
||||||
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
|
|
||||||
{
|
|
||||||
#ifdef HAVE_DNN_IE_NN_BUILDER_2019
|
|
||||||
initInfEngineBackend(blobsToKeep_);
|
|
||||||
#else
|
|
||||||
CV_Assert(false && "This OpenCV version is built without Inference Engine NN Builder API support");
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
|
else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
|
||||||
{
|
{
|
||||||
#ifdef HAVE_DNN_NGRAPH
|
#ifdef HAVE_DNN_NGRAPH
|
||||||
@@ -1750,7 +1684,7 @@ struct Net::Impl : public detail::NetImplBase
        else if (preferableBackend == DNN_BACKEND_CUDA)
            initCUDABackend(blobsToKeep_);
        else
-            CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
+            CV_Error(Error::StsNotImplemented, cv::format("Unknown backend identifier: %d", preferableBackend));
    }
 
    void initHalideBackend()
@@ -1808,316 +1742,6 @@ struct Net::Impl : public detail::NetImplBase
        }
    }
 
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    // Before launching Inference Engine graph we need to specify output blobs.
-    // This function requests output blobs based on inputs references of
-    // layers from default backend or layers from different graphs.
-    void addInfEngineNetOutputs(LayerData &ld)
-    {
-        CV_TRACE_FUNCTION();
-        Ptr<InfEngineBackendNet> layerNet;
-        if (ld.backendNodes.find(preferableBackend) != ld.backendNodes.end())
-        {
-            Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
-            if (!node.empty())
-            {
-                Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
-                CV_Assert(!ieNode.empty()); CV_Assert(!ieNode->net.empty());
-                layerNet = ieNode->net;
-            }
-        }
-        // For an every input reference we check that it belongs to one of
-        // the Inference Engine backend graphs. Request an output blob if it is.
-        // Do nothing if layer's input is from the same graph.
-        for (int i = 0; i < ld.inputBlobsId.size(); ++i)
-        {
-            LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
-            Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
-            if (!inpNode.empty())
-            {
-                Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
-                CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
-                if (layerNet != ieInpNode->net)
-                {
-                    // layerNet is empty or nodes are from different graphs.
-                    ieInpNode->net->addOutput(ieInpNode->layer.getName());
-                }
-            }
-        }
-    }
-
-    void initInfEngineBackend(const std::vector<LayerPin>& blobsToKeep_)
-    {
-        CV_TRACE_FUNCTION();
-        CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, haveInfEngine());
-        MapIdToLayerData::iterator it;
-        Ptr<InfEngineBackendNet> net;
-
-        for (it = layers.begin(); it != layers.end(); ++it)
-        {
-            LayerData &ld = it->second;
-            if (ld.id == 0)
-            {
-                CV_Assert((netInputLayer->outNames.empty() && ld.outputBlobsWrappers.size() == 1) ||
-                          (netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
-                for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
-                {
-                    InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
-                    dataPtr->name = netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i];
-#else
-                    dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
-#endif
-                }
-            }
-            else
-            {
-                for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
-                {
-                    InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
-                    dataPtr->name = ld.name;
-#else
-                    dataPtr->setName(ld.name);
-#endif
-                }
-            }
-        }
-
-        if (skipInfEngineInit)
-        {
-            Ptr<BackendNode> node = layers[lastLayerId].backendNodes[preferableBackend];
-            CV_Assert(!node.empty());
-
-            Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
-            CV_Assert(!ieNode.empty());
-            ieNode->net->reset();
-
-            for (it = layers.begin(); it != layers.end(); ++it)
-            {
-                LayerData &ld = it->second;
-                if (ld.id == 0)
-                {
-                    for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
-                    {
-                        InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.inputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
-                        dataPtr->name = netInputLayer->outNames[i];
-#else
-                        dataPtr->setName(netInputLayer->outNames[i]);
-#endif
-                    }
-                }
-                else
-                {
-                    for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
-                    {
-                        InferenceEngine::DataPtr dataPtr = infEngineDataNode(ld.outputBlobsWrappers[i]);
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
-                        dataPtr->name = ld.name;
-#else
-                        dataPtr->setName(ld.name);
-#endif
-                    }
-                }
-                ieNode->net->addBlobs(ld.inputBlobsWrappers);
-                ieNode->net->addBlobs(ld.outputBlobsWrappers);
-                ld.skip = true;
-            }
-            layers[lastLayerId].skip = false;
-            ieNode->net->init((Target)preferableTarget);
-            return;
-        }
-
-        // Build Inference Engine networks from sets of layers that support this
-        // backend. Split a whole model on several Inference Engine networks if
-        // some of layers are not implemented.
-
-        bool supportsCPUFallback = preferableTarget == DNN_TARGET_CPU ||
-                                   BackendRegistry::checkIETarget(DNN_TARGET_CPU);
-
-        // Set of all input and output blobs wrappers for current network.
-        std::map<LayerPin, Ptr<BackendWrapper> > netBlobsWrappers;
-        for (it = layers.begin(); it != layers.end(); ++it)
-        {
-            LayerData &ld = it->second;
-            if (ld.id == 0 && ld.skip)
-                continue;
-            bool fused = ld.skip;
-
-            Ptr<Layer> layer = ld.layerInstance;
-            if (!fused && !layer->supportBackend(preferableBackend))
-            {
-                bool customizable = ld.id != 0 &&
-                                    INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R2) &&
-                                    supportsCPUFallback;
-                // TODO: there is a bug in Myriad plugin with custom layers shape infer.
-                if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
-                {
-                    for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
-                    {
-                        customizable = ld.inputBlobs[i]->size[0] == 1;
-                    }
-                }
-
-                // TODO: fix these workarounds
-                if (preferableTarget == DNN_TARGET_MYRIAD ||
-                    preferableTarget == DNN_TARGET_HDDL ||
-                    preferableTarget == DNN_TARGET_OPENCL ||
-                    preferableTarget == DNN_TARGET_OPENCL_FP16)
-                    customizable &= ld.type != "Concat";
-
-                if (preferableTarget == DNN_TARGET_OPENCL ||
-                    preferableTarget == DNN_TARGET_OPENCL_FP16)
-                    customizable &= ld.type != "Power";
-
-                if (preferableTarget == DNN_TARGET_OPENCL)
-                    customizable &= ld.type != "Eltwise";
-
-                if (!customizable)
-                {
-                    addInfEngineNetOutputs(ld);
-                    net = Ptr<InfEngineBackendNet>();
-                    netBlobsWrappers.clear(); // Is not used for R5 release but we don't wrap it to #ifdef.
-                    layer->preferableTarget = DNN_TARGET_CPU;
-                    continue;
-                }
-            }
-            ld.skip = true; // Initially skip all Inference Engine supported layers.
-
-            // Create a new network if one of inputs from different Inference Engine graph.
-            for (int i = 0; i < ld.inputBlobsId.size(); ++i)
-            {
-                LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
-                Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
-                if (!inpNode.empty())
-                {
-                    Ptr<InfEngineBackendNode> ieInpNode = inpNode.dynamicCast<InfEngineBackendNode>();
-                    CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
-                    if (ieInpNode->net != net)
-                    {
-                        net = Ptr<InfEngineBackendNet>();
-                        netBlobsWrappers.clear(); // Is not used for R5 release but we don't wrap it to #ifdef.
-                        break;
-                    }
-                }
-            }
-
-            Ptr<BackendNode> node;
-            if (!net.empty())
-            {
-                if (fused)
-                {
-                    bool inPlace = ld.inputBlobsId.size() == 1 && ld.outputBlobs.size() == 1 &&
-                                   ld.inputBlobs[0]->data == ld.outputBlobs[0].data;
-                    CV_Assert(inPlace);
-                    node = layers[ld.inputBlobsId[0].lid].backendNodes[preferableBackend];
-                    ld.inputBlobsWrappers = layers[ld.inputBlobsId[0].lid].inputBlobsWrappers;
-                }
-            }
-            else
-                net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet());
-
-            if (!fused)
-            {
-                if (layer->supportBackend(preferableBackend))
-                    node = layer->initInfEngine(ld.inputBlobsWrappers);
-                else
-                {
-                    node = Ptr<BackendNode>(new InfEngineBackendNode(
-                        ld.layerInstance, ld.inputBlobs, ld.outputBlobs, ld.internals));
-                }
-            }
-            else if (node.empty())
-                continue;
-
-            CV_Assert(!node.empty());
-            ld.backendNodes[preferableBackend] = node;
-
-            Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
-            CV_Assert(!ieNode.empty());
-            ieNode->net = net;
-
-            for (const auto& pin : blobsToKeep_)
-            {
-                if (pin.lid == ld.id)
-                {
-                    ieNode->net->addOutput(ieNode->layer.getName());
-                    break;
-                }
-            }
-
-            // Convert weights in FP16 for specific targets.
-            if ((preferableTarget == DNN_TARGET_OPENCL_FP16 ||
-                 preferableTarget == DNN_TARGET_MYRIAD ||
-                 preferableTarget == DNN_TARGET_HDDL ||
-                 preferableTarget == DNN_TARGET_FPGA) && !fused)
-            {
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
-                for (const std::string& name : {"weights", "biases"})
-                {
-                    auto it = ieNode->layer.getParameters().find(name);
-                    if (it != ieNode->layer.getParameters().end())
-                    {
-                        InferenceEngine::Blob::Ptr bp = it->second.as<InferenceEngine::Blob::Ptr>();
-                        it->second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
-                    }
-                }
-#else
-                auto& blobs = ieNode->layer.getConstantData();
-                if (blobs.empty())
-                {
-                    // In case of non weightable layer we have to specify
-                    // it's precision adding dummy blob.
-                    auto blob = InferenceEngine::make_shared_blob<int16_t>(
-                                    InferenceEngine::Precision::FP16,
-                                    InferenceEngine::Layout::C, {1});
-                    blob->allocate();
-                    blobs[""] = blob;
-                }
-                else
-                {
-                    for (auto& it : blobs)
-                        it.second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(it.second));
-                }
-#endif
-            }
-
-            if (!fused)
-                net->addLayer(ieNode->layer);
-
-            net->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers, ieNode->layer.getName());
-            net->addBlobs(ld.inputBlobsWrappers);
-            net->addBlobs(ld.outputBlobsWrappers);
-            addInfEngineNetOutputs(ld);
-        }
-
-        // Initialize all networks.
-        for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
-        {
-            LayerData &ld = it->second;
-            if (ld.backendNodes.find(preferableBackend) == ld.backendNodes.end())
-                continue;
-
-            Ptr<BackendNode> node = ld.backendNodes[preferableBackend];
-            if (node.empty())
-                continue;
-
-            Ptr<InfEngineBackendNode> ieNode = node.dynamicCast<InfEngineBackendNode>();
-            if (ieNode.empty())
-                continue;
-
-            CV_Assert(!ieNode->net.empty());
-
-            if (!ieNode->net->isInitialized())
-            {
-                ieNode->net->init((Target)preferableTarget);
-                ld.skip = false;
-            }
-        }
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
 
 #ifdef HAVE_DNN_NGRAPH
    /** mark input pins as outputs from other subnetworks
@@ -2162,7 +1786,7 @@ struct Net::Impl : public detail::NetImplBase
    void initNgraphBackend(const std::vector<LayerPin>& blobsToKeep_)
    {
        CV_TRACE_FUNCTION();
-        CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, haveInfEngine());
+        CV_CheckEQ(preferableBackend, DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, "");
 
        Ptr<InfEngineNgraphNet> net;
 
@@ -2993,7 +2617,6 @@ struct Net::Impl : public detail::NetImplBase
 
        if(!fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
                       preferableBackend != DNN_BACKEND_CUDA &&
-                       preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
                       preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
            return;
 
@@ -3814,15 +3437,13 @@ struct Net::Impl : public detail::NetImplBase
            {
                forwardHalide(ld.outputBlobsWrappers, node);
            }
-            else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-            {
-                forwardInfEngine(ld.outputBlobsWrappers, node, isAsync);
-            }
+#ifdef HAVE_INF_ENGINE
            else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            {
                forwardNgraph(ld.outputBlobsWrappers, node, isAsync);
            }
-            else if (preferableBackend == DNN_BACKEND_WEBNN)
+#endif
+            else if (preferableBackend == DNN_BACKEND_WEBNN)
            {
                forwardWebnn(ld.outputBlobsWrappers, node, isAsync);
            }
@@ -4181,27 +3802,13 @@ struct Net::Impl : public detail::NetImplBase
            // Transfer data to CPU if it's require.
            ld.outputBlobsWrappers[pin.oid]->copyToHost();
        }
-        CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+        CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
 
-        if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-            Ptr<InfEngineBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<InfEngineBackendWrapper>();
-            return std::move(wrapper->futureMat);
+        Ptr<NgraphBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<NgraphBackendWrapper>();
+        return std::move(wrapper->futureMat);
 #else
-            CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
-        }
-        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        {
-#ifdef HAVE_DNN_NGRAPH
-            Ptr<NgraphBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<NgraphBackendWrapper>();
-            return std::move(wrapper->futureMat);
-#else
-            CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
-#endif
-        }
+        CV_Error(Error::StsNotImplemented, "DNN: OpenVINO/nGraph backend is required");
 #endif // HAVE_INF_ENGINE
-        CV_Error(Error::StsNotImplemented, "DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 backend is required");
    }
 
    AsyncArray getBlobAsync(String outputName)
@@ -4275,40 +3882,18 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
    CV_TRACE_REGION_NEXT("backendNode");
 
    Ptr<BackendNode> backendNode;
-#ifdef HAVE_DNN_NGRAPH
-    if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
    {
        auto fake_node = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{});
        Ptr<InfEngineNgraphNode> backendNodeNGraph(new InfEngineNgraphNode(fake_node));
        backendNodeNGraph->net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet(*(cvNet.impl), ieNet));
        backendNode = backendNodeNGraph;
    }
-    else
-#endif
-    {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-        Ptr<InfEngineBackendNode> backendNodeNN(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
-        backendNodeNN->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
-        backendNode = backendNodeNN;
-#else
-        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
-    }
 
    CV_TRACE_REGION_NEXT("register_outputs");
 
-#ifdef HAVE_DNN_NGRAPH
    auto ngraphFunction = ieNet.getFunction();
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
-    std::list< std::shared_ptr<ngraph::Node> > ngraphOperations;
-#else
-    std::vector< std::shared_ptr<ngraph::Node> > ngraphOperations;
-#endif
-    if (ngraphFunction)
-    {
-        ngraphOperations = ngraphFunction->get_ops();
-    }
-#endif
+    CV_Assert(ngraphFunction);
+    std::vector< std::shared_ptr<ngraph::Node> > ngraphOperations = ngraphFunction->get_ops();
 
    for (auto& it : ieNet.getOutputsInfo())
    {
@@ -4320,8 +3905,6 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
 
        LayerData& ld = cvNet.impl->layers[lid];
 
-#ifdef HAVE_DNN_NGRAPH
-        if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
        {
            Ptr<Layer> cvLayer(new NgraphBackendLayer(ieNet));
            cvLayer->name = outputName;
@@ -4329,44 +3912,18 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
 
        auto process_layer = [&](const std::string& name) -> bool
        {
-            if (ngraphFunction)
+            CV_TRACE_REGION("ngraph_function");
+            for (const auto& op : ngraphOperations)
            {
-                CV_TRACE_REGION("ngraph_function");
-                for (const auto& op : ngraphOperations)
+                CV_Assert(op);
+                if (op->get_friendly_name() == name)
                {
-                    CV_Assert(op);
-                    if (op->get_friendly_name() == name)
-                    {
-                        const std::string typeName = op->get_type_info().name;
-                        cvLayer->type = typeName;
-                        return true;
-                    }
-                }
-                return false;
-            }
-            else
-            {
-#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_4)
-                CV_Error(Error::StsNotImplemented, "This OpenCV version is built with Inference Engine which has dropped IR v7 support");
-#else
-                CV_TRACE_REGION("legacy_cnn_layer");
-                try
-                {
-                    InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(name.c_str());
-                    CV_Assert(ieLayer);
-
-                    cvLayer->type = ieLayer->type;
+                    const std::string typeName = op->get_type_info().name;
+                    cvLayer->type = typeName;
                    return true;
                }
-                catch (const std::exception& e)
-                {
-                    CV_UNUSED(e);
-                    CV_LOG_DEBUG(NULL, "IE layer extraction failure: '" << name << "' - " << e.what());
-                    return false;
-                }
-#endif
            }
+            return false;
        };
 
        bool found = process_layer(outputName);
@@ -4385,37 +3942,6 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
            ld.layerInstance = cvLayer;
            ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NGRAPH] = backendNode;
        }
-        else
-#endif
-        {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-            Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
-
-            InferenceEngine::CNNLayerPtr ieLayer;
-            try
-            {
-                ieLayer = ieNet.getLayerByName(outputName.c_str());
-            }
-            catch (...)
-            {
-                auto pos = outputName.rfind('.'); // cut port number: ".0"
-                if (pos != std::string::npos)
-                {
-                    std::string layerName = outputName.substr(0, pos);
-                    ieLayer = ieNet.getLayerByName(layerName.c_str());
-                }
-            }
-            CV_Assert(ieLayer);
-
-            cvLayer->name = outputName;
-            cvLayer->type = ieLayer->type;
-            ld.layerInstance = cvLayer;
-
-            ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019] = backendNode;
-#else
-            CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support");
-#endif
-        }
 
        for (int i = 0; i < inputsNames.size(); ++i)
            cvNet.connect(0, i, lid, i);
@@ -4423,7 +3949,7 @@ Net Net::Impl::createNetworkFromModelOptimizer(InferenceEngine::CNNNetwork& ieNe
 
    CV_TRACE_REGION_NEXT("finalize");
 
-    cvNet.setPreferableBackend(getInferenceEngineBackendTypeParam());
+    cvNet.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
 
    cvNet.impl->skipInfEngineInit = true;
    return cvNet;
@@ -4440,16 +3966,8 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
 
    FPDenormalsIgnoreHintScope fp_denormals_ignore_scope;
 
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
-    InferenceEngine::CNNNetReader reader;
-    reader.ReadNetwork(xml);
-    reader.ReadWeights(bin);
-
-    InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
-#else
    InferenceEngine::Core& ie = getCore("");
    InferenceEngine::CNNNetwork ieNet = ie.ReadNetwork(xml, bin);
-#endif
 
    return Impl::createNetworkFromModelOptimizer(ieNet);
 #endif // HAVE_INF_ENGINE
@@ -4478,26 +3996,6 @@ Net Net::readFromModelOptimizer(
 
    FPDenormalsIgnoreHintScope fp_denormals_ignore_scope;
 
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
-    InferenceEngine::CNNNetReader reader;
-
-    try
-    {
-        reader.ReadNetwork(bufferModelConfigPtr, bufferModelConfigSize);
-
-        InferenceEngine::TensorDesc tensorDesc(InferenceEngine::Precision::U8, { bufferWeightsSize }, InferenceEngine::Layout::C);
-        InferenceEngine::TBlob<uint8_t>::Ptr weightsBlobPtr(new InferenceEngine::TBlob<uint8_t>(tensorDesc));
-        weightsBlobPtr->allocate();
-        std::memcpy(weightsBlobPtr->buffer(), (uchar*)bufferWeightsPtr, bufferWeightsSize);
-        reader.SetWeights(weightsBlobPtr);
-    }
-    catch (const std::exception& e)
-    {
-        CV_Error(Error::StsError, std::string("DNN: IE failed to load model: ") + e.what());
-    }
-
-    InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
-#else
    InferenceEngine::Core& ie = getCore("");
 
    std::string model; model.assign((char*)bufferModelConfigPtr, bufferModelConfigSize);
@@ -4514,7 +4012,6 @@ Net Net::readFromModelOptimizer(
    {
        CV_Error(Error::StsError, std::string("DNN: IE failed to load model: ") + e.what());
    }
-#endif
 
    return Impl::createNetworkFromModelOptimizer(ieNet);
 #endif // HAVE_INF_ENGINE
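
Both overloads end in createNetworkFromModelOptimizer(); the variant in this hunk backs the public in-memory API. A sketch, assuming the IR bytes were loaded elsewhere (e.g. from an embedded resource):

#include <opencv2/dnn.hpp>
#include <vector>

cv::dnn::Net loadFromBuffers(const std::vector<uchar>& xmlBytes,
                             const std::vector<uchar>& binBytes)
{
    // Parses the IR directly from memory; no temporary files involved.
    return cv::dnn::readNetFromModelOptimizer(xmlBytes, binBytes);
}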
@@ -4621,8 +4118,8 @@ AsyncArray Net::forwardAsync(const String& outputName)
    std::vector<LayerPin> pins(1, impl->getPinByAlias(layerName));
    impl->setUpNet(pins);
 
-    if (!(impl->preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || impl->preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
-        CV_Error(Error::StsNotImplemented, "DNN: Asynchronous forward is supported for Inference Engine backends only");
+    if (impl->preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        CV_Error(Error::StsNotImplemented, "DNN: Asynchronous forward is supported for Inference Engine backend only");
 
    impl->isAsync = true;
    impl->forwardToLayer(impl->getLayerData(layerName));
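
Caller-side view of the tightened guard: forwardAsync() now throws unless the nGraph backend is selected. A sketch (the input name "data" is an assumption about the model):

#include <opencv2/dnn.hpp>

cv::Mat runAsync(cv::dnn::Net& net, const cv::Mat& blob)
{
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);  // resolves to nGraph
    net.setInput(blob, "data");
    cv::AsyncArray result = net.forwardAsync();
    cv::Mat out;
    result.get(out);  // blocks until the inference request completes
    return out;
}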
@@ -5047,7 +4544,7 @@ void Net::setPreferableBackend(int backendId)
 
 #ifdef HAVE_INF_ENGINE
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
-        backendId = getInferenceEngineBackendTypeParam();
+        backendId = DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
 #endif
 
    if( impl->preferableBackend != backendId )
@@ -5292,8 +4789,8 @@ string Net::Impl::dump() const
        case DNN_BACKEND_DEFAULT: backend = "DEFAULT/"; break;
        case DNN_BACKEND_HALIDE: backend = "HALIDE/"; break;
        case DNN_BACKEND_INFERENCE_ENGINE: // fallthru
-        case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: backend = "DLIE/"; break;
-        case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "NGRAPH/"; break;
+        case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: // fallthru
+        case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "OpenVINO/"; break;
        case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
        case DNN_BACKEND_VKCOM: backend = "VULKAN/"; break;
        case DNN_BACKEND_CUDA: backend = "CUDA/"; break;
@@ -5894,13 +5391,6 @@ Ptr<BackendNode> Layer::initHalide(const std::vector<Ptr<BackendWrapper> > &)
    return Ptr<BackendNode>();
 }
 
-Ptr<BackendNode> Layer::initInfEngine(const std::vector<Ptr<BackendWrapper> > &)
-{
-    CV_Error(Error::StsNotImplemented, "Inference Engine pipeline of " + type +
-                                       " layers is not defined.");
-    return Ptr<BackendNode>();
-}
-
 Ptr<BackendNode> Layer::initNgraph(const std::vector<Ptr<BackendWrapper> > & inputs, const std::vector<Ptr<BackendNode> >& nodes)
 {
    CV_Error(Error::StsNotImplemented, "Inference Engine pipeline of " + type +
@ -170,11 +170,14 @@ public:
|
|||||||
|
|
||||||
virtual bool supportBackend(int backendId) CV_OVERRIDE
|
virtual bool supportBackend(int backendId) CV_OVERRIDE
|
||||||
{
|
{
|
||||||
|
#ifdef HAVE_INF_ENGINE
|
||||||
|
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
|
||||||
|
return preferableTarget == DNN_TARGET_CPU || dims == 4;
|
||||||
|
#endif
|
||||||
return (backendId == DNN_BACKEND_OPENCV) ||
|
return (backendId == DNN_BACKEND_OPENCV) ||
|
||||||
backendId == DNN_BACKEND_CUDA ||
|
backendId == DNN_BACKEND_CUDA ||
|
||||||
(backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
|
(backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
|
||||||
backendId == DNN_BACKEND_WEBNN ||
|
backendId == DNN_BACKEND_WEBNN;
|
||||||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef HAVE_OPENCL
|
#ifdef HAVE_OPENCL
|
||||||
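Note: the hunk above is the template this patch applies across layers: answer the nGraph query first under HAVE_INF_ENGINE, then fall through to the unconditional backend list. Distilled into a free function for illustration (names are ours, not the library's; the per-layer constraint replaces `true`):

    #include <opencv2/dnn.hpp>

    static bool supportsBackendSketch(int backendId)
    {
    #ifdef HAVE_INF_ENGINE
        if (backendId == cv::dnn::DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return true;  // e.g. `preferableTarget == DNN_TARGET_CPU || dims == 4` above
    #endif
        return backendId == cv::dnn::DNN_BACKEND_OPENCV ||
               backendId == cv::dnn::DNN_BACKEND_CUDA;
    }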
@@ -382,16 +385,6 @@ public:
     }
 #endif // HAVE_HALIDE

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
-        const size_t numChannels = weights_.total();
-        addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
-        addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -63,9 +63,12 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
-               backendId == DNN_BACKEND_CUDA ||
-               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
+               backendId == DNN_BACKEND_CUDA;
     }

     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -116,32 +119,6 @@ public:
     }

-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
-    {
-        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        std::vector<size_t> dims = input->getDims();
-        CV_Assert(!dims.empty());
-
-        InferenceEngine::Builder::Layer ieLayer(name);
-        ieLayer.setName(name);
-        if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
-        {
-            ieLayer.setType("Copy");
-        }
-        else
-        {
-            ieLayer.setType("Split");
-            ieLayer.getParameters()["axis"] = dims.size() - 1;
-            ieLayer.getParameters()["out_sizes"] = dims[0];
-        }
-        ieLayer.setInputPorts({InferenceEngine::Port(dims)});
-        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -113,11 +113,13 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
                (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding) || // By channels
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() && !padding) ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
                (backendId == DNN_BACKEND_WEBNN && !padding) ||
                (backendId == DNN_BACKEND_VKCOM && haveVulkan() && !padding);
     }
@@ -343,18 +345,6 @@ public:
         return Ptr<BackendNode>();
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
-    {
-        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-
-        InferenceEngine::Builder::ConcatLayer ieLayer(name);
-        ieLayer.setAxis(normalize_axis(axis, input->getDims().size()));
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
@@ -34,9 +34,11 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
                backendId == DNN_BACKEND_WEBNN ||
                backendId == DNN_BACKEND_CUDA;
     }
@@ -78,16 +80,6 @@ public:
     }

-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::ConstLayer ieLayer(name);
-        ieLayer.setData(wrapToInfEngineBlob(blobs[0]));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -330,7 +330,7 @@ public:
         }
 #endif
 #ifdef HAVE_INF_ENGINE
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
             bool isArmTarget = preferableTarget == DNN_TARGET_CPU && isArmComputePlugin();
             if (isArmTarget && blobs.empty())
@@ -340,7 +340,7 @@ public:
             if (ksize == 3)
                 return preferableTarget != DNN_TARGET_MYRIAD && !isArmTarget;
             bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
-            if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || !isMyriad) && blobs.empty())
+            if (!isMyriad && blobs.empty())
                 return false;
             return (!isMyriad || dilation.width == dilation.height);
         }
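Note: the convolution gates above keep their per-target caveats (Myriad/HDDL dilation checks, ARM Compute plugin) even though the backend test is now nGraph-only. Target selection remains a separate caller-side knob; a hedged sketch reusing the net from earlier:

    // Backend and target are orthogonal; supportBackend() is evaluated per pair.
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_MYRIAD);  // or DNN_TARGET_CPU / DNN_TARGET_OPENCL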
@@ -761,69 +761,6 @@ public:
         return Ptr<BackendNode>();
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
-    {
-        CV_Assert(!blobs.empty());
-        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        std::vector<size_t> dims = input->getDims();
-        CV_Assert(dims.size() == 4 || dims.size() == 5);
-        const int inpCn = dims[1];
-        const int outCn = blobs[0].size[0];
-        const int inpGroupCn = blobs[0].size[1];
-        const int group = inpCn / inpGroupCn;
-        InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
-                                                              InferenceEngine::Layout::NCDHW;
-
-        auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
-        if (fusedWeights)
-        {
-            if (weightsMat.isContinuous())
-            {
-                Mat cvWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
-                ieWeights = wrapToInfEngineBlob(cvWeights, layout);
-            }
-            else
-            {
-                ieWeights = InferenceEngine::make_shared_blob<float>({
-                                InferenceEngine::Precision::FP32,
-                                ieWeights->getTensorDesc().getDims(), layout
-                            });
-                ieWeights->allocate();
-
-                Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
-                Mat cvWeights = weightsMat.colRange(0, newWeights.cols);
-                cvWeights.copyTo(newWeights);
-            }
-        }
-        InferenceEngine::Blob::Ptr ieBiases;
-        if (hasBias() || fusedBias)
-        {
-            Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
-            ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
-        }
-
-        InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
-
-        ieLayer.setKernel(kernel_size);
-        ieLayer.setStrides(strides);
-        ieLayer.setDilation(dilations);
-        ieLayer.setPaddingsBegin(pads_begin);
-        ieLayer.setPaddingsEnd(pads_end);
-        ieLayer.setGroup((size_t)group);
-        ieLayer.setOutDepth((size_t)outCn);
-
-        InferenceEngine::Builder::Layer l = ieLayer;
-        addConstantData("weights", ieWeights, l);
-        if (ieBiases)
-            addConstantData("biases", ieBiases, l);
-
-        if (!padMode.empty())
-            l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
-
-        return Ptr<BackendNode>(new InfEngineBackendNode(l));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
@@ -2319,52 +2256,6 @@ public:
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
             return group == 1;
         }
-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        {
-            if (kernel_size.size() == 3 && preferableTarget != DNN_TARGET_CPU) {
-                return false;
-            }
-
-            if (std::accumulate(adjust_pads.begin(), adjust_pads.end(), 0, std::plus<size_t>()) > 0)
-            {
-                if (padMode.empty())
-                {
-                    if (preferableTarget != DNN_TARGET_CPU && group != 1)
-                    {
-                        for (int i = 0; i < adjust_pads.size(); i++) {
-                            if (adjust_pads[i] && pads_begin[i])
-                                return false;
-                        }
-                    }
-                    for (int i = 0; i < adjust_pads.size(); i++) {
-                        if (pads_end[i] < adjust_pads[i])
-                            return false;
-                    }
-                    return true;
-                }
-                else if (padMode == "SAME")
-                {
-                    for (int i = 0; i < adjust_pads.size(); i++) {
-                        if (kernel_size[i] < pads_begin[i] + 1 + adjust_pads[i])
-                            return false;
-                    }
-                    return true;
-                }
-                else if (padMode == "VALID")
-                    return false;
-            }
-
-            if (group != 1)
-            {
-                return preferableTarget == DNN_TARGET_CPU;
-            }
-            if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
-                return std::accumulate(dilations.begin(), dilations.end(), 1, std::multiplies<size_t>()) == 1;
-            return true;
-        }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
 #endif // HAVE_INF_ENGINE
         {
             return backendId == DNN_BACKEND_CUDA ||
@@ -3022,64 +2913,6 @@ public:
         return Ptr<BackendNode>();
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
-    {
-        CV_Assert(!blobs.empty());
-        InferenceEngine::Layout layout = blobs[0].dims == 5? InferenceEngine::Layout::NCDHW :
-                                                             InferenceEngine::Layout::OIHW;
-
-        auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
-        if (fusedWeights)
-        {
-            ieWeights = InferenceEngine::make_shared_blob<float>({
-                            InferenceEngine::Precision::FP32,
-                            ieWeights->getTensorDesc().getDims(), layout
-                        });
-            ieWeights->allocate();
-
-            int inpCn = blobs[0].size[0];
-            Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, inpCn);
-            transpose(weightsMat, newWeights);
-        }
-
-        const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW or OIDHW layout
-        const int group = numOutput / outGroupCn;
-
-        InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);
-
-        ieLayer.setKernel(kernel_size);
-        ieLayer.setStrides(strides);
-        ieLayer.setDilation(dilations);
-        ieLayer.setPaddingsBegin(pads_begin);
-
-        if (padMode.empty())
-        {
-            std::vector<size_t> paddings_end;
-            for (int i = 0; i < pads_end.size(); i++) {
-                paddings_end.push_back(pads_end[i] - adjust_pads[i]);
-            }
-            ieLayer.setPaddingsEnd(paddings_end);
-        }
-        else if (padMode == "SAME")
-        {
-            std::vector<size_t> paddings_end;
-            for (int i = 0; i < pads_begin.size(); i++) {
-                paddings_end.push_back(kernel_size[i] - pads_begin[i] - 1 - adjust_pads[i]);
-            }
-            ieLayer.setPaddingsEnd(paddings_end);
-        }
-        ieLayer.setGroup((size_t)group);
-        ieLayer.setOutDepth((size_t)numOutput);
-
-        InferenceEngine::Builder::Layer l = ieLayer;
-        addConstantData("weights", ieWeights, l);
-        if (hasBias())
-            addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
-        return Ptr<BackendNode>(new InfEngineBackendNode(l));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
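Note: with whole Builder-API bodies like the two above deleted, a model that previously ran via NN Builder either runs on nGraph or fails when the network is set up. A defensive caller-side sketch (illustrative only; assumes <opencv2/dnn.hpp>):

    cv::Mat runWithFallback(cv::dnn::Net& net, const cv::Mat& blob)
    {
        net.setInput(blob);
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
        try
        {
            return net.forward();
        }
        catch (const cv::Exception&)
        {
            // Unsupported layer/target combination: retry on the plain OpenCV path.
            net.setPreferableBackend(cv::dnn::DNN_BACKEND_OPENCV);
            return net.forward();
        }
    }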
@@ -221,7 +221,7 @@ public:
     {
         return backendId == DNN_BACKEND_OPENCV ||
                (backendId == DNN_BACKEND_CUDA && !_groupByClasses) ||
-               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && !_locPredTransposed && _bboxesNormalized);
+               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && !_locPredTransposed && _bboxesNormalized);
     }

     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -1001,30 +1001,6 @@ public:
     }
 #endif

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::DetectionOutputLayer ieLayer(name);
-
-        ieLayer.setNumClasses(_numClasses);
-        ieLayer.setShareLocation(_shareLocation);
-        ieLayer.setBackgroudLabelId(_backgroundLabelId);
-        ieLayer.setNMSThreshold(_nmsThreshold);
-        ieLayer.setTopK(_topK > 0 ? _topK : _keepTopK);
-        ieLayer.setKeepTopK(_keepTopK);
-        ieLayer.setConfidenceThreshold(_confidenceThreshold);
-        ieLayer.setVariantEncodedInTarget(_varianceEncodedInTarget);
-        ieLayer.setCodeType("caffe.PriorBoxParameter." + _codeType);
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(3));
-
-        InferenceEngine::Builder::Layer l = ieLayer;
-        l.getParameters()["eta"] = std::string("1.0");
-        l.getParameters()["clip"] = _clip;
-
-        return Ptr<BackendNode>(new InfEngineBackendNode(l));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -186,14 +186,6 @@ public:
         return Ptr<BackendNode>();
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::Layer ieLayer = func.initInfEngineBuilderAPI();
-        ieLayer.setName(this->name);
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -341,10 +333,6 @@ struct ReLUFunctor : public BaseFunctor

     bool supportBackend(int backendId, int)
     {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-            return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
-#endif
 #ifdef HAVE_DNN_NGRAPH
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
             return true;
@@ -462,13 +450,6 @@ struct ReLUFunctor : public BaseFunctor
     }
 #endif // HAVE_HALIDE

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
-    {
-        return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(slope);
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
 #ifdef HAVE_DNN_NGRAPH
     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
     {
@@ -534,11 +515,14 @@ struct ReLU6Functor : public BaseFunctor

     bool supportBackend(int backendId, int)
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
                backendId == DNN_BACKEND_HALIDE ||
-               backendId == DNN_BACKEND_WEBNN ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+               backendId == DNN_BACKEND_WEBNN;
     }

     void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@@ -620,12 +604,6 @@ struct ReLU6Functor : public BaseFunctor
     }
 #endif // HAVE_HALIDE

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
-    {
-        return InferenceEngine::Builder::ClampLayer("").setMinValue(minValue).setMaxValue(maxValue);
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019

 #ifdef HAVE_DNN_NGRAPH
     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@@ -743,12 +721,6 @@ struct BaseDefaultFunctor : public BaseFunctor
     }
 #endif // HAVE_HALIDE

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
-    {
-        CV_Error(Error::StsNotImplemented, "");
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019

 #ifdef HAVE_DNN_NGRAPH
     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@@ -782,10 +754,13 @@ struct TanHFunctor : public BaseDefaultFunctor<TanHFunctor>

     bool supportBackend(int backendId, int)
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
-               backendId == DNN_BACKEND_HALIDE ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+               backendId == DNN_BACKEND_HALIDE;
     }

     inline float calculate(float x) const
@@ -808,13 +783,6 @@ struct TanHFunctor : public BaseDefaultFunctor<TanHFunctor>
     }
 #endif // HAVE_HALIDE

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
-    {
-        return InferenceEngine::Builder::TanHLayer("");
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
 #ifdef HAVE_DNN_NGRAPH
     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
     {
@@ -937,10 +905,13 @@ struct SigmoidFunctor : public BaseDefaultFunctor<SigmoidFunctor>

     bool supportBackend(int backendId, int)
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
-               backendId == DNN_BACKEND_HALIDE ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+               backendId == DNN_BACKEND_HALIDE;
     }

     inline float calculate(float x) const
@@ -963,12 +934,6 @@ struct SigmoidFunctor : public BaseDefaultFunctor<SigmoidFunctor>
     }
 #endif // HAVE_HALIDE

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
-    {
-        return InferenceEngine::Builder::SigmoidLayer("");
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019

 #ifdef HAVE_DNN_NGRAPH
     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@@ -992,10 +957,13 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>

     bool supportBackend(int backendId, int)
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
-               backendId == DNN_BACKEND_HALIDE ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+               backendId == DNN_BACKEND_HALIDE;
     }

     inline float calculate(float x) const
@@ -1023,13 +991,6 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
     }
 #endif // HAVE_HALIDE

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
-    {
-        return InferenceEngine::Builder::ELULayer("");
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-
 #ifdef HAVE_DNN_NGRAPH
     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
     {
@@ -1050,8 +1011,8 @@ struct AbsValFunctor : public BaseDefaultFunctor<AbsValFunctor>
     bool supportBackend(int backendId, int)
     {
 #ifdef HAVE_INF_ENGINE
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-            return !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
 #endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
@@ -1078,12 +1039,6 @@ struct AbsValFunctor : public BaseDefaultFunctor<AbsValFunctor>
     }
 #endif // HAVE_HALIDE

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
-    {
-        return InferenceEngine::Builder::ReLULayer("").setNegativeSlope(-0.999999f);
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019

 #ifdef HAVE_DNN_NGRAPH
     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@@ -1930,14 +1885,15 @@ struct PowerFunctor : public BaseFunctor

     bool supportBackend(int backendId, int targetId)
     {
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-            return (targetId != DNN_TARGET_OPENCL && targetId != DNN_TARGET_OPENCL_FP16) || power == 1.0 || power == 0.5;
+#ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
             return true;
-        else
+#endif
+        {
             return backendId == DNN_BACKEND_OPENCV ||
                    backendId == DNN_BACKEND_CUDA ||
                    backendId == DNN_BACKEND_HALIDE;
+        }
     }

     void finalize()
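Note: the rewritten PowerFunctor gate keeps a bare brace block so the #endif splice compiles both with and without OpenVINO; the functor's math is untouched. Per the Caffe-style Power definition these setPower/setScale/setShift parameters come from, the computation is as sketched below (our restatement, an assumption rather than code from this patch):

    #include <cmath>

    // y = (shift + scale * x) ^ power; only backend eligibility changed here.
    static float powerCalc(float x, float scale, float shift, float power)
    {
        return std::pow(shift + scale * x, power);
    }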
@@ -2029,14 +1985,6 @@ struct PowerFunctor : public BaseFunctor
     }
 #endif // HAVE_HALIDE

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
-    {
-        return InferenceEngine::Builder::PowerLayer("").setPower(power)
-                                                       .setScale(scale)
-                                                       .setShift(shift);
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019

 #ifdef HAVE_DNN_NGRAPH
     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@@ -2189,10 +2137,13 @@ struct ChannelsPReLUFunctor : public BaseFunctor

     bool supportBackend(int backendId, int)
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
-               backendId == DNN_BACKEND_HALIDE ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+               backendId == DNN_BACKEND_HALIDE;
     }

     void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@@ -2282,15 +2233,6 @@ struct ChannelsPReLUFunctor : public BaseFunctor
     }
 #endif // HAVE_HALIDE

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
-    {
-        InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer("");
-        const size_t numChannels = scale.total();
-        addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l);
-        return l;
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019

 #ifdef HAVE_DNN_NGRAPH
     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
@@ -164,6 +164,11 @@ public:
         if (hasVecInput && ELTWISE_CHANNNELS_SAME)
             return backendId == DNN_BACKEND_OPENCV;

+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return channelsMode == ELTWISE_CHANNNELS_SAME;
+#endif
+
         if (backendId == DNN_BACKEND_CUDA)
         {
             if(channelsModeInput == ELTWISE_CHANNNELS_INPUT_0 || channelsModeInput == ELTWISE_CHANNNELS_INPUT_0_TRUNCATE)
@@ -172,9 +177,8 @@ public:
         }

         return backendId == DNN_BACKEND_OPENCV ||
-               (backendId == DNN_BACKEND_HALIDE && op != DIV) || // TODO: not implemented, see PR #15811
-               ((((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()))
-               || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && channelsMode == ELTWISE_CHANNNELS_SAME));
+               (backendId == DNN_BACKEND_HALIDE && op != DIV) // TODO: not implemented, see PR #15811
+               ;
     }

     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -837,34 +841,6 @@ public:
         return Ptr<BackendNode>();
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::EltwiseLayer ieLayer(name);
-
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
-
-        if (op == SUM)
-            ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::SUM);
-        else if (op == PROD)
-            ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MUL);
-        else if (op == DIV)
-            ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::DIV);
-        else if (op == MAX)
-            ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MAX);
-        else if (op == MIN)
-            ieLayer.setEltwiseType(InferenceEngine::Builder::EltwiseLayer::EltwiseType::MIN);
-        else
-            CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
-
-        InferenceEngine::Builder::Layer l = ieLayer;
-        if (!coeffs.empty())
-            l.getParameters()["coeff"] = coeffs;
-
-        return Ptr<BackendNode>(new InfEngineBackendNode(l));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
@@ -72,9 +72,12 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
-               backendId == DNN_BACKEND_CUDA ||
-               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
+               backendId == DNN_BACKEND_CUDA;
     }

     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -171,25 +174,10 @@ public:
     }

-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::Layer ieLayer(name);
-        ieLayer.setName(name);
-        ieLayer.setType("Flatten");
-        ieLayer.getParameters()["axis"] = (size_t)_startAxis;
-        ieLayer.getParameters()["end_axis"] = _endAxis; // Do not cast to size_t because it might be negative.
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
-        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         std::vector<size_t> dims = ieInpNode->get_shape();

@@ -148,12 +148,15 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return axis == 1;
+#endif
+
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
                (backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1) ||
-               (backendId == DNN_BACKEND_WEBNN && axis == 1) ||
-               (((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && !blobs.empty()) ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && axis == 1);
+               (backendId == DNN_BACKEND_WEBNN && axis == 1);
     }

     virtual bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
@@ -570,23 +573,6 @@ public:
         return Ptr<BackendNode>();
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::FullyConnectedLayer ieLayer(name);
-
-        const int outNum = blobs[0].size[0];
-        ieLayer.setOutputNum(outNum);
-
-        InferenceEngine::Builder::Layer l = ieLayer;
-        addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW), l);
-        if (bias)
-            addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C), l);
-
-        return Ptr<BackendNode>(new InfEngineBackendNode(l));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
@@ -99,12 +99,10 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
-            return bias == (int)bias;
-        }
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
             return bias == (int)bias;
-        }
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
                backendId == DNN_BACKEND_HALIDE ||
@@ -444,24 +442,6 @@ public:
 #endif // HAVE_HALIDE
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        float alphaSize = alpha;
-        if (!normBySize)
-            alphaSize *= (type == SPATIAL_NRM ? size*size : size);
-
-        InferenceEngine::Builder::NormLayer ieLayer(name);
-        ieLayer.setSize(size);
-        ieLayer.setAlpha(alphaSize);
-        ieLayer.setBeta(beta);
-        ieLayer.setAcrossMaps(type == CHANNEL_NRM);
-
-        InferenceEngine::Builder::Layer l = ieLayer;
-        l.getParameters()["k"] = bias;
-        return Ptr<BackendNode>(new InfEngineBackendNode(l));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -124,14 +124,7 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        {
-            bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
-            return !zeroDev && (!isMyriad || eps <= 1e-7f);
-        }
-#endif
-#ifdef HAVE_DNN_NGRAPH
+#ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
             return true;
 #endif
@@ -387,16 +380,6 @@ public:
         }
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::MVNLayer ieLayer(name);
-        ieLayer.setAcrossChannels(acrossChannels);
-        ieLayer.setNormalize(normVariance);
-        ieLayer.setEpsilon(eps);
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
@@ -70,17 +70,15 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
             if (pnorm != 2)
                 return false;

-            bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
-            if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && isMyriad)
-                return !acrossSpatial;
-
             return startAxis == 1;
         }
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                (backendId == DNN_BACKEND_CUDA && (pnorm == 1 || pnorm == 2));
     }
@@ -270,58 +268,6 @@ public:
     }

-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
-    {
-        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-        std::vector<size_t> dims = input->getDims();
-        if (dims.size() == 4)
-        {
-            InferenceEngine::Builder::NormalizeLayer ieLayer(name);
-
-            ieLayer.setChannelShared(false);
-            ieLayer.setAcrossMaps(acrossSpatial);
-            ieLayer.setEpsilon(epsilon);
-
-            InferenceEngine::Builder::Layer l = ieLayer;
-            const int numChannels = dims[1];
-            InferenceEngine::Blob::Ptr weights;
-            if (blobs.empty())
-            {
-                weights = InferenceEngine::make_shared_blob<float>({
-                              InferenceEngine::Precision::FP32,
-                              {(size_t)numChannels}, InferenceEngine::Layout::C
-                          });
-                weights->allocate();
-
-                Mat weightsMat = infEngineBlobToMat(weights).reshape(1, numChannels);
-                Mat(numChannels, 1, CV_32F, Scalar(1)).copyTo(weightsMat);
-                l.getParameters()["channel_shared"] = false;
-            }
-            else
-            {
-                CV_Assert(numChannels == blobs[0].total());
-                weights = wrapToInfEngineBlob(blobs[0], {(size_t)numChannels}, InferenceEngine::Layout::C);
-                l.getParameters()["channel_shared"] = blobs[0].total() == 1;
-            }
-            addConstantData("weights", weights, l);
-            l.getParameters()["across_spatial"] = acrossSpatial;
-            return Ptr<BackendNode>(new InfEngineBackendNode(l));
-        }
-        else
-        {
-            InferenceEngine::Builder::GRNLayer ieLayer(name);
-            ieLayer.setBeta(epsilon);
-
-            InferenceEngine::Builder::Layer l = ieLayer;
-            l.getParameters()["bias"] = epsilon;
-
-            return Ptr<BackendNode>(new InfEngineBackendNode(l));
-        }
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -87,11 +87,6 @@ public:
         CV_Error(Error::StsNotImplemented, msg);
     }

-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
-    {
-        CV_Error(Error::StsNotImplemented, msg);
-    }
-
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
@@ -102,10 +102,10 @@ public:
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
             bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
-            if (INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) && isMyriad)
+            if (isMyriad)
                 return dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0;

             return (dstRanges.size() <= 4 || !isArmComputePlugin());
@@ -219,30 +219,6 @@ public:
         return Ptr<BackendNode>();
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::Layer ieLayer(name);
-        ieLayer.setName(name);
-        ieLayer.setType("Pad");
-
-        std::vector<int> begins(paddings.size(), 0), ends(paddings.size(), 0);
-        for (int i = 0; i < paddings.size(); ++i)
-        {
-            begins[i] = paddings[i].first;
-            ends[i] = paddings[i].second;
-        }
-        ieLayer.getParameters()["pads_begin"] = begins;
-        ieLayer.getParameters()["pads_end"] = ends;
-        ieLayer.getParameters()["pad_mode"] = paddingType;
-        if (paddingType == "constant")
-            ieLayer.getParameters()["pad_value"] = paddingValue;
-
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
-        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
@@ -115,13 +115,16 @@ public:
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && preferableTarget == DNN_TARGET_CPU)
-            return _order.size() <= 4 || !isArmComputePlugin();
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        {
+            if (preferableTarget == DNN_TARGET_CPU)
+                return _order.size() <= 4 || !isArmComputePlugin();
+            return true;
+        }
 #endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
                backendId == DNN_BACKEND_WEBNN ||
-               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()) ||
                (backendId == DNN_BACKEND_VKCOM && haveVulkan());
     }

@@ -418,16 +421,6 @@ public:
     }

-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::PermuteLayer ieLayer(name);
-        ieLayer.setOrder(_order);
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -199,34 +199,13 @@ public:
         {
             return type == MAX || type == AVE || type == ROI;
         }
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        {
-            if (computeMaxIdx)
-                return false;
-            if (kernel_size.size() == 3)
-                return preferableTarget == DNN_TARGET_CPU;
-            if (kernel_size.size() == 1)
-                return false;
-            if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL) {
-#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
-                if (type == MAX && (pads_begin[1] == 1 && pads_begin[0] == 1) && (strides[0] == 2 && strides[1] == 2)) {
-                    return !isMyriadX();
-                }
-#endif
-                return type == MAX || type == AVE;
-            }
-            else
-                return type != STOCHASTIC && type != SUM;
-        }
-#endif
+#ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
-#ifdef HAVE_DNN_NGRAPH
             return !computeMaxIdx && type != STOCHASTIC && kernel_size.size() > 1 && (kernel_size.size() != 3 || !isArmComputePlugin());
-#endif
         }
-        else if (backendId == DNN_BACKEND_OPENCV)
+#endif
+        if (backendId == DNN_BACKEND_OPENCV)
         {
             if (kernel_size.size() == 3)
                 return preferableTarget == DNN_TARGET_CPU;
@@ -550,54 +529,6 @@ public:
         return Ptr<BackendNode>();
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        if (type == MAX || type == AVE)
-        {
-            InferenceEngine::Builder::PoolingLayer ieLayer(name);
-
-            ieLayer.setKernel(kernel_size);
-            ieLayer.setStrides(strides);
-            ieLayer.setPaddingsBegin(pads_begin);
-            ieLayer.setPaddingsEnd(pads_end);
-
-            ieLayer.setPoolingType(type == MAX ?
-                                   InferenceEngine::Builder::PoolingLayer::PoolingType::MAX :
-                                   InferenceEngine::Builder::PoolingLayer::PoolingType::AVG);
-            ieLayer.setRoundingType(ceilMode ?
-                                    InferenceEngine::Builder::PoolingLayer::RoundingType::CEIL :
-                                    InferenceEngine::Builder::PoolingLayer::RoundingType::FLOOR);
-            ieLayer.setExcludePad(!avePoolPaddedArea);
-
-            InferenceEngine::Builder::Layer l = ieLayer;
-            if (!padMode.empty())
-                l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
-            return Ptr<BackendNode>(new InfEngineBackendNode(l));
-        }
-        else if (type == ROI)
-        {
-            InferenceEngine::Builder::ROIPoolingLayer ieLayer(name);
-            ieLayer.setSpatialScale(spatialScale);
-            ieLayer.setPooled({pooledSize.height, pooledSize.width});
-            ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
-            return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-        }
-        else if (type == PSROI)
-        {
-            InferenceEngine::Builder::PSROIPoolingLayer ieLayer(name);
-            ieLayer.setSpatialScale(spatialScale);
-            ieLayer.setOutputDim(psRoiOutChannels);
-            ieLayer.setGroupSize(pooledSize.width);
-            ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
-            return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-        }
-        else
-            CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
-        return Ptr<BackendNode>();
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
@@ -298,9 +298,7 @@ public:
 #endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() &&
-                   ( _explicitSizes || (_minSize.size() == 1 && _maxSize.size() <= 1)))
-               || (backendId == DNN_BACKEND_VKCOM && haveVulkan());
+               (backendId == DNN_BACKEND_VKCOM && haveVulkan());
     }

     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -510,69 +508,6 @@ public:
     }

-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        if (_explicitSizes)
-        {
-            InferenceEngine::Builder::PriorBoxClusteredLayer ieLayer(name);
-            ieLayer.setSteps({_stepY, _stepX});
-
-            CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
-            ieLayer.setOffset(_offsetsX[0]);
-
-            ieLayer.setClip(_clip);
-            ieLayer.setFlip(false); // We already flipped aspect ratios.
-
-            InferenceEngine::Builder::Layer l = ieLayer;
-
-            CV_Assert_N(!_boxWidths.empty(), !_boxHeights.empty(), !_variance.empty());
-            CV_Assert(_boxWidths.size() == _boxHeights.size());
-            l.getParameters()["width"] = _boxWidths;
-            l.getParameters()["height"] = _boxHeights;
-            l.getParameters()["variance"] = _variance;
-            return Ptr<BackendNode>(new InfEngineBackendNode(l));
-        }
-        else
-        {
-            InferenceEngine::Builder::PriorBoxLayer ieLayer(name);
-
-            CV_Assert(!_explicitSizes);
-            ieLayer.setMinSize(_minSize[0]);
-            if (!_maxSize.empty())
-                ieLayer.setMaxSize(_maxSize[0]);
-
-            CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
-            ieLayer.setOffset(_offsetsX[0]);
-
-            ieLayer.setClip(_clip);
-            ieLayer.setFlip(false); // We already flipped aspect ratios.
-
-            InferenceEngine::Builder::Layer l = ieLayer;
-            if (_stepX == _stepY)
-            {
-                l.getParameters()["step"] = _stepX;
-                l.getParameters()["step_h"] = 0.0f;
-                l.getParameters()["step_w"] = 0.0f;
-            }
-            else
-            {
-                l.getParameters()["step"] = 0.0f;
-                l.getParameters()["step_h"] = _stepY;
-                l.getParameters()["step_w"] = _stepX;
-            }
-            if (!_aspectRatios.empty())
-            {
-                l.getParameters()["aspect_ratio"] = _aspectRatios;
-            }
-            CV_Assert(!_variance.empty());
-            l.getParameters()["variance"] = _variance;
-            return Ptr<BackendNode>(new InfEngineBackendNode(l));
-        }
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
@@ -96,7 +96,7 @@ public:
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
 #ifdef HAVE_INF_ENGINE
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
             bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
             return !isMyriad;
@@ -338,32 +338,6 @@ public:
         layerOutputs[0].col(2).copyTo(dst);
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::ProposalLayer ieLayer(name);
-
-        ieLayer.setBaseSize(baseSize);
-        ieLayer.setFeatStride(featStride);
-        ieLayer.setMinSize(16);
-        ieLayer.setNMSThresh(nmsThreshold);
-        ieLayer.setPostNMSTopN(keepTopAfterNMS);
-        ieLayer.setPreNMSTopN(keepTopBeforeNMS);
-
-        std::vector<float> scalesVec(scales.size());
-        for (int i = 0; i < scales.size(); ++i)
-            scalesVec[i] = scales.get<float>(i);
-        ieLayer.setScale(scalesVec);
-
-        std::vector<float> ratiosVec(ratios.size());
-        for (int i = 0; i < ratios.size(); ++i)
-            ratiosVec[i] = ratios.get<float>(i);
-        ieLayer.setRatio(ratiosVec);
-
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
@@ -151,10 +151,12 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
-               backendId == DNN_BACKEND_CUDA ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
+               backendId == DNN_BACKEND_CUDA;
     }

 #ifdef HAVE_OPENCL
@@ -198,16 +200,6 @@ public:
     }

-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::ReorgYoloLayer ieLayer(name);
-        ieLayer.setStride(reorgStride);
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -202,10 +202,13 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
-               backendId == DNN_BACKEND_WEBNN ||
-               ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
+               backendId == DNN_BACKEND_WEBNN;
     }

     bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -306,17 +309,6 @@ public:
     }

-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::ReshapeLayer ieLayer(name);
-        CV_Assert(outShapes.size() == 1);
-        ieLayer.setDims(outShapes[0]);
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -78,7 +78,7 @@ public:
             return interpolation == "nearest" || interpolation == "bilinear" || interpolation == "opencv_linear";

 #ifdef HAVE_INF_ENGINE
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
             return (interpolation == "nearest" && scaleWidth == scaleHeight) ||
                    (interpolation == "bilinear");
@@ -308,38 +308,6 @@ public:
     }

-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::Layer ieLayer(name);
-        ieLayer.setName(name);
-        if (interpolation == "nearest")
-        {
-            ieLayer.setType("Resample");
-            ieLayer.getParameters()["type"] = std::string("caffe.ResampleParameter.NEAREST");
-            ieLayer.getParameters()["antialias"] = false;
-            if (scaleWidth != scaleHeight)
-                CV_Error(Error::StsNotImplemented, "resample with sw != sh");
-            ieLayer.getParameters()["factor"] = 1.0f / scaleWidth;
-        }
-        else if (interpolation == "bilinear")
-        {
-            ieLayer.setType("Interp");
-            ieLayer.getParameters()["pad_beg"] = 0;
-            ieLayer.getParameters()["pad_end"] = 0;
-            ieLayer.getParameters()["align_corners"] = alignCorners;
-        }
-        else
-            CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation);
-        ieLayer.getParameters()["width"] = outWidth;
-        ieLayer.getParameters()["height"] = outHeight;
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
-        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -78,11 +78,13 @@ public:
         {
             return backendId == DNN_BACKEND_OPENCV;
         }
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return axis > 0;
+#endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
                backendId == DNN_BACKEND_HALIDE ||
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && axis == 1 && !blobs.empty()) ||
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && axis > 0) ||
                (backendId == DNN_BACKEND_WEBNN && axis >0);
     }

@@ -314,34 +316,6 @@ public:
     }
 #endif // HAVE_HALIDE

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
-    {
-        InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
-
-        CV_Assert(!blobs.empty());
-        const size_t numChannels = blobs[0].total();
-        if (hasWeights)
-        {
-            addConstantData("weights", wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C), l);
-        }
-        else
-        {
-            auto weights = InferenceEngine::make_shared_blob<float>({
-                               InferenceEngine::Precision::FP32, {(size_t)numChannels},
-                               InferenceEngine::Layout::C
-                           });
-            weights->allocate();
-            float* buf = weights->buffer().as<float*>();
-            std::fill(buf, buf + numChannels, 1);
-            addConstantData("weights", weights, l);
-        }
-        if (hasBias)
-            addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l);
-        return Ptr<BackendNode>(new InfEngineBackendNode(l));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -166,12 +166,7 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-            return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
-                   sliceRanges.size() == 1 && sliceRanges[0].size() == 4 && !hasSteps;
-#endif
-#ifdef HAVE_DNN_NGRAPH
+#ifdef HAVE_INF_ENGINE
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
             return sliceRanges.size() == 1 && !hasSteps;
 #endif
@@ -573,64 +568,6 @@ public:
     }

-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1)
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
-    {
-        CV_Assert_N(finalSliceRanges.size() == 1, inputs.size() <= 2);
-
-        std::vector<size_t> axes, offsets, dims;
-        int from, to, step;
-        int numDims = finalSliceRanges[0].size();
-        if (preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL)
-        {
-            from = axis;
-            to = numDims;
-            step = 1;
-        }
-        else
-        {
-            from = numDims - 1;
-            to = axis - 1;
-            step = -1;
-        }
-        for (int i = from; i != to; i += step)
-        {
-            axes.push_back(i);
-            offsets.push_back(finalSliceRanges[0][i].start);
-            dims.push_back(finalSliceRanges[0][i].size());
-        }
-
-        InferenceEngine::Builder::Layer ieLayer(name);
-        ieLayer.setName(name);
-        ieLayer.setType("Crop");
-        ieLayer.getParameters()["axis"] = axes;
-        ieLayer.getParameters()["dim"] = dims;
-        ieLayer.getParameters()["offset"] = offsets;
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(2));
-        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
-
-        if (inputs.size() != 2)
-        {
-            std::vector<size_t> outShape(numDims);
-            for (int i = 0; i < numDims; ++i)
-                outShape[i] = finalSliceRanges[0][i].size();
-
-            ieLayer.getInputPorts()[1].setParameter("type", "weights");
-
-            auto shapeSource = InferenceEngine::make_shared_blob<float>({
-                                   InferenceEngine::Precision::FP32, outShape,
-                                   InferenceEngine::Layout::ANY
-                               });
-            shapeSource->allocate();
-            addConstantData("weights", shapeSource, ieLayer);
-        }
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif
-#endif
-

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
@@ -99,6 +99,10 @@ public:

     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            return true;
+#endif
 #ifdef HAVE_WEBNN
         if (backendId == DNN_BACKEND_WEBNN) {
             // TODO: support logSoftMax
@@ -112,8 +116,6 @@ public:
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
                (backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() && !logSoftMax) ||
                (backendId == DNN_BACKEND_VKCOM && haveVulkan());
     }

@@ -360,17 +362,6 @@ public:
         return Ptr<BackendNode>();
     }

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
-    {
-        InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
-
-        InferenceEngine::Builder::SoftMaxLayer ieLayer(name);
-        ieLayer.setAxis(normalize_axis(axisRaw, input->getDims().size()));
-
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-    }
-#endif // HAVE_DNN_IE_NN_BUILDER_2019

 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,

[File diff suppressed because it is too large]
@@ -48,37 +48,16 @@
 #pragma GCC diagnostic ignored "-Wsuggest-override"
 #endif

-#if defined(HAVE_DNN_IE_NN_BUILDER_2019) || INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2020_4)
-//#define INFERENCE_ENGINE_DEPRECATED // turn off deprecation warnings from IE
-//there is no way to suppress warnings from IE only at this moment, so we are forced to suppress warnings globally
-#if defined(__GNUC__)
-#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
-#endif
-#ifdef _MSC_VER
-#pragma warning(disable: 4996) // was declared deprecated
-#endif
-#endif
-
-#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
-#pragma GCC visibility push(default)
-#endif
-
 #include <inference_engine.hpp>
-
-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-#include <ie_builders.hpp>
-#endif
-
-#if defined(__GNUC__) && INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_1)
-#pragma GCC visibility pop
-#endif

 #if defined(__GNUC__) && __GNUC__ >= 5
 //#pragma GCC diagnostic pop
 #endif

 #endif // HAVE_INF_ENGINE

+#define CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 do { CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine NN Builder API support (legacy API is not supported anymore)"); } while (0)
+
 namespace cv { namespace dnn {

 #ifdef HAVE_INF_ENGINE
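Note: the CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 macro added above gives surviving legacy call sites a single failure path. A hypothetical call-site illustration (not a line from this patch):

    // Inside a layer's backend dispatch, any request for the removed
    // NN Builder backend now fails fast with StsNotImplemented.
    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        CV_ERROR_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;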
@@ -90,167 +69,6 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
 void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
                           std::vector<Mat>& mats);

-#ifdef HAVE_DNN_IE_NN_BUILDER_2019
-
-class InfEngineBackendNet
-{
-public:
-    InfEngineBackendNet();
-
-    InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
-
-    void addLayer(InferenceEngine::Builder::Layer& layer);
-
-    void addOutput(const std::string& name);
-
-    void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
-                 const std::vector<Ptr<BackendWrapper> >& outputs,
-                 const std::string& layerName);
-
-    bool isInitialized();
-
-    void init(Target targetId);
-
-    void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
-                 bool isAsync);
-
-    void initPlugin(InferenceEngine::CNNNetwork& net);
-
-    void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);
-
-    void reset();
-
-private:
-    InferenceEngine::Builder::Network netBuilder;
-
-    InferenceEngine::ExecutableNetwork netExec;
-    InferenceEngine::BlobMap allBlobs;
-    std::string device_name;
-#if INF_ENGINE_VER_MAJOR_LE(2019010000)
-    InferenceEngine::InferenceEnginePluginPtr enginePtr;
-    InferenceEngine::InferencePlugin plugin;
-#else
-    bool isInit = false;
-#endif
-
-    struct InfEngineReqWrapper
-    {
-        InfEngineReqWrapper() : isReady(true) {}
-
-        void makePromises(const std::vector<Ptr<BackendWrapper> >& outs);
-
-        InferenceEngine::InferRequest req;
-        std::vector<cv::AsyncPromise> outProms;
-        std::vector<std::string> outsNames;
-        bool isReady;
-    };
-
-    std::vector<Ptr<InfEngineReqWrapper> > infRequests;
-
-    InferenceEngine::CNNNetwork cnn;
-    bool hasNetOwner;
-
-    std::map<std::string, int> layers;
-    std::vector<std::string> requestedOutputs;
-
-    std::set<std::pair<int, int> > unconnectedPorts;
-};
-
-class InfEngineBackendNode : public BackendNode
-{
-public:
-    InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);
-
-    InfEngineBackendNode(Ptr<Layer>& layer, std::vector<Mat*>& inputs,
-                         std::vector<Mat>& outputs, std::vector<Mat>& internals);
-
-    void connect(std::vector<Ptr<BackendWrapper> >& inputs,
-                 std::vector<Ptr<BackendWrapper> >& outputs);
-
-    // Inference Engine network object that allows to obtain the outputs of this layer.
-    InferenceEngine::Builder::Layer layer;
-    Ptr<InfEngineBackendNet> net;
-    // CPU fallback in case of unsupported Inference Engine layer.
-    Ptr<dnn::Layer> cvLayer;
-};
-
-class InfEngineBackendWrapper : public BackendWrapper
-{
-public:
-    InfEngineBackendWrapper(int targetId, const Mat& m);
-
-    InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);
-
-    ~InfEngineBackendWrapper();
-
-    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);
-
-    virtual void copyToHost() CV_OVERRIDE;
-
-    virtual void setHostDirty() CV_OVERRIDE;
-
-    InferenceEngine::DataPtr dataPtr;
-    InferenceEngine::Blob::Ptr blob;
-    AsyncArray futureMat;
-};
-
-InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);
-
-InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);
-
-InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);
-
-// Convert Inference Engine blob with FP32 precision to FP16 precision.
-// Allocates memory for a new blob.
-InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
-
-void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
-
-// This is a fake class to run networks from Model Optimizer. Objects of that
-// class simulate responses of layers are imported by OpenCV and supported by
-// Inference Engine. The main difference is that they do not perform forward pass.
-class InfEngineBackendLayer : public Layer
-{
-public:
-    InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {};
-
-    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
-                                 const int requiredOutputs,
-                                 std::vector<MatShape> &outputs,
-                                 std::vector<MatShape> &internals) const CV_OVERRIDE;
-
-    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
-                         OutputArrayOfArrays internals) CV_OVERRIDE;
-
-    virtual bool supportBackend(int backendId) CV_OVERRIDE;
-
-private:
-    InferenceEngine::CNNNetwork t_net;
-};
-
-
-class InfEngineExtension : public InferenceEngine::IExtension
-{
-public:
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
-    virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
-#endif
-    virtual void Unload() noexcept {}
-    virtual void Release() noexcept {}
-    virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
-
-    virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
-                                                          InferenceEngine::ResponseDesc*) noexcept
-    {
-        return InferenceEngine::StatusCode::OK;
-    }
-
-    InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
-                                              const InferenceEngine::CNNLayer* cnnLayer,
-                                              InferenceEngine::ResponseDesc* resp) noexcept;
-};
-
-#endif // HAVE_DNN_IE_NN_BUILDER_2019
-

 CV__DNN_INLINE_NS_BEGIN
@@ -273,14 +91,8 @@ static inline std::vector<T> getShape(const Mat& mat)
     return result;
 }

-
 #endif // HAVE_INF_ENGINE

-bool haveInfEngine();
-
-void forwardInfEngine(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
-                      Ptr<BackendNode>& node, bool isAsync);
-
 }} // namespace dnn, namespace cv

 #endif // __OPENCV_DNN_OP_INF_ENGINE_HPP__
@@ -337,16 +337,6 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
     std::vector< tuple<Backend, Target> > targets;
     std::vector< Target > available;

-    {
-        available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
-        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
-        {
-            if ((*i == DNN_TARGET_MYRIAD || *i == DNN_TARGET_HDDL) && !withVPU)
-                continue;
-            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, *i));
-        }
-    }
-
     {
         available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
@@ -371,17 +371,17 @@ TEST_P(DNNTestOpenVINO, models)
             || modelName == "person-vehicle-bike-detection-2004" // 2021.4+: ncDeviceOpen:1013 Failed to find booted device after boot
         )
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
     if (targetId == DNN_TARGET_OPENCL && (false
             || modelName == "face-detection-0106" // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
         )
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
     if (targetId == DNN_TARGET_OPENCL_FP16 && (false
             || modelName == "face-detection-0106" // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
         )
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif

 #if INF_ENGINE_VER_MAJOR_GE(2020020000)
@@ -397,12 +397,7 @@ TEST_P(DNNTestOpenVINO, models)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif

-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);

     bool isFP16 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD);

@@ -1265,12 +1265,7 @@ TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
     if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         throw SkipTestException("No support for async forward");

-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);

     Net netDefault = readNet(_tf("layer_convolution.caffemodel"), _tf("layer_convolution.prototxt"));
     Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin"));
@@ -1310,12 +1305,7 @@ TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
     if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         throw SkipTestException("No support for async forward");

-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);

     int blobSize[] = {2, 6, 75, 113};
     Mat inputs[] = {Mat(4, &blobSize[0], CV_8U), Mat()};
@@ -1348,12 +1338,7 @@ TEST_P(Layer_Test_Convolution_DLDT, multithreading)
     if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         throw SkipTestException("No support for async forward");

-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);

     std::string xmlPath = _tf("layer_convolution.xml");
     std::string binPath = _tf("layer_convolution.bin");
@@ -117,12 +117,7 @@ void test_readNet_IE_do_not_call_setInput(Backend backendId)
     const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");

-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);

     Net net = readNet(model, proto);
     net.setPreferableBackend(backendId);
@@ -462,12 +457,7 @@ TEST_P(Async, model_optimizer_pipeline_set_and_forward_single)
     const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");

-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);

     Net netSync = readNet(model, proto);
     netSync.setPreferableBackend(backendId);
@@ -523,12 +513,7 @@ TEST_P(Async, model_optimizer_pipeline_set_and_forward_all)
     const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");

-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);

     Net netSync = readNet(model, proto);
     netSync.setPreferableBackend(backendId);
@@ -586,12 +571,7 @@ TEST_P(Async, create_layer_pipeline_set_and_forward_all)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && dtype == CV_8U)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);

-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);

     Net netSync;
     Net netAsync;
@@ -697,12 +677,7 @@ TEST_P(Test_Model_Optimizer, forward_two_nets)
     const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");

-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);

     Net net0 = readNet(model, proto);
     net0.setPreferableTarget(targetId);
@@ -741,12 +716,7 @@ TEST_P(Test_Model_Optimizer, readFromBuffer)
     const std::string& weightsFile = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& modelFile = findDataFile("dnn/layers/layer_convolution.xml");

-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);

     Net net1 = readNetFromModelOptimizer(modelFile, weightsFile);
     net1.setPreferableBackend(backendId);
@@ -793,12 +763,7 @@ TEST_P(Test_Model_Optimizer, flexible_inputs)
     const std::string& model = findDataFile("dnn/layers/layer_convolution.bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution.xml");

-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
-        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
-    else
-        FAIL() << "Unknown backendId";
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);

     Net net0 = readNet(model, proto);
     net0.setPreferableTarget(targetId);
@@ -3559,10 +3559,11 @@ a mask and then extract the contour, or copy the region to another image, and so
 function unless the #FLOODFILL_MASK_ONLY flag is set in the second variant of the function. See
 the details below.
 @param mask Operation mask that should be a single-channel 8-bit image, 2 pixels wider and 2 pixels
-taller than image. Since this is both an input and output parameter, you must take responsibility
-of initializing it. Flood-filling cannot go across non-zero pixels in the input mask. For example,
+taller than image. If an empty Mat is passed it will be created automatically. Since this is both an
+input and output parameter, you must take responsibility of initializing it.
+Flood-filling cannot go across non-zero pixels in the input mask. For example,
 an edge detector output can be used as a mask to stop filling at edges. On output, pixels in the
-mask corresponding to filled pixels in the image are set to 1 or to the a value specified in flags
+mask corresponding to filled pixels in the image are set to 1 or to the specified value in flags
 as described below. Additionally, the function fills the border of the mask with ones to simplify
 internal processing. It is therefore possible to use the same mask in multiple calls to the function
 to make sure the filled areas do not overlap.
|
|||||||
const int pixelHeight,
|
const int pixelHeight,
|
||||||
const int thickness = 1);
|
const int thickness = 1);
|
||||||
|
|
||||||
/** @brief Line iterator
|
/** @brief Class for iterating over all pixels on a raster line segment.
|
||||||
|
|
||||||
The class is used to iterate over all the pixels on the raster line
|
The class LineIterator is used to get each pixel of a raster line connecting
|
||||||
segment connecting two specified points.
|
two specified points.
|
||||||
|
It can be treated as a versatile implementation of the Bresenham algorithm
|
||||||
The class LineIterator is used to get each pixel of a raster line. It
|
|
||||||
can be treated as versatile implementation of the Bresenham algorithm
|
|
||||||
where you can stop at each pixel and do some extra processing, for
|
where you can stop at each pixel and do some extra processing, for
|
||||||
example, grab pixel values along the line or draw a line with an effect
|
example, grab pixel values along the line or draw a line with an effect
|
||||||
(for example, with XOR operation).
|
(for example, with XOR operation).
|
||||||
@ -4867,14 +4866,19 @@ for(int i = 0; i < it2.count; i++, ++it2)
|
|||||||
class CV_EXPORTS LineIterator
|
class CV_EXPORTS LineIterator
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
/** @brief initializes the iterator
|
/** @brief Initializes iterator object for the given line and image.
|
||||||
|
|
||||||
creates iterators for the line connecting pt1 and pt2
|
The returned iterator can be used to traverse all pixels on a line that
|
||||||
the line will be clipped on the image boundaries
|
connects the given two points.
|
||||||
the line is 8-connected or 4-connected
|
The line will be clipped on the image boundaries.
|
||||||
If leftToRight=true, then the iteration is always done
|
|
||||||
from the left-most point to the right most,
|
@param img Underlying image.
|
||||||
not to depend on the ordering of pt1 and pt2 parameters;
|
@param pt1 First endpoint of the line.
|
||||||
|
@param pt2 The other endpoint of the line.
|
||||||
|
@param connectivity Pixel connectivity of the iterator. Valid values are 4 (iterator can move
|
||||||
|
up, down, left and right) and 8 (iterator can also move diagonally).
|
||||||
|
@param leftToRight If true, the line is traversed from the leftmost endpoint to the rightmost
|
||||||
|
endpoint. Otherwise, the line is traversed from \p pt1 to \p pt2.
|
||||||
*/
|
*/
|
||||||
LineIterator( const Mat& img, Point pt1, Point pt2,
|
LineIterator( const Mat& img, Point pt1, Point pt2,
|
||||||
int connectivity = 8, bool leftToRight = false )
|
int connectivity = 8, bool leftToRight = false )
|
||||||
@ -4907,16 +4911,23 @@ public:
|
|||||||
}
|
}
|
||||||
void init(const Mat* img, Rect boundingAreaRect, Point pt1, Point pt2, int connectivity, bool leftToRight);
|
void init(const Mat* img, Rect boundingAreaRect, Point pt1, Point pt2, int connectivity, bool leftToRight);
|
||||||
|
|
||||||
/** @brief returns pointer to the current pixel
|
/** @brief Returns pointer to the current pixel.
|
||||||
*/
|
*/
|
||||||
uchar* operator *();
|
uchar* operator *();
|
||||||
/** @brief prefix increment operator (++it). shifts iterator to the next pixel
|
|
||||||
|
/** @brief Moves iterator to the next pixel on the line.
|
||||||
|
|
||||||
|
This is the prefix version (++it).
|
||||||
*/
|
*/
|
||||||
LineIterator& operator ++();
|
LineIterator& operator ++();
|
||||||
/** @brief postfix increment operator (it++). shifts iterator to the next pixel
|
|
||||||
|
/** @brief Moves iterator to the next pixel on the line.
|
||||||
|
|
||||||
|
This is the postfix version (it++).
|
||||||
*/
|
*/
|
||||||
LineIterator operator ++(int);
|
LineIterator operator ++(int);
|
||||||
/** @brief returns coordinates of the current pixel
|
|
||||||
|
/** @brief Returns coordinates of the current pixel.
|
||||||
*/
|
*/
|
||||||
Point pos() const;
|
Point pos() const;
|
||||||
|
|
||||||
|
@@ -477,11 +477,10 @@ int cv::floodFill( InputOutputArray _image, InputOutputArray _mask,
     nv_buf._[0] = nv_buf._[1] = nv_buf._[2] = nv_buf._[3] = 0;

     struct { Vec3b b; Vec3i i; Vec3f f; } ld_buf, ud_buf;
-    Mat img = _image.getMat(), mask;
-    if( !_mask.empty() )
-        mask = _mask.getMat();
-    Size size = img.size();
-
+
+    Mat img = _image.getMat(), mask;
+
+    Size size = img.size();
     int type = img.type();
     int depth = img.depth();
     int cn = img.channels();
|
|||||||
if( connectivity != 0 && connectivity != 4 && connectivity != 8 )
|
if( connectivity != 0 && connectivity != 4 && connectivity != 8 )
|
||||||
CV_Error( CV_StsBadFlag, "Connectivity must be 4, 0(=4) or 8" );
|
CV_Error( CV_StsBadFlag, "Connectivity must be 4, 0(=4) or 8" );
|
||||||
|
|
||||||
|
if( _mask.empty() )
|
||||||
|
{
|
||||||
|
_mask.create( size.height + 2, size.width + 2, CV_8UC1 );
|
||||||
|
_mask.setTo(0);
|
||||||
|
}
|
||||||
|
|
||||||
|
mask = _mask.getMat();
|
||||||
|
CV_CheckTypeEQ( mask.type(), CV_8U, "" );
|
||||||
|
CV_CheckEQ( mask.rows, size.height + 2, "" );
|
||||||
|
CV_CheckEQ( mask.cols, size.width + 2, "" );
|
||||||
|
|
||||||
|
Mat mask_inner = mask( Rect(1, 1, mask.cols - 2, mask.rows - 2) );
|
||||||
|
copyMakeBorder( mask_inner, mask, 1, 1, 1, 1, BORDER_ISOLATED | BORDER_CONSTANT, Scalar(1) );
|
||||||
|
|
||||||
bool is_simple = mask.empty() && (flags & FLOODFILL_MASK_ONLY) == 0;
|
bool is_simple = mask.empty() && (flags & FLOODFILL_MASK_ONLY) == 0;
|
||||||
|
|
||||||
for( i = 0; i < cn; i++ )
|
for( i = 0; i < cn; i++ )
|
||||||
@ -544,26 +557,6 @@ int cv::floodFill( InputOutputArray _image, InputOutputArray _mask,
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if( mask.empty() )
|
|
||||||
{
|
|
||||||
Mat tempMask( size.height + 2, size.width + 2, CV_8UC1 );
|
|
||||||
tempMask.setTo(Scalar::all(0));
|
|
||||||
mask = tempMask;
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
CV_Assert( mask.rows == size.height+2 && mask.cols == size.width+2 );
|
|
||||||
CV_Assert( mask.type() == CV_8U );
|
|
||||||
}
|
|
||||||
|
|
||||||
memset( mask.ptr(), 1, mask.cols );
|
|
||||||
memset( mask.ptr(mask.rows-1), 1, mask.cols );
|
|
||||||
|
|
||||||
for( i = 1; i <= size.height; i++ )
|
|
||||||
{
|
|
||||||
mask.at<uchar>(i, 0) = mask.at<uchar>(i, mask.cols-1) = (uchar)1;
|
|
||||||
}
|
|
||||||
|
|
||||||
if( depth == CV_8U )
|
if( depth == CV_8U )
|
||||||
for( i = 0; i < cn; i++ )
|
for( i = 0; i < cn; i++ )
|
||||||
{
|
{
|
||||||
@ -632,7 +625,8 @@ int cv::floodFill( InputOutputArray _image, Point seedPoint,
|
|||||||
{
|
{
|
||||||
CV_INSTRUMENT_REGION();
|
CV_INSTRUMENT_REGION();
|
||||||
|
|
||||||
return floodFill(_image, Mat(), seedPoint, newVal, rect, loDiff, upDiff, flags);
|
Mat mask;
|
||||||
|
return floodFill(_image, mask, seedPoint, newVal, rect, loDiff, upDiff, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@@ -531,11 +531,11 @@ TEST(Imgproc_FloodFill, maskValue)
 {
     const int n = 50;
     Mat img = Mat::zeros(n, n, CV_8U);
-    Mat mask = Mat::zeros(n + 2, n + 2, CV_8U);
+    Mat mask;

     circle(img, Point(n/2, n/2), 20, Scalar(100), 4);

-    int flags = 4 + CV_FLOODFILL_MASK_ONLY;
+    int flags = 4 + FLOODFILL_MASK_ONLY;
     floodFill(img, mask, Point(n/2 + 13, n/2), Scalar(100), NULL, Scalar(), Scalar(), flags);

     ASSERT_EQ(1, cvtest::norm(mask.rowRange(1, n-1).colRange(1, n-1), NORM_INF));
@@ -0,0 +1,29 @@
+# Copyright (C) 2018-2021 Intel Corporation
+# SPDX-License-Identifier: Apache-2.0
+#
+
+set(PACKAGE_VERSION_MAJOR 2021)
+set(PACKAGE_VERSION_MINOR 4)
+set(PACKAGE_VERSION_PATCH 2)
+set(PACKAGE_VERSION "${PACKAGE_VERSION_MAJOR}.${PACKAGE_VERSION_MINOR}.${PACKAGE_VERSION_PATCH}")
+
+set(PACKAGE_VERSION_EXACT False)
+set(PACKAGE_VERSION_COMPATIBLE False)
+
+# Compatibility with old versioning for 2.x
+if(PACKAGE_FIND_VERSION_MAJOR VERSION_EQUAL 2)
+    set(PACKAGE_VERSION_COMPATIBLE True)
+    if(${CMAKE_FIND_PACKAGE_NAME}_FIND_REQUIRED)
+        message(WARNING "Inference Engine versioning has changed. Use ${PACKAGE_VERSION} instead of ${PACKAGE_FIND_VERSION}")
+    endif()
+endif()
+
+if(PACKAGE_FIND_VERSION VERSION_EQUAL PACKAGE_VERSION)
+    set(PACKAGE_VERSION_EXACT True)
+    set(PACKAGE_VERSION_COMPATIBLE True)
+endif()
+
+if(PACKAGE_FIND_VERSION_MAJOR EQUAL PACKAGE_VERSION_MAJOR AND
+   PACKAGE_FIND_VERSION VERSION_LESS PACKAGE_VERSION)
+    set(PACKAGE_VERSION_COMPATIBLE True)
+endif()
@@ -0,0 +1,31 @@
+# Inference Engine CMake config for OpenCV windows package
+
+get_filename_component(_IMPORT_PREFIX "${CMAKE_CURRENT_LIST_FILE}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+get_filename_component(_IMPORT_PREFIX "${_IMPORT_PREFIX}" PATH)
+
+set(InferenceEngine_LIBRARIES IE::inference_engine)
+add_library(IE::inference_engine SHARED IMPORTED)
+
+set_target_properties(IE::inference_engine PROPERTIES
+    INTERFACE_INCLUDE_DIRECTORIES "${_IMPORT_PREFIX}/deployment_tools/inference_engine/include"
+)
+
+# Import target "IE::inference_engine" for configuration "Debug"
+set_property(TARGET IE::inference_engine APPEND PROPERTY IMPORTED_CONFIGURATIONS DEBUG)
+set_target_properties(IE::inference_engine PROPERTIES
+    IMPORTED_IMPLIB_DEBUG "${_IMPORT_PREFIX}/deployment_tools/inference_engine/lib/intel64/inference_engined.lib"
+    IMPORTED_LINK_DEPENDENT_LIBRARIES_DEBUG ""
+    IMPORTED_LOCATION_DEBUG "${_IMPORT_PREFIX}/bin/inference_engined.dll"
+)
+
+# Import target "IE::inference_engine" for configuration "Release"
+set_property(TARGET IE::inference_engine APPEND PROPERTY IMPORTED_CONFIGURATIONS RELEASE)
+set_target_properties(IE::inference_engine PROPERTIES
+    IMPORTED_IMPLIB_RELEASE "${_IMPORT_PREFIX}/deployment_tools/inference_engine/lib/intel64/inference_engine.lib"
+    IMPORTED_LINK_DEPENDENT_LIBRARIES_RELEASE ""
+    IMPORTED_LOCATION_RELEASE "${_IMPORT_PREFIX}/bin/inference_engine.dll"
+)
+
+set(InferenceEngine_FOUND ON)
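Note the Windows split encoded in the imported-target properties: IMPORTED_IMPLIB_* points at the .lib import library the linker consumes, while IMPORTED_LOCATION_* points at the .dll loaded at run time, with the Debug configuration resolving to the 'd'-suffixed binaries.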
@@ -1,3 +1,5 @@
+        copytree(self.cpath / 'cmake', self.sysrootdir / 'deployment_tools' / 'inference_engine' / 'cmake')
+
         sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin')
         copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph')
         #rm_one(self.sysrootdir / 'ngraph' / 'lib' / 'ngraph.dll')
@@ -388,10 +388,9 @@ class Builder:
         if self.config.dldt_release:
             cmake_vars['INF_ENGINE_RELEASE'] = str(self.config.dldt_release)

-        cmake_vars['INF_ENGINE_LIB_DIRS:PATH'] = str(builderDLDT.sysrootdir / 'deployment_tools/inference_engine/lib/intel64')
-        assert os.path.exists(cmake_vars['INF_ENGINE_LIB_DIRS:PATH']), cmake_vars['INF_ENGINE_LIB_DIRS:PATH']
-        cmake_vars['INF_ENGINE_INCLUDE_DIRS:PATH'] = str(builderDLDT.sysrootdir / 'deployment_tools/inference_engine/include')
-        assert os.path.exists(cmake_vars['INF_ENGINE_INCLUDE_DIRS:PATH']), cmake_vars['INF_ENGINE_INCLUDE_DIRS:PATH']
+        InferenceEngine_DIR = str(builderDLDT.sysrootdir / 'deployment_tools' / 'inference_engine' / 'cmake')
+        assert os.path.exists(InferenceEngine_DIR), InferenceEngine_DIR
+        cmake_vars['InferenceEngine_DIR:PATH'] = InferenceEngine_DIR

         ngraph_DIR = str(builderDLDT.sysrootdir / 'ngraph/cmake')
         if not os.path.exists(ngraph_DIR):
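The build script now hands CMake a single InferenceEngine_DIR instead of the old INF_ENGINE_LIB_DIRS/INF_ENGINE_INCLUDE_DIRS pair, letting find_package() consume the config files added above. A hedged sketch of how a cmake_vars dict of this shape typically reaches the command line (the expansion code is not shown in this patch; the path is illustrative):

    cmake_vars = {
        'InferenceEngine_DIR:PATH': '/path/to/sysroot/deployment_tools/inference_engine/cmake',
    }
    args = ['cmake'] + ['-D{}={}'.format(name, value) for name, value in cmake_vars.items()]
    # -> ['cmake', '-DInferenceEngine_DIR:PATH=/path/to/sysroot/deployment_tools/inference_engine/cmake']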
@@ -195,7 +195,7 @@ def main():
     indices = cv.dnn.NMSBoxesRotated(boxes, confidences, confThreshold, nmsThreshold)
     for i in indices:
         # get 4 corners of the rotated rect
-        vertices = cv.boxPoints(boxes[i[0]])
+        vertices = cv.boxPoints(boxes[i])
         # scale the bounding box coordinates based on the respective ratios
         for j in range(4):
             vertices[j][0] *= rW
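The sample is adjusted for newer Python bindings, where NMSBoxesRotated returns a flat array of kept indices rather than an Nx1 column, so each element indexes boxes directly. A minimal sketch (box data is illustrative):

    import cv2 as cv

    boxes = [((50.0, 50.0), (40.0, 20.0), 15.0),   # RotatedRect as ((cx, cy), (w, h), angle)
             ((52.0, 51.0), (40.0, 20.0), 14.0)]
    confidences = [0.9, 0.6]
    indices = cv.dnn.NMSBoxesRotated(boxes, confidences, 0.5, 0.4)
    for i in indices:
        vertices = cv.boxPoints(boxes[i])          # boxes[i], no longer boxes[i[0]]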