commit 4b0132ed7a
Author: Alexander Alekhin
Date:   2019-12-02 16:18:07 +03:00

    Merge remote-tracking branch 'upstream/3.4' into merge-3.4

51 changed files with 3276 additions and 496 deletions

diff --git a/CMakeLists.txt b/CMakeLists.txt

@@ -275,6 +275,9 @@ OCV_OPTION(WITH_VULKAN "Include Vulkan support" OFF
 OCV_OPTION(WITH_INF_ENGINE "Include Intel Inference Engine support" OFF
   VISIBLE_IF TRUE
   VERIFY INF_ENGINE_TARGET)
+OCV_OPTION(WITH_NGRAPH "Include nGraph support" WITH_INF_ENGINE
+  VISIBLE_IF TRUE
+  VERIFY TARGET ngraph::ngraph)
 OCV_OPTION(WITH_JASPER "Include JPEG2K support" ON
   VISIBLE_IF NOT IOS
   VERIFY HAVE_JASPER)
@@ -1423,12 +1426,37 @@ if(WITH_INF_ENGINE OR INF_ENGINE_TARGET)
    )
    get_target_property(_inc ${ie_target} INTERFACE_INCLUDE_DIRECTORIES)
    status(" Inference Engine:" "${__msg}")
-   status(" libs:" "${_lib}")
-   status(" includes:" "${_inc}")
+   status(" * libs:" "${_lib}")
+   status(" * includes:" "${_inc}")
  else()
    status(" Inference Engine:" "NO")
  endif()
 endif()
+if(WITH_NGRAPH OR HAVE_NGRAPH)
+  if(HAVE_NGRAPH)
+    set(__target ngraph::ngraph)
+    set(__msg "YES (${ngraph_VERSION})")
+    get_target_property(_lib ${__target} IMPORTED_LOCATION)
+    get_target_property(_lib_imp_rel ${__target} IMPORTED_IMPLIB_RELEASE)
+    get_target_property(_lib_imp_dbg ${__target} IMPORTED_IMPLIB_DEBUG)
+    get_target_property(_lib_rel ${__target} IMPORTED_LOCATION_RELEASE)
+    get_target_property(_lib_dbg ${__target} IMPORTED_LOCATION_DEBUG)
+    ocv_build_features_string(_lib
+      IF _lib THEN "${_lib}"
+      IF _lib_imp_rel AND _lib_imp_dbg THEN "${_lib_imp_rel} / ${_lib_imp_dbg}"
+      IF _lib_rel AND _lib_dbg THEN "${_lib_rel} / ${_lib_dbg}"
+      IF _lib_rel THEN "${_lib_rel}"
+      IF _lib_dbg THEN "${_lib_dbg}"
+      ELSE "unknown"
+    )
+    get_target_property(_inc ${__target} INTERFACE_INCLUDE_DIRECTORIES)
+    status(" nGraph:" "${__msg}")
+    status(" * libs:" "${_lib}")
+    status(" * includes:" "${_inc}")
+  else()
+    status(" nGraph:" "NO")
+  endif()
+endif()
 if(WITH_EIGEN OR HAVE_EIGEN)
   status(" Eigen:" HAVE_EIGEN THEN "YES (ver ${EIGEN_WORLD_VERSION}.${EIGEN_MAJOR_VERSION}.${EIGEN_MINOR_VERSION})" ELSE NO)
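Note: the status() report above is also embedded into the library and returned by cv::getBuildInformation(), so a runtime check for nGraph support can be a simple string match; a minimal sketch (matching the report text is an assumption, not a stable API):

    #include <iostream>
    #include <opencv2/core.hpp>

    int main()
    {
        // The CMake status report, including the "nGraph:" line added above,
        // is returned verbatim here.
        const cv::String info = cv::getBuildInformation();
        std::cout << (info.find("nGraph:") != cv::String::npos
                          ? "nGraph status line present"
                          : "no nGraph status line")
                  << std::endl;
        return 0;
    }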

diff --git a/cmake/OpenCVDetectInferenceEngine.cmake b/cmake/OpenCVDetectInferenceEngine.cmake

@@ -28,6 +28,15 @@ function(add_custom_ie_build _inc _lib _lib_rel _lib_dbg _msg)
     IMPORTED_IMPLIB_DEBUG "${_lib_dbg}"
     INTERFACE_INCLUDE_DIRECTORIES "${_inc}"
   )
+  find_library(ie_builder_custom_lib "inference_engine_nn_builder" PATHS "${INF_ENGINE_LIB_DIRS}" NO_DEFAULT_PATH)
+  if(EXISTS "${ie_builder_custom_lib}")
+    add_library(inference_engine_nn_builder UNKNOWN IMPORTED)
+    set_target_properties(inference_engine_nn_builder PROPERTIES
+      IMPORTED_LOCATION "${ie_builder_custom_lib}"
+    )
+  endif()
   if(NOT INF_ENGINE_RELEASE VERSION_GREATER "2018050000")
     find_library(INF_ENGINE_OMP_LIBRARY iomp5 PATHS "${INF_ENGINE_OMP_DIR}" NO_DEFAULT_PATH)
     if(NOT INF_ENGINE_OMP_LIBRARY)
@@ -37,7 +46,12 @@ function(add_custom_ie_build _inc _lib _lib_rel _lib_dbg _msg)
     endif()
   endif()
   set(INF_ENGINE_VERSION "Unknown" CACHE STRING "")
-  set(INF_ENGINE_TARGET inference_engine PARENT_SCOPE)
+  set(INF_ENGINE_TARGET inference_engine)
+  if(TARGET inference_engine_nn_builder)
+    list(APPEND INF_ENGINE_TARGET inference_engine_nn_builder)
+    set(_msg "${_msg}, with IE NN Builder API")
+  endif()
+  set(INF_ENGINE_TARGET "${INF_ENGINE_TARGET}" PARENT_SCOPE)
   message(STATUS "Detected InferenceEngine: ${_msg}")
 endfunction()
@@ -47,7 +61,7 @@ find_package(InferenceEngine QUIET)
 if(InferenceEngine_FOUND)
   set(INF_ENGINE_TARGET ${InferenceEngine_LIBRARIES})
   set(INF_ENGINE_VERSION "${InferenceEngine_VERSION}" CACHE STRING "")
-  message(STATUS "Detected InferenceEngine: cmake package")
+  message(STATUS "Detected InferenceEngine: cmake package (${InferenceEngine_VERSION})")
 endif()

 if(NOT INF_ENGINE_TARGET AND INF_ENGINE_LIB_DIRS AND INF_ENGINE_INCLUDE_DIRS)
@@ -87,3 +101,15 @@ if(INF_ENGINE_TARGET)
       INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}"
   )
 endif()
+
+if(WITH_NGRAPH)
+  find_package(ngraph QUIET)
+  if(ngraph_FOUND)
+    ocv_assert(TARGET ngraph::ngraph)
+    if(INF_ENGINE_RELEASE VERSION_LESS "2019039999")
+      message(WARNING "nGraph is not tested with current InferenceEngine version: INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}")
+    endif()
+    message(STATUS "Detected ngraph: cmake package (${ngraph_VERSION})")
+    set(HAVE_NGRAPH ON)
+  endif()
+endif()

diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp

@@ -124,7 +124,7 @@ The next figures show two common types of radial distortion: barrel distortion (
 ![](pics/distortion_examples2.png)

 In some cases the image sensor may be tilted in order to focus an oblique plane in front of the
-camera (Scheimpfug condition). This can be useful for particle image velocimetry (PIV) or
+camera (Scheimpflug principle). This can be useful for particle image velocimetry (PIV) or
 triangulation with a laser fan. The tilt causes a perspective distortion of \f$x''\f$ and
 \f$y''\f$. This distortion can be modelled in the following way, see e.g. @cite Louhichi07.

diff --git a/modules/dnn/CMakeLists.txt b/modules/dnn/CMakeLists.txt

@@ -80,6 +80,7 @@ endif()
 set(include_dirs ${fw_inc})
 set(sources_options "")
 set(libs libprotobuf ${LAPACK_LIBRARIES})
 if(OPENCV_DNN_OPENCL AND HAVE_OPENCL)
@@ -108,10 +109,20 @@ if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
 elseif(CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
   ocv_append_source_files_cxx_compiler_options(fw_srcs "-Wno-inconsistent-missing-override")  # Clang
 endif()

+set(dnn_runtime_libs "")
+if(INF_ENGINE_TARGET)
+  list(APPEND dnn_runtime_libs ${INF_ENGINE_TARGET})
+endif()
+if(HAVE_NGRAPH)
+  add_definitions(-DHAVE_DNN_NGRAPH)
+  list(APPEND dnn_runtime_libs ngraph::ngraph)
+endif()
+
 ocv_glob_module_sources(${sources_options} SOURCES ${fw_srcs})
-ocv_create_module(${libs} ${INF_ENGINE_TARGET})
+ocv_create_module(${libs} ${dnn_runtime_libs})
 ocv_add_samples()
-ocv_add_accuracy_tests(${INF_ENGINE_TARGET})
+ocv_add_accuracy_tests(${dnn_runtime_libs})

 set(perf_path "${CMAKE_CURRENT_LIST_DIR}/perf")
 file(GLOB_RECURSE perf_srcs "${perf_path}/*.cpp")

diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp

@@ -67,12 +67,17 @@ CV__DNN_INLINE_NS_BEGIN
         //! DNN_BACKEND_DEFAULT equals to DNN_BACKEND_INFERENCE_ENGINE if
         //! OpenCV is built with Intel's Inference Engine library or
         //! DNN_BACKEND_OPENCV otherwise.
-        DNN_BACKEND_DEFAULT,
+        DNN_BACKEND_DEFAULT = 0,
         DNN_BACKEND_HALIDE,
-        DNN_BACKEND_INFERENCE_ENGINE,  //!< Intel's Inference Engine computational backend.
+        DNN_BACKEND_INFERENCE_ENGINE,  //!< Intel's Inference Engine computational backend
+                                       //!< @sa setInferenceEngineBackendType
         DNN_BACKEND_OPENCV,
         DNN_BACKEND_VKCOM,
-        DNN_BACKEND_CUDA
+        DNN_BACKEND_CUDA,
+#ifdef __OPENCV_BUILD
+        DNN_BACKEND_INFERENCE_ENGINE_NGRAPH = 1000000,  // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType()
+        DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019,   // internal - use DNN_BACKEND_INFERENCE_ENGINE + setInferenceEngineBackendType()
+#endif
     };

     /**
@@ -276,6 +281,8 @@ CV__DNN_INLINE_NS_BEGIN
         virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs);

+        virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs, const std::vector<Ptr<BackendNode> >& nodes);
+
         virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs);

     /**
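The two internal enumerators stay hidden behind __OPENCV_BUILD, so application code keeps passing the public value; a minimal sketch of the intended usage (the model file names are placeholders):

    #include <opencv2/dnn.hpp>

    int main()
    {
        // Placeholder OpenVINO IR pair.
        cv::dnn::Net net = cv::dnn::readNetFromModelOptimizer("model.xml", "model.bin");

        // Public enum value; internally resolved to the NN Builder or nGraph
        // implementation (see setInferenceEngineBackendType()).
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
        net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

        cv::Mat img = cv::Mat::zeros(224, 224, CV_32FC3);
        net.setInput(cv::dnn::blobFromImage(img));
        cv::Mat out = net.forward();
        return 0;
    }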

diff --git a/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp b/modules/dnn/include/opencv2/dnn/utils/inference_engine.hpp

@@ -14,6 +14,27 @@ namespace cv { namespace dnn {
 CV__DNN_INLINE_NS_BEGIN

+/* Values for 'OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE' parameter */
+#define CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API "NN_BUILDER"
+#define CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH "NGRAPH"
+
+/** @brief Returns Inference Engine internal backend API.
+ *
+ * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
+ *
+ * Default value is controlled through `OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE` runtime parameter (environment variable).
+ */
+CV_EXPORTS_W cv::String getInferenceEngineBackendType();
+
+/** @brief Specify Inference Engine internal backend API.
+ *
+ * See values of `CV_DNN_BACKEND_INFERENCE_ENGINE_*` macros.
+ *
+ * @returns previous value of internal backend API
+ */
+CV_EXPORTS_W cv::String setInferenceEngineBackendType(const cv::String& newBackendType);
+
 /** @brief Release a Myriad device (binded by OpenCV).
  *
  * Single Myriad device cannot be shared across multiple processes which uses
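A short usage sketch of the new pair of functions (assumes an Inference Engine-enabled build; the same switch can also be made without code via the OPENCV_DNN_BACKEND_INFERENCE_ENGINE_TYPE environment variable):

    #include <iostream>
    #include <opencv2/dnn.hpp>
    #include <opencv2/dnn/utils/inference_engine.hpp>

    int main()
    {
        // "NN_BUILDER" or "NGRAPH", per the macros above.
        std::cout << "current: " << cv::dnn::getInferenceEngineBackendType() << std::endl;

        // Networks created after this call use the nGraph-based implementation;
        // the previous setting is returned.
        cv::String prev = cv::dnn::setInferenceEngineBackendType(
            CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
        std::cout << "previous: " << prev << std::endl;
        return 0;
    }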

diff --git a/modules/dnn/include/opencv2/dnn/version.hpp b/modules/dnn/include/opencv2/dnn/version.hpp

@@ -6,7 +6,7 @@
 #define OPENCV_DNN_VERSION_HPP

 /// Use with major OpenCV version only.
-#define OPENCV_DNN_API_VERSION 20191111
+#define OPENCV_DNN_API_VERSION 20191202

 #if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_INLINE_NS
 #define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)
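The bump is what separates the two ABIs: CV__DNN_INLINE_NS feeds the versioned inline namespace, so after this change the expansion becomes:

    // __CV_CAT(dnn4_v, 20191202) -> dnn4_v20191202
    namespace cv { namespace dnn { inline namespace dnn4_v20191202 {
        // ... all dnn declarations live here ...
    }}}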

diff --git a/modules/dnn/perf/perf_net.cpp b/modules/dnn/perf/perf_net.cpp

@@ -100,7 +100,7 @@ PERF_TEST_P_(DNNTestNetwork, SqueezeNet_v1_1)
 PERF_TEST_P_(DNNTestNetwork, Inception_5h)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE) throw SkipTestException("");
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) throw SkipTestException("");
     processNet("dnn/tensorflow_inception_graph.pb", "",
             "inception_5h.yml",
             Mat(cv::Size(224, 224), CV_32FC3), "softmax2");
@@ -108,7 +108,7 @@ PERF_TEST_P_(DNNTestNetwork, Inception_5h)
 PERF_TEST_P_(DNNTestNetwork, ENet)
 {
-    if ((backend == DNN_BACKEND_INFERENCE_ENGINE) ||
+    if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) ||
         (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
     processNet("dnn/Enet-model-best.net", "", "enet.yml",
@@ -126,7 +126,7 @@ PERF_TEST_P_(DNNTestNetwork, OpenFace)
     if (backend == DNN_BACKEND_HALIDE)
         throw SkipTestException("");
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("");
 #endif
     processNet("dnn/openface_nn4.small2.v1.t7", "", "",
@@ -168,7 +168,7 @@ PERF_TEST_P_(DNNTestNetwork, DenseNet_121)
 PERF_TEST_P_(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
 {
     if (backend == DNN_BACKEND_HALIDE ||
-        (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
+        (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD))
         throw SkipTestException("");
     // The same .caffemodel but modified .prototxt
     // See https://github.com/CMU-Perceptual-Computing-Lab/openpose/blob/master/src/openpose/pose/poseParameters.cpp
@@ -219,15 +219,15 @@ PERF_TEST_P_(DNNTestNetwork, FastNeuralStyle_eccv16)
 PERF_TEST_P_(DNNTestNetwork, Inception_v2_Faster_RCNN)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         throw SkipTestException("Test is disabled in OpenVINO 2019R1");
 #endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         throw SkipTestException("Test is disabled in OpenVINO 2019R2");
 #endif
     if (backend == DNN_BACKEND_HALIDE ||
-        (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU) ||
+        (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU) ||
         (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
     processNet("dnn/faster_rcnn_inception_v2_coco_2018_01_28.pb",

diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp

@@ -42,8 +42,10 @@
 #include "precomp.hpp"
 #include "op_halide.hpp"
 #include "op_inf_engine.hpp"
+#include "ie_ngraph.hpp"
 #include "op_vkcom.hpp"
 #include "op_cuda.hpp"
 #include "halide_scheduler.hpp"
 #include <set>
@@ -104,11 +106,9 @@ public:
         return impl;
     }

-    static inline bool checkIETarget(int target)
+#ifdef HAVE_INF_ENGINE
+    static inline bool checkIETarget(Target target)
     {
-#ifndef HAVE_INF_ENGINE
-        return false;
-#else
         cv::dnn::Net net;
         cv::dnn::LayerParams lp;
         lp.set("kernel_size", 1);
@@ -126,13 +126,14 @@ public:
         {
             net.forward();
         }
-        catch(...)
+        catch(const std::exception& e)
         {
+            CV_LOG_INFO(NULL, "checkIETarget(" << (int)target << ") has failed with message: " << e.what());
             return false;
         }
         return true;
-#endif
     }
+#endif

 private:
     BackendRegistry()
@@ -146,21 +147,37 @@ private:
 #endif // HAVE_HALIDE

 #ifdef HAVE_INF_ENGINE
-        if (checkIETarget(DNN_TARGET_CPU))
-            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_CPU));
-        if (checkIETarget(DNN_TARGET_MYRIAD))
-            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_MYRIAD));
+        if (checkIETarget(DNN_TARGET_CPU)) {
+            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_CPU));
+#ifdef HAVE_DNN_NGRAPH
+            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_CPU));
+#endif
+        }
+        if (checkIETarget(DNN_TARGET_MYRIAD)) {
+            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_MYRIAD));
+#ifdef HAVE_DNN_NGRAPH
+            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_MYRIAD));
+#endif
+        }
         if (checkIETarget(DNN_TARGET_FPGA))
-            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_FPGA));
-# ifdef HAVE_OPENCL
+            backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_FPGA));
+#ifdef HAVE_OPENCL
         if (cv::ocl::useOpenCL() && ocl::Device::getDefault().isIntel())
         {
-            if (checkIETarget(DNN_TARGET_OPENCL))
-                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL));
-            if (checkIETarget(DNN_TARGET_OPENCL_FP16))
-                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE, DNN_TARGET_OPENCL_FP16));
+            if (checkIETarget(DNN_TARGET_OPENCL)) {
+                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL));
+#ifdef HAVE_DNN_NGRAPH
+                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL));
+#endif
+            }
+            if (checkIETarget(DNN_TARGET_OPENCL_FP16)) {
+                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, DNN_TARGET_OPENCL_FP16));
+#ifdef HAVE_DNN_NGRAPH
+                backends.push_back(std::make_pair(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, DNN_TARGET_OPENCL_FP16));
+#endif
+            }
         }
-# endif
+#endif
 #endif // HAVE_INF_ENGINE

 #ifdef HAVE_OPENCL
@@ -199,6 +216,10 @@ std::vector<Target> getAvailableTargets(Backend be)
 {
     if (be == DNN_BACKEND_DEFAULT)
         be = (Backend)PARAM_DNN_BACKEND_DEFAULT;
+#ifdef HAVE_INF_ENGINE
+    if (be == DNN_BACKEND_INFERENCE_ENGINE)
+        be = getInferenceEngineBackendTypeParam();
+#endif

     std::vector<Target> result;
     const BackendRegistry::BackendsList all_backends = getAvailableBackends();
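With the redirect above, a query through the public enum reports the targets of whichever internal implementation is active; a small sketch (results depend on build flags and hardware):

    #include <iostream>
    #include <opencv2/dnn.hpp>

    int main()
    {
        // DNN_BACKEND_INFERENCE_ENGINE is first resolved to the active internal
        // implementation (NN Builder or nGraph) before targets are enumerated.
        std::vector<cv::dnn::Target> targets =
            cv::dnn::getAvailableTargets(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
        for (cv::dnn::Target t : targets)
            std::cout << "available target id: " << (int)t << std::endl;
        return 0;
    }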
@@ -369,6 +390,7 @@ void imagesFromBlob(const cv::Mat& blob_, OutputArrayOfArrays images_)
     }
 }

+#ifdef HAVE_OPENCL
 class OpenCLBackendWrapper : public BackendWrapper
 {
 public:
@@ -458,6 +480,7 @@ private:
     Mat* host;
     bool hostDirty;
 };
+#endif

 struct LayerPin
 {
@@ -554,7 +577,7 @@ struct DataLayer : public Layer
     virtual bool supportBackend(int backendId) CV_OVERRIDE
     {
         return backendId == DNN_BACKEND_OPENCV ||
-               (backendId == DNN_BACKEND_INFERENCE_ENGINE && inputsData.size() == 1);
+               (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && inputsData.size() == 1);
     }

     void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
@@ -724,9 +747,9 @@ struct DataLayer : public Layer
         }
     }

+#ifdef HAVE_INF_ENGINE
     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
     {
-#ifdef HAVE_INF_ENGINE
         CV_CheckEQ(inputsData.size(), (size_t)1, "");
         CV_CheckEQ(inputsData[0].dims, 4, "");
         const size_t numChannels = inputsData[0].size[1];
@@ -755,9 +778,8 @@ struct DataLayer : public Layer
         addConstantData("weights", weights, ieLayer);
         addConstantData("biases", biases, ieLayer);
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
-#endif  // HAVE_INF_ENGINE
-        return Ptr<BackendNode>();
     }
+#endif  // HAVE_INF_ENGINE

     std::vector<String> outNames;
     // Preprocessing parameters for each network's input.
@@ -998,10 +1020,12 @@ static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
     {
         if (targetId == DNN_TARGET_CPU)
            return Ptr<BackendWrapper>();
+#ifdef HAVE_OPENCL
         else if (IS_DNN_OPENCL_TARGET(targetId))
            return OpenCLBackendWrapper::create(m);
+#endif
         else
-           CV_Error(Error::StsNotImplemented, "Unknown target identifier");
+           CV_Error(Error::StsNotImplemented, "Unknown/unsupported target identifier");
     }
     else if (backendId == DNN_BACKEND_HALIDE)
     {
@@ -1010,12 +1034,21 @@ static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
         return Ptr<BackendWrapper>(new HalideBackendWrapper(targetId, m));
 #endif  // HAVE_HALIDE
     }
-    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
-        CV_Assert(haveInfEngine());
 #ifdef HAVE_INF_ENGINE
         return Ptr<BackendWrapper>(new InfEngineBackendWrapper(targetId, m));
-#endif  // HAVE_INF_ENGINE
+#else
+        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without Inference Engine API support");
+#endif
+    }
+    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+#ifdef HAVE_DNN_NGRAPH
+        return Ptr<BackendWrapper>(new NgraphBackendWrapper(targetId, m));
+#else
+        CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
+#endif
     }
     else if (backendId == DNN_BACKEND_VKCOM)
     {
@@ -1042,7 +1075,7 @@ static Ptr<BackendWrapper> wrapMat(int backendId, int targetId, cv::Mat& m)
     }
     else
         CV_Error(Error::StsNotImplemented, "Unknown backend identifier");
-    return Ptr<BackendWrapper>();
+    return Ptr<BackendWrapper>();  // TODO Error?
 }

 struct Net::Impl
@@ -1128,17 +1161,25 @@ struct Net::Impl
         Ptr<BackendWrapper> baseBuffer = backendWrappers[data];
         if (preferableBackend == DNN_BACKEND_OPENCV)
         {
+#ifdef HAVE_OPENCL
             CV_Assert(IS_DNN_OPENCL_TARGET(preferableTarget));
             return OpenCLBackendWrapper::create(baseBuffer, host);
+#else
+            CV_Error(Error::StsInternal, "");
+#endif
         }
         else if (preferableBackend == DNN_BACKEND_HALIDE)
         {
             CV_Assert(haveHalide());
 #ifdef HAVE_HALIDE
             return Ptr<BackendWrapper>(new HalideBackendWrapper(baseBuffer, shape));
-#endif  // HAVE_HALIDE
+#endif
         }
-        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
+        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        {
+            return wrapMat(preferableBackend, preferableTarget, host);
+        }
+        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         {
             return wrapMat(preferableBackend, preferableTarget, host);
         }
@@ -1252,6 +1293,10 @@ struct Net::Impl
         if (preferableBackend == DNN_BACKEND_DEFAULT)
             preferableBackend = (Backend)PARAM_DNN_BACKEND_DEFAULT;
+#ifdef HAVE_INF_ENGINE
+        if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
+            preferableBackend = getInferenceEngineBackendTypeParam();
+#endif

         CV_Assert(preferableBackend != DNN_BACKEND_OPENCV ||
                   preferableTarget == DNN_TARGET_CPU ||
@@ -1260,17 +1305,21 @@ struct Net::Impl
         CV_Assert(preferableBackend != DNN_BACKEND_HALIDE ||
                   preferableTarget == DNN_TARGET_CPU ||
                   preferableTarget == DNN_TARGET_OPENCL);
-        CV_Assert(preferableBackend != DNN_BACKEND_INFERENCE_ENGINE ||
-                  preferableTarget == DNN_TARGET_CPU ||
-                  preferableTarget == DNN_TARGET_OPENCL ||
-                  preferableTarget == DNN_TARGET_OPENCL_FP16 ||
-                  preferableTarget == DNN_TARGET_MYRIAD ||
-                  preferableTarget == DNN_TARGET_FPGA);
+        if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
+            preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        {
+            CV_Assert(
+                  preferableTarget == DNN_TARGET_CPU ||
+                  preferableTarget == DNN_TARGET_OPENCL ||
+                  preferableTarget == DNN_TARGET_OPENCL_FP16 ||
+                  preferableTarget == DNN_TARGET_MYRIAD ||
+                  preferableTarget == DNN_TARGET_FPGA
+            );
+        }
         CV_Assert(preferableBackend != DNN_BACKEND_VKCOM ||
                   preferableTarget == DNN_TARGET_VULKAN);
         CV_Assert(preferableBackend != DNN_BACKEND_CUDA ||
                   IS_DNN_CUDA_TARGET(preferableTarget));

         if (!netWasAllocated || this->blobsToKeep != blobsToKeep_)
         {
             if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
@@ -1467,8 +1516,22 @@ struct Net::Impl
             CV_Assert(preferableTarget == DNN_TARGET_CPU || IS_DNN_OPENCL_TARGET(preferableTarget));
         else if (preferableBackend == DNN_BACKEND_HALIDE)
             initHalideBackend();
-        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
+        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        {
+#ifdef HAVE_INF_ENGINE
             initInfEngineBackend();
+#else
+            CV_Assert(false && "This OpenCV version is built without Inference Engine API support");
+#endif
+        }
+        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        {
+#ifdef HAVE_DNN_NGRAPH
+            initNgraphBackend();
+#else
+            CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
+#endif
+        }
         else if (preferableBackend == DNN_BACKEND_VKCOM)
             initVkComBackend();
         else if (preferableBackend == DNN_BACKEND_CUDA)
@@ -1538,6 +1601,7 @@ struct Net::Impl
     // layers from default backend or layers from different graphs.
     void addInfEngineNetOutputs(LayerData &ld)
     {
+        CV_TRACE_FUNCTION();
         Ptr<InfEngineBackendNet> layerNet;
         if (ld.backendNodes.find(preferableBackend) != ld.backendNodes.end())
         {
@@ -1568,47 +1632,11 @@ struct Net::Impl
             }
         }
     }
-#endif  // HAVE_INF_ENGINE
-
-    void initVkComBackend()
-    {
-        CV_TRACE_FUNCTION();
-        CV_Assert(preferableBackend == DNN_BACKEND_VKCOM);
-#ifdef HAVE_VULKAN
-        if (!haveVulkan())
-            return;
-
-        MapIdToLayerData::iterator it = layers.begin();
-        for (; it != layers.end(); it++)
-        {
-            LayerData &ld = it->second;
-            Ptr<Layer> layer = ld.layerInstance;
-            if (!layer->supportBackend(preferableBackend))
-            {
-                continue;
-            }
-
-            ld.skip = false;
-            try
-            {
-                ld.backendNodes[DNN_BACKEND_VKCOM] =
-                    layer->initVkCom(ld.inputBlobsWrappers);
-            }
-            catch (const cv::Exception& e)
-            {
-                CV_LOG_ERROR(NULL, "initVkCom failed, fallback to CPU implementation. " << e.what());
-                ld.backendNodes[DNN_BACKEND_VKCOM] = Ptr<BackendNode>();
-            }
-        }
-#endif
-    }

     void initInfEngineBackend()
     {
         CV_TRACE_FUNCTION();
-        CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE, haveInfEngine());
-#ifdef HAVE_INF_ENGINE
+        CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, haveInfEngine());
         MapIdToLayerData::iterator it;
         Ptr<InfEngineBackendNet> net;

@@ -1683,7 +1711,7 @@ struct Net::Impl
                 ld.skip = true;
             }
             layers[lastLayerId].skip = false;
-            ieNode->net->init(preferableTarget);
+            ieNode->net->init((Target)preferableTarget);
             return;
         }
@@ -1858,11 +1886,305 @@ struct Net::Impl
             if (!ieNode->net->isInitialized())
             {
-                ieNode->net->init(preferableTarget);
+                ieNode->net->init((Target)preferableTarget);
                 ld.skip = false;
             }
         }
+    }
 #endif  // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
void addNgraphOutputs(LayerData &ld)
{
CV_TRACE_FUNCTION();
Ptr<InfEngineNgraphNet> layerNet;
auto it = ld.backendNodes.find(preferableBackend);
if (it != ld.backendNodes.end())
{
Ptr<BackendNode> node = it->second;
if (!node.empty())
{
Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
CV_Assert(!ieNode.empty()); CV_Assert(!ieNode->net.empty());
layerNet = ieNode->net;
}
}
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
if (!inpNode.empty())
{
Ptr<InfEngineNgraphNode> ieInpNode = inpNode.dynamicCast<InfEngineNgraphNode>();
CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
if (layerNet != ieInpNode->net)
{
ieInpNode->net->addOutput(ieInpNode->node->get_friendly_name());
ieInpNode->net->setUnconnectedNodes(ieInpNode);
}
}
}
}
void initNgraphBackend()
{
CV_TRACE_FUNCTION();
CV_Assert_N(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, haveInfEngine());
MapIdToLayerData::iterator it;
Ptr<InfEngineNgraphNet> net;
for (it = layers.begin(); it != layers.end(); ++it)
{
LayerData &ld = it->second;
if (ld.id == 0)
{
CV_Assert((netInputLayer->outNames.empty() && ld.outputBlobsWrappers.size() == 1) ||
(netInputLayer->outNames.size() == ld.outputBlobsWrappers.size()));
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
dataPtr->setName(netInputLayer->outNames.empty() ? ld.name : netInputLayer->outNames[i]);
}
}
else
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
dataPtr->setName(ld.name);
}
}
}
if (skipInfEngineInit)
{
Ptr<BackendNode> node = layers[lastLayerId].backendNodes[preferableBackend];
CV_Assert(!node.empty());
Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
CV_Assert(!ieNode.empty());
for (it = layers.begin(); it != layers.end(); ++it)
{
LayerData &ld = it->second;
if (ld.id == 0)
{
for (int i = 0; i < ld.inputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.inputBlobsWrappers[i]);
dataPtr->setName(netInputLayer->outNames[i]);
}
}
else
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
dataPtr->setName(ld.name);
}
}
ieNode->net->addBlobs(ld.inputBlobsWrappers);
ieNode->net->addBlobs(ld.outputBlobsWrappers);
ld.skip = true;
}
layers[lastLayerId].skip = false;
ieNode->net->init((Target)preferableTarget);
return;
}
// Build Inference Engine networks from sets of layers that support this
// backend. Split a whole model on several Inference Engine networks if
// some of layers are not implemented.
for (it = layers.begin(); it != layers.end(); ++it)
{
LayerData &ld = it->second;
if (ld.id == 0 && ld.skip)
continue;
bool fused = ld.skip;
Ptr<Layer> layer = ld.layerInstance;
if (!fused && !layer->supportBackend(preferableBackend))
{
addNgraphOutputs(ld);
net = Ptr<InfEngineNgraphNet>();
layer->preferableTarget = DNN_TARGET_CPU;
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
if (!inpNode.empty()) {
Ptr<InfEngineNgraphNode> ieNode = inpNode.dynamicCast<InfEngineNgraphNode>();
ieNode->net->setUnconnectedNodes(ieNode);
}
}
continue;
}
ld.skip = true; // Initially skip all Inference Engine supported layers.
// Create a new network if one of inputs from different Inference Engine graph.
std::vector<Ptr<BackendNode>> inputNodes;
for (int i = 0; i < ld.inputBlobsId.size(); ++i)
{
// Layer_Test_ROIPooling.Accuracy has 2 inputs inpLD = 0, 0 -> has 4 inputNodes (input, rois, input, rois)
if (inputNodes.size() == ld.inputBlobsId.size()) {
break;
}
LayerData &inpLd = layers[ld.inputBlobsId[i].lid];
Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
if (!inpNode.empty())
{
Ptr<InfEngineNgraphNode> ieInpNode = inpNode.dynamicCast<InfEngineNgraphNode>();
CV_Assert(!ieInpNode.empty()); CV_Assert(!ieInpNode->net.empty());
if (ieInpNode->net == net && !fused) {
inputNodes.push_back(inpNode);
continue;
}
}
if (net.empty()) {
net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet());
}
if (!fused) {
std::vector<std::string> inputNames;
std::vector<cv::Mat> inputs;
auto curr_pos = inpLd.consumers.begin();
auto compare = [&ld] (const LayerPin& lp) { return lp.lid == ld.id; };
auto cons = curr_pos;
while ((cons = std::find_if(curr_pos, inpLd.consumers.end(), compare)) !=
inpLd.consumers.end()) {
int cons_inp = cons->oid;
Ptr<NgraphBackendWrapper> inpWrapper = inpLd.outputBlobsWrappers[cons_inp].
dynamicCast<NgraphBackendWrapper>();
auto iter = std::find(inputNames.begin(), inputNames.end(),
inpWrapper->dataPtr->getName());
if (iter == inputNames.end()) {
inputNames.push_back(inpWrapper->dataPtr->getName());
inputs.push_back(inpLd.outputBlobs[cons_inp]);
}
curr_pos = cons + 1;
}
auto inps = net->setInputs(inputs, inputNames);
for (auto& inp : inps) {
inputNodes.emplace_back(Ptr<BackendNode>(new InfEngineNgraphNode(inp)));
}
}
}
Ptr<BackendNode> node;
if (!net.empty())
{
if (fused)
{
bool inPlace = ld.inputBlobsId.size() == 1 && ld.outputBlobs.size() == 1 &&
ld.inputBlobs[0]->data == ld.outputBlobs[0].data;
CV_Assert(inPlace);
node = layers[ld.inputBlobsId[0].lid].backendNodes[preferableBackend];
ld.inputBlobsWrappers = layers[ld.inputBlobsId[0].lid].inputBlobsWrappers;
}
}
else {
net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet());
}
if (!fused)
{
CV_Assert(!inputNodes.empty());
node = layer->initNgraph(ld.inputBlobsWrappers, inputNodes);
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
InferenceEngine::DataPtr dataPtr = ngraphDataNode(ld.outputBlobsWrappers[i]);
node.dynamicCast<InfEngineNgraphNode>()->setName(dataPtr->getName());
}
}
else if (node.empty())
continue;
ld.backendNodes[preferableBackend] = node;
Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
CV_Assert(!ieNode.empty());
ieNode->net = net;
if (ld.consumers.empty()) {
// TF EAST_text_detection
ieNode->net->setUnconnectedNodes(ieNode);
}
ieNode->net->setNodePtr(&ieNode->node);
net->addBlobs(ld.inputBlobsWrappers);
net->addBlobs(ld.outputBlobsWrappers);
addNgraphOutputs(ld);
}
// Initialize all networks.
for (MapIdToLayerData::reverse_iterator it = layers.rbegin(); it != layers.rend(); ++it)
{
LayerData &ld = it->second;
auto iter = ld.backendNodes.find(preferableBackend);
if (iter == ld.backendNodes.end())
continue;
Ptr<BackendNode>& node = iter->second;
if (node.empty())
continue;
Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
if (ieNode.empty())
continue;
CV_Assert(!ieNode->net.empty());
if (!ieNode->net->isInitialized())
{
ieNode->net->setUnconnectedNodes(ieNode);
ieNode->net->createNet((Target)preferableTarget);
ld.skip = false;
}
}
}
#endif // HAVE_DNN_NGRAPH
void initVkComBackend()
{
CV_TRACE_FUNCTION();
CV_Assert(preferableBackend == DNN_BACKEND_VKCOM);
#ifdef HAVE_VULKAN
if (!haveVulkan())
return;
MapIdToLayerData::iterator it = layers.begin();
for (; it != layers.end(); it++)
{
LayerData &ld = it->second;
Ptr<Layer> layer = ld.layerInstance;
if (!layer->supportBackend(preferableBackend))
{
continue;
}
ld.skip = false;
try
{
ld.backendNodes[DNN_BACKEND_VKCOM] =
layer->initVkCom(ld.inputBlobsWrappers);
}
catch (const cv::Exception& e)
{
CV_LOG_ERROR(NULL, "initVkCom failed, fallback to CPU implementation. " << e.what());
ld.backendNodes[DNN_BACKEND_VKCOM] = Ptr<BackendNode>();
}
}
#endif
} }
void initCUDABackend() { void initCUDABackend() {
@@ -2025,13 +2347,14 @@ struct Net::Impl
     void fuseLayers(const std::vector<LayerPin>& blobsToKeep_)
     {
-        if( !fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
-                        preferableBackend != DNN_BACKEND_CUDA &&
-                        preferableBackend != DNN_BACKEND_INFERENCE_ENGINE))
-           return;
-
         CV_TRACE_FUNCTION();
+
+        if(!fusion || (preferableBackend != DNN_BACKEND_OPENCV &&
+                       preferableBackend != DNN_BACKEND_CUDA &&
+                       preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
+                       preferableBackend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
+           return;

         // scan through all the layers. If there is convolution layer followed by the activation layer,
         // we try to embed this activation into the convolution and disable separate execution of the activation
         std::set<LayerPin> pinsToKeep(blobsToKeep_.begin(),
@@ -2249,6 +2572,7 @@ struct Net::Impl
         {
             Mat& output = ld.outputBlobs[0];
             UMat umat_output;
+#ifdef HAVE_OPENCL
             if (!ld.outputBlobsWrappers.empty() &&
                 (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget)))
             {
@@ -2273,6 +2597,7 @@ struct Net::Impl
                 umat_outputBlobs = OpenCLBackendWrapper::getUMatVector(ld.outputBlobsWrappers);
                 umat_output = umat_outputBlobs[0];
             }
+#endif

             // TODO: in general, this optimization can always be done, but
             // many layers currently check that the input/output blobs are
@@ -2309,6 +2634,7 @@ struct Net::Impl
             // Allocate new memory to prevent collisions during memory
             // reusing (see https://github.com/opencv/opencv/pull/10456).
             output = output.clone();
+#ifdef HAVE_OPENCL
             if (preferableBackend == DNN_BACKEND_OPENCV &&
                 IS_DNN_OPENCL_TARGET(preferableTarget))
             {
@@ -2317,6 +2643,7 @@ struct Net::Impl
                 umats[0] = umat_output;
                 OpenCLBackendWrapper::update(ld.outputBlobsWrappers, umats);
             }
+#endif
             Range chrange[] = { Range::all(), Range::all(), Range::all(), Range::all() };
             int ofs = 0;
             for( i = 0; i < ninputs; i++ )
@@ -2333,12 +2660,14 @@ struct Net::Impl
                 CV_Assert(output_slice.isContinuous() && output_slice.size == curr_output.size);
                 Mat* oldPtr = &curr_output;
                 curr_output = output_slice;
+#ifdef HAVE_OPENCL
                 if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
                 {
                     std::vector<UMat> umats(inp_i_data->outputBlobsWrappers.size());
                     umats[pin.oid] = umat_output(chrange);
                     OpenCLBackendWrapper::update(inp_i_data->outputBlobsWrappers, umats);
                 }
+#endif
                 // Layers that refer old input Mat will refer to the
                 // new data but the same Mat object.
                 CV_Assert_N(curr_output.data == output_slice.data, oldPtr == &curr_output);
@@ -2431,6 +2760,7 @@ struct Net::Impl
                 CV_Error(Error::StsNotImplemented, format("Layer \"%s\" of type \"%s\" unsupported on OpenCV backend",
                                                    ld.name.c_str(), ld.type.c_str()));

+#ifdef HAVE_OPENCL
             if (preferableBackend == DNN_BACKEND_OPENCV && IS_DNN_OPENCL_TARGET(preferableTarget))
             {
                 std::vector<UMat> umat_inputBlobs = OpenCLBackendWrapper::getUMatVector(ld.inputBlobsWrappers);
@@ -2505,6 +2835,7 @@ struct Net::Impl
                 OpenCLBackendWrapper::update(ld.outputBlobsWrappers, umat_outputBlobs);
             }
             else
+#endif
             {
                 for (int i = 0, n = ld.inputBlobsWrappers.size(); i < n; ++i)
                 {
@@ -2595,10 +2926,14 @@ struct Net::Impl
         {
             forwardHalide(ld.outputBlobsWrappers, node);
         }
-        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE)
+        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         {
             forwardInfEngine(ld.outputBlobsWrappers, node, isAsync);
         }
+        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        {
+            forwardNgraph(ld.outputBlobsWrappers, node, isAsync);
+        }
         else if (preferableBackend == DNN_BACKEND_VKCOM)
         {
             try
@@ -2799,13 +3134,23 @@ struct Net::Impl
             // Transfer data to CPU if it's require.
             ld.outputBlobsWrappers[pin.oid]->copyToHost();
         }
-        CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE);
-        Ptr<InfEngineBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<InfEngineBackendWrapper>();
-        return std::move(wrapper->futureMat);
+        CV_Assert(preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+        if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
+            Ptr<InfEngineBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<InfEngineBackendWrapper>();
+            return std::move(wrapper->futureMat);
+        }
+        else if (preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        {
+#ifdef HAVE_DNN_NGRAPH
+            Ptr<NgraphBackendWrapper> wrapper = ld.outputBlobsWrappers[pin.oid].dynamicCast<NgraphBackendWrapper>();
+            return std::move(wrapper->futureMat);
 #else
-        CV_Error(Error::StsNotImplemented, "DNN_BACKEND_INFERENCE_ENGINE backend is required");
+            CV_Error(Error::StsNotImplemented, "This OpenCV version is built without support of Inference Engine + nGraph");
 #endif
+        }
+#endif  // HAVE_INF_ENGINE
+        CV_Error(Error::StsNotImplemented, "DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 backend is required");
     }
AsyncArray getBlobAsync(String outputName) AsyncArray getBlobAsync(String outputName)
@@ -2824,11 +3169,17 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
 #ifndef HAVE_INF_ENGINE
     CV_Error(Error::StsError, "Build OpenCV with Inference Engine to enable loading models from Model Optimizer.");
 #else
+#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
     InferenceEngine::CNNNetReader reader;
     reader.ReadNetwork(xml);
     reader.ReadWeights(bin);

     InferenceEngine::CNNNetwork ieNet = reader.getNetwork();
+#else
+    InferenceEngine::Core& ie = getCore();
+    InferenceEngine::CNNNetwork ieNet = ie.ReadNetwork(xml, bin);
+#endif

     std::vector<String> inputsNames;
     std::vector<MatShape> inp_shapes;
@@ -2848,27 +3199,63 @@ Net Net::readFromModelOptimizer(const String& xml, const String& bin)
         cvNet.setInput(Mat(inp_shapes[inp_id], CV_32F), inputsNames[inp_id]);
     }

-    Ptr<InfEngineBackendNode> backendNode(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
-    backendNode->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
+    Ptr<BackendNode> backendNode;
+#ifdef HAVE_DNN_NGRAPH
+    if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
+    {
+        auto fake_node = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape{});
+        Ptr<InfEngineNgraphNode> backendNodeNGraph(new InfEngineNgraphNode(fake_node));
+        backendNodeNGraph->net = Ptr<InfEngineNgraphNet>(new InfEngineNgraphNet(ieNet));
+        backendNode = backendNodeNGraph;
+    }
+    else
+#endif
+    {
+        Ptr<InfEngineBackendNode> backendNodeNN(new InfEngineBackendNode(InferenceEngine::Builder::Layer("")));
+        backendNodeNN->net = Ptr<InfEngineBackendNet>(new InfEngineBackendNet(ieNet));
+        backendNode = backendNodeNN;
+    }
+
     for (auto& it : ieNet.getOutputsInfo())
     {
-        Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
-        InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str());
-        CV_Assert(ieLayer);
-
         LayerParams lp;
         int lid = cvNet.addLayer(it.first, "", lp);

         LayerData& ld = cvNet.impl->layers[lid];

-        cvLayer->name = it.first;
-        cvLayer->type = ieLayer->type;
-        ld.layerInstance = cvLayer;
-        ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE] = backendNode;
+#ifdef HAVE_DNN_NGRAPH
+        if (DNN_BACKEND_INFERENCE_ENGINE_NGRAPH == getInferenceEngineBackendTypeParam())
+        {
+            Ptr<Layer> cvLayer(new NgraphBackendLayer(ieNet));
+
+            InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str());
+            CV_Assert(ieLayer);
+
+            cvLayer->name = it.first;
+            cvLayer->type = ieLayer->type;
+            ld.layerInstance = cvLayer;
+
+            ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NGRAPH] = backendNode;
+        }
+        else
+#endif
+        {
+            Ptr<Layer> cvLayer(new InfEngineBackendLayer(ieNet));
+
+            InferenceEngine::CNNLayerPtr ieLayer = ieNet.getLayerByName(it.first.c_str());
+            CV_Assert(ieLayer);
+
+            cvLayer->name = it.first;
+            cvLayer->type = ieLayer->type;
+            ld.layerInstance = cvLayer;
+
+            ld.backendNodes[DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019] = backendNode;
+        }

         for (int i = 0; i < inputsNames.size(); ++i)
             cvNet.connect(0, i, lid, i);
     }
-    cvNet.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
+    cvNet.setPreferableBackend(getInferenceEngineBackendTypeParam());

     cvNet.impl->skipInfEngineInit = true;
     return cvNet;
@@ -2953,8 +3340,8 @@ AsyncArray Net::forwardAsync(const String& outputName)
     std::vector<LayerPin> pins(1, impl->getPinByAlias(layerName));
     impl->setUpNet(pins);

-    if (impl->preferableBackend != DNN_BACKEND_INFERENCE_ENGINE)
-        CV_Error(Error::StsNotImplemented, "Asynchronous forward for backend which is different from DNN_BACKEND_INFERENCE_ENGINE");
+    if (!(impl->preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || impl->preferableBackend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH))
+        CV_Error(Error::StsNotImplemented, "DNN: Asynchronous forward is supported for Inference Engine backends only");

     impl->isAsync = true;
     impl->forwardToLayer(impl->getLayerData(layerName));
@@ -2962,7 +3349,7 @@ AsyncArray Net::forwardAsync(const String& outputName)
     return impl->getBlobAsync(layerName);
 #else
-    CV_Error(Error::StsNotImplemented, "Asynchronous forward without C++11");
+    CV_Error(Error::StsNotImplemented, "DNN: Asynchronous forward requires build with enabled C++11");
 #endif  // CV_CXX11
 }
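For reference, the path this guard protects looks roughly like the following from user code (a sketch with placeholder IR files; requires an Inference Engine backend and a C++11 build):

    #include <opencv2/dnn.hpp>

    int main()
    {
        // Placeholder OpenVINO IR pair.
        cv::dnn::Net net = cv::dnn::readNetFromModelOptimizer("model.xml", "model.bin");
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);

        cv::Mat img = cv::Mat::zeros(224, 224, CV_32FC3);
        net.setInput(cv::dnn::blobFromImage(img));

        cv::AsyncArray result = net.forwardAsync();  // throws on non-IE backends
        cv::Mat out;
        result.get(out);  // blocks until the inference request completes
        return 0;
    }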
@@ -3015,6 +3402,7 @@ void Net::forward(OutputArrayOfArrays outputBlobs, const String& outputName)
     {
         std::vector<UMat> & outputvec = *(std::vector<UMat> *)outputBlobs.getObj();

+#ifdef HAVE_OPENCL
         if (impl->preferableBackend == DNN_BACKEND_OPENCV &&
             IS_DNN_OPENCL_TARGET(impl->preferableTarget))
         {
@@ -3029,6 +3417,7 @@ void Net::forward(OutputArrayOfArrays outputBlobs, const String& outputName)
             }
         }
         else
+#endif
         {
             outputvec.resize(ld.outputBlobs.size());
             for (int i = 0; i < outputvec.size(); ++i)
@@ -3098,6 +3487,11 @@ void Net::setPreferableBackend(int backendId)
     CV_TRACE_FUNCTION();
     CV_TRACE_ARG(backendId);

+#ifdef HAVE_INF_ENGINE
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+        backendId = getInferenceEngineBackendTypeParam();
+#endif
+
     if( impl->preferableBackend != backendId )
     {
         impl->preferableBackend = backendId;
@@ -3288,7 +3682,9 @@ String Net::dump()
     switch (prefBackend) {
         case DNN_BACKEND_DEFAULT: backend = "DEFAULT/"; break;
         case DNN_BACKEND_HALIDE: backend = "HALIDE/"; break;
-        case DNN_BACKEND_INFERENCE_ENGINE: backend = "DLIE/"; break;
+        case DNN_BACKEND_INFERENCE_ENGINE: // fallthru
+        case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: backend = "DLIE/"; break;
+        case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: backend = "NGRAPH/"; break;
         case DNN_BACKEND_OPENCV: backend = "OCV/"; break;
         case DNN_BACKEND_CUDA: backend = "CUDA/"; break;
     }
@@ -3822,6 +4218,13 @@ Ptr<BackendNode> Layer::initInfEngine(const std::vector<Ptr<BackendWrapper> > &)
     return Ptr<BackendNode>();
 }

+Ptr<BackendNode> Layer::initNgraph(const std::vector<Ptr<BackendWrapper> > & inputs, const std::vector<Ptr<BackendNode> >& nodes)
+{
+    CV_Error(Error::StsNotImplemented, "Inference Engine pipeline of " + type +
+                                       " layers is not defined.");
+    return Ptr<BackendNode>();
+}
+
 void Layer::applyHalideScheduler(Ptr<BackendNode>& node, const std::vector<Mat*> &inputs,
                                  const std::vector<Mat> &outputs, int targetId) const
 {

diff --git a/modules/dnn/src/ie_ngraph.cpp b/modules/dnn/src/ie_ngraph.cpp
new file

@@ -0,0 +1,679 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#include "precomp.hpp"
#include "ie_ngraph.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#ifdef HAVE_DNN_NGRAPH
#include <ie_extension.h>
#include <ie_plugin_dispatcher.hpp>
#endif // HAVE_DNN_NGRAPH
#include <opencv2/core/utils/configuration.private.hpp>
#include <opencv2/core/utils/logger.hpp>
namespace cv { namespace dnn {
#ifdef HAVE_DNN_NGRAPH
// For networks with input layer which has an empty name, IE generates a name id[some_number].
// OpenCV lets users use an empty input name and to prevent unexpected naming,
// we can use some predefined name.
static std::string kDefaultInpLayerName = "empty_inp_layer_name";
static std::vector<Ptr<NgraphBackendWrapper> >
ngraphWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
{
std::vector<Ptr<NgraphBackendWrapper> > wrappers(ptrs.size());
for (int i = 0; i < ptrs.size(); ++i)
{
CV_Assert(!ptrs[i].empty());
wrappers[i] = ptrs[i].dynamicCast<NgraphBackendWrapper>();
CV_Assert(!wrappers[i].empty());
}
return wrappers;
}
InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(std::move(_node)) {}
InfEngineNgraphNode::InfEngineNgraphNode(std::shared_ptr<ngraph::Node>& _node)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH), node(_node) {}
void InfEngineNgraphNode::setName(const std::string& name) {
node->set_friendly_name(name);
}
InfEngineNgraphNet::InfEngineNgraphNet()
{
hasNetOwner = false;
device_name = "CPU";
}
InfEngineNgraphNet::InfEngineNgraphNet(InferenceEngine::CNNNetwork& net) : cnn(net)
{
hasNetOwner = true;
device_name = "CPU";
}
void InfEngineNgraphNet::addOutput(const std::string& name)
{
requestedOutputs.push_back(name);
}
void InfEngineNgraphNet::setNodePtr(std::shared_ptr<ngraph::Node>* ptr) {
all_nodes.emplace((*ptr)->get_friendly_name(), ptr);
}
void InfEngineNgraphNet::release() {
for (auto& node : components.back()) {
if (!(node->is_parameter() || node->is_output() || node->is_constant()) ) {
auto it = all_nodes.find(node->get_friendly_name());
if (it != all_nodes.end()) {
unconnectedNodes.erase(*(it->second));
it->second->reset();
all_nodes.erase(it);
}
}
}
}
void InfEngineNgraphNet::dfs(std::shared_ptr<ngraph::Node>& node,
std::vector<std::shared_ptr<ngraph::Node>>& comp,
std::unordered_map<std::string, bool>& used) {
used[node->get_friendly_name()] = true;
comp.push_back(node);
auto inputs = node->get_users();
for (size_t i = 0; i < node->get_input_size(); ++i) {
inputs.push_back(node->input_value(i).get_node()->shared_from_this());
}
for (auto& to : inputs) {
if (!used[to->get_friendly_name()]) {
dfs(to, comp, used);
}
}
}
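// Note (editorial comment, not in the original source): dfs() follows both
// users and input producers, so it collects a weakly-connected component of
// the graph; getNumComponents() below uses this to split the function into
// independent subgraphs, which createNet() then compiles as separate networks.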
int InfEngineNgraphNet::getNumComponents() {
if (!components.empty()) {
return components.size();
}
std::unordered_map<std::string, bool> used;
auto inputs = ngraph_function->get_ordered_ops();
for (auto& node : inputs) {
used.emplace(node->get_friendly_name(), false);
}
for (auto& node : inputs) {
if (!used[node->get_friendly_name()]) {
std::vector<std::shared_ptr<ngraph::Node>> current_comp;
dfs(node, current_comp, used);
components.push_back(current_comp);
}
}
return components.size();
}
void InfEngineNgraphNet::createNet(Target targetId) {
if (!hasNetOwner)
{
CV_Assert(!unconnectedNodes.empty());
ngraph::ResultVector outs;
for (auto& node : unconnectedNodes)
{
auto out = std::make_shared<ngraph::op::Result>(node);
outs.push_back(out);
}
CV_Assert_N(!inputs_vec.empty(), !outs.empty());
ngraph_function = std::make_shared<ngraph::Function>(outs, inputs_vec);
int num_comp = getNumComponents();
if (num_comp > 1) {
for (int i = num_comp - 1; i >= 0; --i) {
ngraph::ResultVector outputs;
ngraph::ParameterVector inps;
for (auto& node : components.back()) {
if (node->is_parameter()) {
auto parameter = std::dynamic_pointer_cast<ngraph::op::Parameter>(node);
inps.push_back(parameter);
}
else if (node->is_output()) {
auto result = std::dynamic_pointer_cast<ngraph::op::Result>(node);
outputs.push_back(result);
}
}
isInit = false;
CV_Assert_N(!inps.empty(), !outputs.empty());
ngraph_function = std::make_shared<ngraph::Function>(outputs, inps);
release();
components.pop_back();
init(targetId);
}
} else {
release();
components.clear();
init(targetId);
}
}
}
void InfEngineNgraphNet::init(Target targetId)
{
if (!hasNetOwner)
{
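// For FP16-capable targets, retype f32 Parameters to f16 and replace every f32
// Constant with an f16 copy before wrapping the function into a CNNNetwork.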
if (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) {
auto nodes = ngraph_function->get_ordered_ops();
for (auto& node : nodes) {
auto parameter = std::dynamic_pointer_cast<ngraph::op::Parameter>(node);
if (parameter && parameter->get_element_type() == ngraph::element::f32) {
parameter->set_element_type(ngraph::element::f16);
}
auto constant = std::dynamic_pointer_cast<ngraph::op::Constant>(node);
if (constant && constant->get_element_type() == ngraph::element::f32) {
auto data = constant->get_vector<float>();
std::vector<ngraph::float16> new_data(data.size());
for (size_t i = 0; i < data.size(); ++i) {
new_data[i] = ngraph::float16(data[i]);
}
auto new_const = std::make_shared<ngraph::op::Constant>(ngraph::element::f16, constant->get_shape(), new_data);
new_const->set_friendly_name(constant->get_friendly_name());
ngraph::replace_node(constant, new_const);
}
}
ngraph_function->validate_nodes_and_infer_types();
}
cnn = InferenceEngine::CNNNetwork(ngraph_function);
#ifdef _DEBUG // TODO
//cnn.serialize("/tmp/cnn.xml", "/tmp/cnn.bin");
#endif
}
switch (targetId)
{
case DNN_TARGET_CPU:
device_name = "CPU";
break;
case DNN_TARGET_OPENCL:
case DNN_TARGET_OPENCL_FP16:
device_name = "GPU";
break;
case DNN_TARGET_MYRIAD:
device_name = "MYRIAD";
break;
case DNN_TARGET_FPGA:
device_name = "FPGA";
break;
default:
CV_Error(Error::StsNotImplemented, "Unknown target");
};
if (!hasNetOwner) {
for (size_t i = 0; i < ngraph_function->get_output_size(); ++i) {
auto node = ngraph_function->output(i).get_node();
for (size_t j = 0; j < node->get_input_size(); ++j) {
std::string name = node->input_value(j).get_node()->get_friendly_name();
auto iter = std::find(requestedOutputs.begin(), requestedOutputs.end(), name);
if (iter != requestedOutputs.end()) {
requestedOutputs.erase(iter);
cnn.addOutput(name);
}
}
}
} else {
for (const auto& name : requestedOutputs)
{
cnn.addOutput(name);
}
}
for (const auto& it : cnn.getInputsInfo())
{
const std::string& name = it.first;
auto blobIt = allBlobs.find(name);
CV_Assert(blobIt != allBlobs.end());
it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision());
}
for (const auto& it : cnn.getOutputsInfo())
{
const std::string& name = it.first;
auto blobIt = allBlobs.find(name);
CV_Assert(blobIt != allBlobs.end());
it.second->setPrecision(blobIt->second->getTensorDesc().getPrecision()); // Should be always FP32
}
initPlugin(cnn);
}
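// Creates f32 Parameter nodes for the given inputs, reusing existing nodes with the same friendly name.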
ngraph::ParameterVector InfEngineNgraphNet::setInputs(const std::vector<cv::Mat>& inputs,
const std::vector<std::string>& names) {
CV_Assert_N(inputs.size() == names.size());
ngraph::ParameterVector current_inp;
for (size_t i = 0; i < inputs.size(); i++)
{
std::vector<size_t> shape = getShape<size_t>(inputs[i]);
auto inp = std::make_shared<ngraph::op::Parameter>(ngraph::element::f32, ngraph::Shape(shape));
inp->set_friendly_name(names[i]);
auto it = std::find_if(inputs_vec.begin(), inputs_vec.end(),
[&inp](const std::shared_ptr<ngraph::op::Parameter>& a) {
return a->get_friendly_name() == inp->get_friendly_name();
});
if (it == inputs_vec.end()) {
inputs_vec.push_back(inp);
current_inp.push_back(inp);
} else {
current_inp.push_back(*it);
}
}
return current_inp;
}
void InfEngineNgraphNet::setUnconnectedNodes(Ptr<InfEngineNgraphNode>& node) {
unconnectedNodes.insert(node->node);
}
void InfEngineNgraphNet::initPlugin(InferenceEngine::CNNNetwork& net)
{
CV_Assert(!isInitialized());
try
{
AutoLock lock(getInitializationMutex());
InferenceEngine::Core& ie = getCore();
{
isInit = true;
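// Optionally load a user-supplied extension library with custom CPU layers.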
std::vector<std::string> candidates;
std::string param_pluginPath = utils::getConfigurationParameterString("OPENCV_DNN_IE_EXTRA_PLUGIN_PATH", "");
if (!param_pluginPath.empty())
{
candidates.push_back(param_pluginPath);
}
bool found = false;
for (size_t i = 0; i != candidates.size(); ++i)
{
const std::string& libName = candidates[i];
try
{
InferenceEngine::IExtensionPtr extension =
InferenceEngine::make_so_pointer<InferenceEngine::IExtension>(libName);
ie.AddExtension(extension, "CPU");
CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName);
found = true;
break;
}
catch(...) {}
}
if (!found && !candidates.empty())
{
CV_LOG_WARNING(NULL, "DNN-IE: Can't load extension plugin (extra layers for some networks). Specify path via OPENCV_DNN_IE_EXTRA_PLUGIN_PATH parameter");
}
// Some networks can work without the library of extra layers.
// OpenCV's own fallback implementations are registered as an extension.
ie.AddExtension(std::make_shared<InfEngineExtension>(), "CPU");
#ifndef _WIN32
// Limit the number of CPU threads.
if (device_name == "CPU")
ie.SetConfig({{
InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
}}, device_name);
#endif
}
std::map<std::string, std::string> config;
if (device_name == "MYRIAD") {
config.emplace("VPU_DETECT_NETWORK_BATCH", CONFIG_VALUE(NO));
}
netExec = ie.LoadNetwork(net, device_name, config);
}
catch (const std::exception& ex)
{
CV_Error(Error::StsError, format("Failed to initialize Inference Engine backend (device = %s): %s", device_name.c_str(), ex.what()));
}
}
bool InfEngineNgraphNet::isInitialized()
{
return isInit;
}
bool NgraphBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const
{
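// Compare the requested input shapes with the network's current ones; reshape when they differ.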
InferenceEngine::ICNNNetwork::InputShapes inShapes = t_net.getInputShapes();
InferenceEngine::ICNNNetwork::InputShapes::iterator itr;
bool equal_flag = true;
size_t i = 0;
for (itr = inShapes.begin(); itr != inShapes.end(); ++itr)
{
InferenceEngine::SizeVector currentInShape(inputs[i].begin(), inputs[i].end());
if (itr->second != currentInShape)
{
itr->second = currentInShape;
equal_flag = false;
}
i++;
}
if (!equal_flag)
{
InferenceEngine::CNNNetwork curr_t_net(t_net);
curr_t_net.reshape(inShapes);
}
std::vector<size_t> dims = t_net.getOutputsInfo()[name]->getDims();
outputs.push_back(MatShape(dims.begin(), dims.end()));
return false;
}
bool NgraphBackendLayer::supportBackend(int backendId)
{
CV_LOG_DEBUG(NULL, "NgraphBackendLayer::supportBackend(" << backendId << ")");
return backendId == DNN_BACKEND_DEFAULT ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
}
void NgraphBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
OutputArrayOfArrays internals)
{
CV_Error(Error::StsInternal, "Choose Inference Engine as a preferable backend.");
}
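// Guess the layout from dimensionality alone: 4D tensors map to NCHW, 2D to NC.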
static InferenceEngine::Layout estimateLayout(const Mat& m)
{
if (m.dims == 4)
return InferenceEngine::Layout::NCHW;
else if (m.dims == 2)
return InferenceEngine::Layout::NC;
else
return InferenceEngine::Layout::ANY;
}
static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "")
{
std::vector<size_t> shape = getShape<size_t>(m);
if (m.type() == CV_32F)
return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
{InferenceEngine::Precision::FP32, shape, estimateLayout(m)}));
else if (m.type() == CV_8U)
return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
{InferenceEngine::Precision::U8, shape, estimateLayout(m)}));
else
CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
}
InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m, const std::vector<size_t>& shape,
InferenceEngine::Layout layout)
{
if (m.type() == CV_32F)
return InferenceEngine::make_shared_blob<float>(
{InferenceEngine::Precision::FP32, shape, layout}, (float*)m.data);
else if (m.type() == CV_8U)
return InferenceEngine::make_shared_blob<uint8_t>(
{InferenceEngine::Precision::U8, shape, layout}, (uint8_t*)m.data);
else
CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
}
InferenceEngine::Blob::Ptr wrapToNgraphBlob(const Mat& m, InferenceEngine::Layout layout)
{
std::vector<size_t> shape = getShape<size_t>(m);
return wrapToNgraphBlob(m, shape, layout);
}
NgraphBackendWrapper::NgraphBackendWrapper(int targetId, const cv::Mat& m)
: BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, targetId)
{
dataPtr = wrapToInfEngineDataNode(m);
blob = wrapToNgraphBlob(m, estimateLayout(m));
}
NgraphBackendWrapper::NgraphBackendWrapper(Ptr<BackendWrapper> wrapper)
: BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, wrapper->targetId)
{
Ptr<NgraphBackendWrapper> ieWrapper = wrapper.dynamicCast<NgraphBackendWrapper>();
CV_Assert(!ieWrapper.empty());
InferenceEngine::DataPtr srcData = ieWrapper->dataPtr;
dataPtr = InferenceEngine::DataPtr(new InferenceEngine::Data(srcData->getName(), srcData->getTensorDesc()));
blob = ieWrapper->blob;
}
Ptr<BackendWrapper> NgraphBackendWrapper::create(Ptr<BackendWrapper> wrapper)
{
return Ptr<BackendWrapper>(new NgraphBackendWrapper(wrapper));
}
NgraphBackendWrapper::~NgraphBackendWrapper()
{
// nothing
}
void NgraphBackendWrapper::copyToHost()
{
CV_LOG_DEBUG(NULL, "NgraphBackendWrapper::copyToHost()");
//CV_Error(Error::StsNotImplemented, "");
}
void NgraphBackendWrapper::setHostDirty()
{
CV_LOG_DEBUG(NULL, "NgraphBackendWrapper::setHostDirty()");
//CV_Error(Error::StsNotImplemented, "");
}
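// Allocates an uninitialized blob with the same TensorDesc; async requests work on
// copies so that user-visible blobs are not written while inference is in flight.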
InferenceEngine::Blob::Ptr copyBlob(const InferenceEngine::Blob::Ptr& blob)
{
InferenceEngine::Blob::Ptr copy;
auto description = blob->getTensorDesc();
InferenceEngine::Precision precision = description.getPrecision();
if (precision == InferenceEngine::Precision::FP32)
{
copy = InferenceEngine::make_shared_blob<float>(description);
}
else if (precision == InferenceEngine::Precision::U8)
{
copy = InferenceEngine::make_shared_blob<uint8_t>(description);
}
else
CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
copy->allocate();
return copy;
}
InferenceEngine::DataPtr ngraphDataNode(const Ptr<BackendWrapper>& ptr)
{
CV_Assert(!ptr.empty());
Ptr<NgraphBackendWrapper> p = ptr.dynamicCast<NgraphBackendWrapper>();
CV_Assert(!p.empty());
return p->dataPtr;
}
void forwardNgraph(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
Ptr<BackendNode>& node, bool isAsync)
{
CV_Assert(!node.empty());
Ptr<InfEngineNgraphNode> ieNode = node.dynamicCast<InfEngineNgraphNode>();
CV_Assert(!ieNode.empty());
ieNode->net->forward(outBlobsWrappers, isAsync);
}
void InfEngineNgraphNet::addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs)
{
auto wrappers = ngraphWrappers(ptrs);
for (const auto& wrapper : wrappers)
{
std::string name = wrapper->dataPtr->getName();
name = name.empty() ? kDefaultInpLayerName : name;
allBlobs.insert({name, wrapper->blob});
}
}
void InfEngineNgraphNet::NgraphReqWrapper::makePromises(const std::vector<Ptr<BackendWrapper> >& outsWrappers)
{
auto outs = ngraphWrappers(outsWrappers);
outProms.clear();
outProms.resize(outs.size());
outsNames.resize(outs.size());
for (int i = 0; i < outs.size(); ++i)
{
outs[i]->futureMat = outProms[i].getArrayResult();
outsNames[i] = outs[i]->dataPtr->getName();
}
}
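// Wraps the blob memory into a Mat header without copying the data.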
Mat ngraphBlobToMat(const InferenceEngine::Blob::Ptr& blob)
{
std::vector<size_t> dims = blob->getTensorDesc().getDims();
std::vector<int> size(dims.begin(), dims.end());
auto precision = blob->getTensorDesc().getPrecision();
int type = -1;
switch (precision)
{
case InferenceEngine::Precision::FP32: type = CV_32F; break;
case InferenceEngine::Precision::U8: type = CV_8U; break;
default:
CV_Error(Error::StsNotImplemented, "Unsupported blob precision");
}
return Mat(size, type, (void*)blob->buffer());
}
void InfEngineNgraphNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers, bool isAsync)
{
CV_LOG_DEBUG(NULL, "InfEngineNgraphNet::forward(" << (isAsync ? "async" : "sync") << ")");
// Look for finished requests.
Ptr<NgraphReqWrapper> reqWrapper;
for (auto& wrapper : infRequests)
{
if (wrapper->isReady)
{
reqWrapper = wrapper;
break;
}
}
if (reqWrapper.empty())
{
reqWrapper = Ptr<NgraphReqWrapper>(new NgraphReqWrapper());
try
{
reqWrapper->req = netExec.CreateInferRequest();
}
catch (const std::exception& ex)
{
CV_Error(Error::StsAssert, format("Failed to initialize Inference Engine backend: %s", ex.what()));
}
infRequests.push_back(reqWrapper);
InferenceEngine::BlobMap inpBlobs, outBlobs;
for (const auto& it : cnn.getInputsInfo())
{
const std::string& name = it.first;
auto blobIt = allBlobs.find(name);
CV_Assert(blobIt != allBlobs.end());
inpBlobs[name] = isAsync ? copyBlob(blobIt->second) : blobIt->second;
}
for (const auto& it : cnn.getOutputsInfo())
{
const std::string& name = it.first;
auto blobIt = allBlobs.find(name);
CV_Assert(blobIt != allBlobs.end());
outBlobs[name] = isAsync ? copyBlob(blobIt->second) : blobIt->second;
}
reqWrapper->req.SetInput(inpBlobs);
reqWrapper->req.SetOutput(outBlobs);
InferenceEngine::IInferRequest::Ptr infRequestPtr = reqWrapper->req;
infRequestPtr->SetUserData(reqWrapper.get(), 0);
infRequestPtr->SetCompletionCallback(
[](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status)
{
CV_LOG_DEBUG(NULL, "DNN(nGraph): completionCallback(" << (int)status << ")");
NgraphReqWrapper* wrapper;
request->GetUserData((void**)&wrapper, 0);
CV_Assert(wrapper && "Internal error");
size_t processedOutputs = 0;
try
{
for (; processedOutputs < wrapper->outProms.size(); ++processedOutputs)
{
const std::string& name = wrapper->outsNames[processedOutputs];
Mat m = ngraphBlobToMat(wrapper->req.GetBlob(name));
try
{
CV_Assert(status == InferenceEngine::StatusCode::OK);
wrapper->outProms[processedOutputs].setValue(m.clone());
}
catch (...)
{
try {
wrapper->outProms[processedOutputs].setException(std::current_exception());
} catch(...) {
CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
}
}
}
}
catch (...)
{
std::exception_ptr e = std::current_exception();
for (; processedOutputs < wrapper->outProms.size(); ++processedOutputs)
{
try {
wrapper->outProms[processedOutputs].setException(e);
} catch(...) {
CV_LOG_ERROR(NULL, "DNN: Exception occurred during async inference exception propagation");
}
}
}
wrapper->isReady = true;
}
);
}
if (isAsync)
{
// Copy actual data to infer request's input blobs.
for (const auto& it : cnn.getInputsInfo())
{
const std::string& name = it.first;
auto blobIt = allBlobs.find(name);
Mat srcMat = ngraphBlobToMat(blobIt->second);
Mat dstMat = ngraphBlobToMat(reqWrapper->req.GetBlob(name));
srcMat.copyTo(dstMat);
}
// Set promises to output blobs wrappers.
reqWrapper->makePromises(outBlobsWrappers);
reqWrapper->isReady = false;
reqWrapper->req.StartAsync();
}
else
{
reqWrapper->req.Infer();
}
}
#else
void forwardNgraph(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
Ptr<BackendNode>& node, bool isAsync)
{
CV_Assert(false && "nGraph is not enabled in this OpenCV build");
}
#endif
}}

View File

@ -0,0 +1,144 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#ifndef __OPENCV_DNN_IE_NGRAPH_HPP__
#define __OPENCV_DNN_IE_NGRAPH_HPP__
#include "op_inf_engine.hpp"
#ifdef HAVE_DNN_NGRAPH
#include <ngraph/ngraph.hpp>
#endif // HAVE_DNN_NGRAPH
namespace cv { namespace dnn {
#ifdef HAVE_DNN_NGRAPH
class InfEngineNgraphNode;
class InfEngineNgraphNet
{
public:
InfEngineNgraphNet();
InfEngineNgraphNet(InferenceEngine::CNNNetwork& net);
void addOutput(const std::string& name);
bool isInitialized();
void init(Target targetId);
void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers, bool isAsync);
void initPlugin(InferenceEngine::CNNNetwork& net);
ngraph::ParameterVector setInputs(const std::vector<cv::Mat>& inputs, const std::vector<std::string>& names);
void setUnconnectedNodes(Ptr<InfEngineNgraphNode>& node);
void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);
void createNet(Target targetId);
void setNodePtr(std::shared_ptr<ngraph::Node>* ptr);
private:
void release();
int getNumComponents();
void dfs(std::shared_ptr<ngraph::Node>& node, std::vector<std::shared_ptr<ngraph::Node>>& comp,
std::unordered_map<std::string, bool>& used);
ngraph::ParameterVector inputs_vec;
std::shared_ptr<ngraph::Function> ngraph_function;
std::vector<std::vector<std::shared_ptr<ngraph::Node>>> components;
std::unordered_map<std::string, std::shared_ptr<ngraph::Node>* > all_nodes;
InferenceEngine::ExecutableNetwork netExec;
InferenceEngine::BlobMap allBlobs;
std::string device_name;
bool isInit = false;
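// One pooled infer request together with the promises for its output blobs.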
struct NgraphReqWrapper
{
NgraphReqWrapper() : isReady(true) {}
void makePromises(const std::vector<Ptr<BackendWrapper> >& outs);
InferenceEngine::InferRequest req;
std::vector<cv::AsyncPromise> outProms;
std::vector<std::string> outsNames;
bool isReady;
};
std::vector<Ptr<NgraphReqWrapper> > infRequests;
InferenceEngine::CNNNetwork cnn;
bool hasNetOwner;
std::vector<std::string> requestedOutputs;
std::unordered_set<std::shared_ptr<ngraph::Node>> unconnectedNodes;
};
class InfEngineNgraphNode : public BackendNode
{
public:
InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node);
InfEngineNgraphNode(std::shared_ptr<ngraph::Node>& _node);
void setName(const std::string& name);
// Inference Engine network object that allows obtaining the outputs of this layer.
std::shared_ptr<ngraph::Node> node;
Ptr<InfEngineNgraphNet> net;
};
class NgraphBackendWrapper : public BackendWrapper
{
public:
NgraphBackendWrapper(int targetId, const Mat& m);
NgraphBackendWrapper(Ptr<BackendWrapper> wrapper);
~NgraphBackendWrapper();
static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);
virtual void copyToHost() CV_OVERRIDE;
virtual void setHostDirty() CV_OVERRIDE;
InferenceEngine::DataPtr dataPtr;
InferenceEngine::Blob::Ptr blob;
AsyncArray futureMat;
};
InferenceEngine::DataPtr ngraphDataNode(const Ptr<BackendWrapper>& ptr);
// This is a fake class to run networks from Model Optimizer. Objects of that
// class simulate responses of layers that are imported by OpenCV and supported by
// Inference Engine. The main difference is that they do not perform a forward pass.
class NgraphBackendLayer : public Layer
{
public:
NgraphBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {};
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
std::vector<MatShape> &internals) const CV_OVERRIDE;
virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
OutputArrayOfArrays internals) CV_OVERRIDE;
virtual bool supportBackend(int backendId) CV_OVERRIDE;
private:
InferenceEngine::CNNNetwork t_net;
};
#endif // HAVE_DNN_NGRAPH
void forwardNgraph(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
Ptr<BackendNode>& node, bool isAsync);
}} // namespace cv::dnn
#endif // __OPENCV_DNN_IE_NGRAPH_HPP__

View File

@ -14,6 +14,8 @@ Implementation of Batch Normalization layer.
#include "../op_cuda.hpp" #include "../op_cuda.hpp"
#include "../op_halide.hpp" #include "../op_halide.hpp"
#include "../op_inf_engine.hpp" #include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
#ifdef HAVE_OPENCL #ifdef HAVE_OPENCL
@ -163,7 +165,7 @@ public:
return (backendId == DNN_BACKEND_OPENCV) ||
backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide()) ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && (preferableTarget == DNN_TARGET_CPU || dims == 4));
}
#ifdef HAVE_OPENCL
@ -382,6 +384,20 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
shape[1] = weights_.total();
auto weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), weights_.data);
auto bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), bias_.data);
auto scale_node = std::make_shared<ngraph::op::v1::Multiply>(ieInpNode, weight, ngraph::op::AutoBroadcastType::NUMPY);
auto scale_shift = std::make_shared<ngraph::op::v1::Add>(scale_node, bias, ngraph::op::AutoBroadcastType::NUMPY);
return Ptr<BackendNode>(new InfEngineNgraphNode(scale_shift));
}
#endif // HAVE_DNN_NGRAPH
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{

View File

@ -42,6 +42,7 @@
#include "../precomp.hpp" #include "../precomp.hpp"
#include "../op_cuda.hpp" #include "../op_cuda.hpp"
#include "../op_inf_engine.hpp" #include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#ifdef HAVE_CUDA #ifdef HAVE_CUDA
#include "../cuda4dnn/primitives/reshape.hpp" #include "../cuda4dnn/primitives/reshape.hpp"
@ -64,7 +65,7 @@ public:
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -150,6 +151,18 @@ public:
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
ngraph::NodeVector inp{ieInpNode};
auto blank = std::make_shared<ngraph::op::Concat>(inp, 0);
return Ptr<BackendNode>(new InfEngineNgraphNode(blank));
}
#endif // HAVE_DNN_NGRAPH
};
Ptr<Layer> BlankLayer::create(const LayerParams& params)

View File

@ -45,6 +45,7 @@
#include "../op_cuda.hpp" #include "../op_cuda.hpp"
#include "../op_halide.hpp" #include "../op_halide.hpp"
#include "../op_inf_engine.hpp" #include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "../op_vkcom.hpp" #include "../op_vkcom.hpp"
#ifdef HAVE_OPENCL #ifdef HAVE_OPENCL
@ -113,7 +114,7 @@ public:
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1 && !padding) || // By channels
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && !padding) ||
(backendId == DNN_BACKEND_VKCOM && haveVulkan() && !padding);
}
@ -344,6 +345,23 @@ public:
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(inputs.size() == nodes.size());
ngraph::NodeVector inp_nodes;
for (auto& node : nodes) {
inp_nodes.push_back(node.dynamicCast<InfEngineNgraphNode>()->node);
}
InferenceEngine::DataPtr data = ngraphDataNode(inputs[0]);
auto concat = std::make_shared<ngraph::op::Concat>(inp_nodes, clamp(axis, data->getDims().size()));
return Ptr<BackendNode>(new InfEngineNgraphNode(concat));
}
#endif // HAVE_DNN_NGRAPH
};
Ptr<ConcatLayer> ConcatLayer::create(const LayerParams& params)

View File

@ -33,7 +33,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
backendId == DNN_BACKEND_CUDA;
}

View File

@ -45,7 +45,9 @@
#include "../op_cuda.hpp" #include "../op_cuda.hpp"
#include "../op_halide.hpp" #include "../op_halide.hpp"
#include "../op_inf_engine.hpp" #include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "../op_vkcom.hpp" #include "../op_vkcom.hpp"
#include "opencv2/core/hal/hal.hpp" #include "opencv2/core/hal/hal.hpp"
#include "opencv2/core/hal/intrin.hpp" #include "opencv2/core/hal/intrin.hpp"
#include <iostream> #include <iostream>
@ -270,7 +272,7 @@ public:
}
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (kernel_size.size() == 3)
return preferableTarget == DNN_TARGET_CPU;
@ -618,6 +620,73 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert_N(inputs.size() == 1, nodes.size() == 1);
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<size_t> dims = ieInpNode->get_shape();
CV_Assert(dims.size() == 4 || dims.size() == 5);
const int inpCn = dims[1];
const int outCn = blobs[0].size[0];
const int inpGroupCn = blobs[0].size[1];
const int group = inpCn / inpGroupCn;
std::vector<size_t> kernel_shape = getShape<size_t>(blobs[0]);
auto ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, kernel_shape, blobs[0].data);
if (fusedWeights)
{
if (weightsMat.isContinuous())
{
ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, kernel_shape, weightsMat.data);
}
else
{
Mat newWeights = blobs[0].reshape(1, outCn);
Mat cvWeights = weightsMat.colRange(0, newWeights.cols);
cvWeights.copyTo(newWeights);
ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, kernel_shape, blobs[0].data);
}
}
ngraph::op::PadType pad_type = ngraph::op::PadType::EXPLICIT;
if (!padMode.empty())
pad_type = padMode == "VALID" ? ngraph::op::PadType::VALID : ngraph::op::PadType::SAME_UPPER;
std::shared_ptr<ngraph::Node> conv_node;
if (group != 1) {
conv_node = std::make_shared<ngraph::op::GroupConvolution>(
ieInpNode, ieWeights,
ngraph::Strides(strides),
ngraph::Strides(dilations),
ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(pads_begin.begin(), pads_begin.end())),
ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(pads_end.begin(), pads_end.end())),
ngraph::Strides{},
group,
pad_type);
} else {
conv_node = std::make_shared<ngraph::op::v1::Convolution>(
ieInpNode, ieWeights,
ngraph::Strides(strides),
ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(pads_begin.begin(), pads_begin.end())),
ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(pads_end.begin(), pads_end.end())),
ngraph::Strides(dilations),
pad_type);
}
if (hasBias() || fusedBias)
{
std::vector<size_t> shape(conv_node->get_shape().size(), 1);
shape[1] = outCn;
auto bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), biasvec.data());
auto conv_bias = std::make_shared<ngraph::op::v1::Add>(conv_node, bias, ngraph::op::AutoBroadcastType::NUMPY);
return Ptr<BackendNode>(new InfEngineNgraphNode(conv_bias));
}
return Ptr<BackendNode>(new InfEngineNgraphNode(conv_node));
}
#endif // HAVE_DNN_NGRAPH
class ParallelConv : public cv::ParallelLoopBody
{
public:
@ -1410,7 +1479,24 @@ public:
const int outGroupCn = blobs[0].size[1]; // Weights are in IOHW or IODHW layout
const int group = numOutput / outGroupCn;
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
if (padMode.empty()) {
for (int i = 0; i < adjust_pads.size(); i++) {
if (pads_end[i] < adjust_pads[i])
return false;
}
} else if (padMode == "SAME") {
for (int i = 0; i < adjust_pads.size(); i++) {
if (kernel_size[i] < pads_begin[i] + 1 + adjust_pads[i])
return false;
}
} else if (padMode == "VALID")
return false;
return group == 1;
}
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (kernel_size.size() == 3 && preferableTarget != DNN_TARGET_CPU) {
return false;
@ -2153,6 +2239,70 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
const int outGroupCn = blobs[0].size[1];
const int group = numOutput / outGroupCn;
CV_Assert(group == 1);
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<size_t> kernel_shape = getShape<size_t>(blobs[0]);
auto ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, kernel_shape, blobs[0].data);
if (fusedWeights)
{
int inpCn = blobs[0].size[0];
Mat newWeights = blobs[0].reshape(1, inpCn);
transpose(weightsMat, newWeights);
}
size_t batch = ieInpNode->get_shape()[0];
std::vector<size_t> out_shape = {batch, (size_t)numOutput};
std::vector<size_t> paddings_end;
std::vector<size_t> inpShape = ieInpNode->get_shape();
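// For explicit padding, each output extent is stride * (in - 1) + kernel - pad_begin - pad_end + adjust.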
if (padMode.empty())
{
for (int i = 0; i < pads_end.size(); i++) {
out_shape.push_back(strides[i] * (inpShape[2 + i] - 1) +
kernel_size[i] - pads_begin[i] - pads_end[i] + adjust_pads[i]);
paddings_end.push_back(pads_end[i] - adjust_pads[i]);
}
}
else if (padMode == "SAME")
{
for (int i = 0; i < pads_begin.size(); i++) {
out_shape.push_back(strides[i] * (inpShape[2 + i] - 1) + 1 + adjust_pads[i]);
paddings_end.push_back(kernel_size[i] - pads_begin[i] - 1 - adjust_pads[i]);
}
} else {
paddings_end = pads_end;
}
auto deconv = std::make_shared<ngraph::op::ConvolutionBackpropData>(
ngraph::Shape{out_shape},
ieWeights,
ieInpNode,
ngraph::Strides(strides),
ngraph::Strides(dilations),
ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(pads_begin.begin(), pads_begin.end())),
ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(paddings_end.begin(), paddings_end.end())),
(strides.size() == 2 ? ngraph::Strides{1, 1} : ngraph::Strides{1, 1, 1}));
if (hasBias() || fusedBias)
{
std::vector<size_t> shape(deconv->get_shape().size(), 1);
shape[1] = numOutput;
auto bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), blobs[1].data);
auto deconv_bias = std::make_shared<ngraph::op::v1::Add>(deconv, bias, ngraph::op::AutoBroadcastType::NUMPY);
return Ptr<BackendNode>(new InfEngineNgraphNode(deconv_bias));
}
return Ptr<BackendNode>(new InfEngineNgraphNode(deconv));
}
#endif // HAVE_DNN_NGRAPH
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{

View File

@ -43,6 +43,7 @@
#include "../precomp.hpp" #include "../precomp.hpp"
#include "layers_common.hpp" #include "layers_common.hpp"
#include "../op_inf_engine.hpp" #include "../op_inf_engine.hpp"
#include <float.h> #include <float.h>
#include <string> #include <string>
#include "../nms.inl.hpp" #include "../nms.inl.hpp"
@ -51,6 +52,41 @@
#include "opencl_kernels_dnn.hpp" #include "opencl_kernels_dnn.hpp"
#endif #endif
#ifdef HAVE_DNN_NGRAPH
#include "../ie_ngraph.hpp"
#include <ngraph/op/experimental/layers/detection_output.hpp>
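// DetectionOutput takes five inputs, but OpenCV's layer provides only three;
// a zero-input Dummy op stands in for the two optional auxiliary inputs.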
namespace ngraph {
namespace op {
class Dummy : public Op {
public:
Dummy() : Op("Dummy", {}) {
constructor_validate_and_infer_types();
}
void validate_and_infer_types() override {
set_output_type(0, ngraph::element::Type(), {});
}
std::shared_ptr<Node> copy_with_new_args(const NodeVector& new_args) const override {
if (!new_args.empty())
throw ngraph_error("Incorrect number of new arguments");
return std::make_shared<Dummy>();
}
static constexpr NodeTypeInfo type_info{"Dummy", 1};
const NodeTypeInfo& get_type_info() const override {
return type_info;
}
};
constexpr NodeTypeInfo Dummy::type_info;
} // namespace op
} // namespace ngraph
#endif
namespace cv
{
namespace dnn
@ -198,7 +234,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && !_locPredTransposed && _bboxesNormalized);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -941,6 +977,36 @@ public:
return Ptr<BackendNode>(new InfEngineBackendNode(l));
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(nodes.size() == 3);
auto& box_logits = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
auto& class_preds = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
auto& proposals = nodes[2].dynamicCast<InfEngineNgraphNode>()->node;
ngraph::op::DetectionOutputAttrs attrs;
attrs.num_classes = _numClasses;
attrs.background_label_id = _backgroundLabelId;
attrs.top_k = _topK > 0 ? _topK : _keepTopK;
attrs.variance_encoded_in_target = _varianceEncodedInTarget;
attrs.keep_top_k = {_keepTopK};
attrs.nms_threshold = _nmsThreshold;
attrs.confidence_threshold = _confidenceThreshold;
attrs.share_location = _shareLocation;
attrs.clip_before_nms = _clip;
attrs.code_type = std::string{"caffe.PriorBoxParameter." + _codeType};
attrs.normalized = true;
auto aux_class_preds = std::make_shared<ngraph::op::Dummy>();
auto aux_box_preds = std::make_shared<ngraph::op::Dummy>();
auto det_out = std::make_shared<ngraph::op::DetectionOutput>(box_logits, class_preds,
proposals, aux_class_preds, aux_box_preds, attrs);
return Ptr<BackendNode>(new InfEngineNgraphNode(det_out));
}
#endif // HAVE_DNN_NGRAPH
};
float util::caffe_box_overlap(const util::NormalizedBBox& a, const util::NormalizedBBox& b)

View File

@ -45,7 +45,9 @@
#include "../op_cuda.hpp" #include "../op_cuda.hpp"
#include "../op_halide.hpp" #include "../op_halide.hpp"
#include "../op_inf_engine.hpp" #include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "../op_vkcom.hpp" #include "../op_vkcom.hpp"
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
#include <iostream> #include <iostream>
@ -165,6 +167,15 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
auto node = func.initNgraphAPI(ieInpNode);
return Ptr<BackendNode>(new InfEngineNgraphNode(node));
}
#endif // HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
{
#ifdef HAVE_VULKAN
@ -276,8 +287,10 @@ struct ReLUFunctor
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
@ -389,6 +402,17 @@ struct ReLUFunctor
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
if (slope) {
auto param = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &slope);
return std::make_shared<ngraph::op::PRelu>(node, param);
}
return std::make_shared<ngraph::op::Relu>(node);
}
#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_VULKAN
std::shared_ptr<vkcom::OpBase> initVkCom()
{
@ -420,7 +444,7 @@ struct ReLU6Functor
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@ -509,6 +533,13 @@ struct ReLU6Functor
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
return std::make_shared<ngraph::op::Clamp>(node, minValue, maxValue);
}
#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_VULKAN
std::shared_ptr<vkcom::OpBase> initVkCom()
{
@ -533,7 +564,7 @@ struct TanHFunctor
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@ -598,6 +629,13 @@ struct TanHFunctor
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
return std::make_shared<ngraph::op::Tanh>(node);
}
#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_VULKAN
std::shared_ptr<vkcom::OpBase> initVkCom()
{
@ -686,6 +724,13 @@ struct SwishFunctor
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
CV_Error(Error::StsNotImplemented, "");
}
#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_VULKAN
std::shared_ptr<vkcom::OpBase> initVkCom()
{
@ -775,6 +820,13 @@ struct MishFunctor
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
CV_Error(Error::StsNotImplemented, "");
}
#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_VULKAN
std::shared_ptr<vkcom::OpBase> initVkCom()
{
@ -800,7 +852,7 @@ struct SigmoidFunctor
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@ -865,6 +917,13 @@ struct SigmoidFunctor
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
return std::make_shared<ngraph::op::Sigmoid>(node);
}
#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_VULKAN
std::shared_ptr<vkcom::OpBase> initVkCom()
{
@ -891,7 +950,7 @@ struct ELUFunctor
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@ -956,6 +1015,13 @@ struct ELUFunctor
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
return std::make_shared<ngraph::op::Elu>(node, 1.0);
}
#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_VULKAN
std::shared_ptr<vkcom::OpBase> initVkCom()
{
@ -978,7 +1044,7 @@ struct AbsValFunctor
bool supportBackend(int backendId, int)
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
#endif
return backendId == DNN_BACKEND_OPENCV ||
@ -1048,6 +1114,16 @@ struct AbsValFunctor
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
float coeff = -0.999999f;
// float coeff = preferableTarget == DNN_TARGET_MYRIAD ? -0.999f : -0.999999f;
auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &coeff);
return std::make_shared<ngraph::op::PRelu>(node, slope);
}
#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_VULKAN
std::shared_ptr<vkcom::OpBase> initVkCom()
{
@ -1138,6 +1214,13 @@ struct BNLLFunctor
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
CV_Error(Error::StsNotImplemented, "");
}
#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_VULKAN
std::shared_ptr<vkcom::OpBase> initVkCom()
{
@ -1166,8 +1249,10 @@ struct PowerFunctor
bool supportBackend(int backendId, int targetId)
{
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return (targetId != DNN_TARGET_OPENCL && targetId != DNN_TARGET_OPENCL_FP16) || power == 1.0 || power == 0.5;
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
else
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
@ -1265,6 +1350,22 @@ struct PowerFunctor
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
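// Builds (scale * x + shift) ^ power with NumPy-style broadcasting of the scalar constants.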
auto scale_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
ngraph::Shape{1}, &scale);
auto shift_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
ngraph::Shape{1}, &shift);
auto power_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
ngraph::Shape{1}, &power);
auto mul = std::make_shared<ngraph::op::v1::Multiply>(scale_node, node, ngraph::op::AutoBroadcastType::NUMPY);
auto scale_shift = std::make_shared<ngraph::op::v1::Add>(mul, shift_node, ngraph::op::AutoBroadcastType::NUMPY);
return std::make_shared<ngraph::op::v1::Power>(scale_shift, power_node, ngraph::op::AutoBroadcastType::NUMPY);
}
#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_VULKAN
std::shared_ptr<vkcom::OpBase> initVkCom()
{
@ -1320,7 +1421,7 @@ struct ChannelsPReLUFunctor
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@ -1420,6 +1521,15 @@ struct ChannelsPReLUFunctor
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
{
const size_t numChannels = scale.total();
auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{numChannels}, scale.data);
return std::make_shared<ngraph::op::PRelu>(node, slope);
}
#endif // HAVE_DNN_NGRAPH
#ifdef HAVE_VULKAN
std::shared_ptr<vkcom::OpBase> initVkCom()
{

View File

@ -45,6 +45,7 @@
#include "../op_cuda.hpp" #include "../op_cuda.hpp"
#include "../op_halide.hpp" #include "../op_halide.hpp"
#include "../op_inf_engine.hpp" #include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#ifdef HAVE_OPENCL #ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp" #include "opencl_kernels_dnn.hpp"
@ -109,8 +110,8 @@ public:
return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_CUDA && op != DIV) || // TODO: not implemented, see PR #15811
(backendId == DNN_BACKEND_HALIDE && op != DIV) || // TODO: not implemented, see PR #15811
((((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (preferableTarget != DNN_TARGET_OPENCL || coeffs.empty()))
|| backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && !variableChannels));
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -550,6 +551,36 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
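// Folds the inputs left to right; per-input coefficients, when given, are applied first.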
auto curr_node = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
if (!coeffs.empty()) {
auto coeff = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &coeffs[0]);
curr_node = std::make_shared<ngraph::op::v1::Multiply>(curr_node, coeff, ngraph::op::AutoBroadcastType::NUMPY);
}
for (size_t i = 1; i < nodes.size(); i++)
{
auto next_node = nodes[i].dynamicCast<InfEngineNgraphNode>()->node;
if (!coeffs.empty()) {
auto coeff = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &coeffs[i]);
next_node = std::make_shared<ngraph::op::v1::Multiply>(next_node, coeff, ngraph::op::AutoBroadcastType::NUMPY);
}
switch (op) {
case SUM: curr_node = std::make_shared<ngraph::op::v1::Add>(curr_node, next_node); break;
case PROD: curr_node = std::make_shared<ngraph::op::v1::Multiply>(curr_node, next_node); break;
case DIV: curr_node = std::make_shared<ngraph::op::v1::Divide>(curr_node, next_node); break;
case MAX: curr_node = std::make_shared<ngraph::op::v1::Maximum>(curr_node, next_node); break;
default: CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
}
}
return Ptr<BackendNode>(new InfEngineNgraphNode(curr_node));
}
#endif // HAVE_DNN_NGRAPH
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{

View File

@ -44,6 +44,8 @@
#include "layers_common.hpp" #include "layers_common.hpp"
#include "../op_cuda.hpp" #include "../op_cuda.hpp"
#include "../op_inf_engine.hpp" #include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include <float.h> #include <float.h>
#include <algorithm> #include <algorithm>
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
@ -72,7 +74,7 @@ public:
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -195,6 +197,34 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<size_t> dims = ieInpNode->get_shape();
int numAxes = dims.size();
int startAxis = clamp(_startAxis, numAxes);
int endAxis = clamp(_endAxis, numAxes);
CV_Assert(startAxis >= 0);
CV_Assert(endAxis >= startAxis && endAxis < numAxes);
int64_t flattenedDimensionSize = std::accumulate(dims.begin() + startAxis,
dims.begin() + endAxis + 1, 1, std::multiplies<size_t>());
std::vector<int64_t> outputShapeVec(dims.begin(), dims.begin() + startAxis);
outputShapeVec.push_back(flattenedDimensionSize);
outputShapeVec.insert(outputShapeVec.end(), dims.begin() + endAxis + 1, dims.end());
auto shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
ngraph::Shape({outputShapeVec.size()}), outputShapeVec.data());
auto reshape = std::make_shared<ngraph::op::v1::Reshape>(ieInpNode, shape, true);
return Ptr<BackendNode>(new InfEngineNgraphNode(reshape));
}
#endif // HAVE_DNN_NGRAPH
int _startAxis;
int _endAxis;
};

View File

@ -45,6 +45,8 @@
#include "../op_cuda.hpp" #include "../op_cuda.hpp"
#include "../op_halide.hpp" #include "../op_halide.hpp"
#include "../op_inf_engine.hpp" #include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
#ifdef HAVE_OPENCL #ifdef HAVE_OPENCL
@ -131,7 +133,7 @@ public:
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axis == 1) ||
((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && axis == 1);
}
virtual bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
@ -481,6 +483,31 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
int batch = ieInpNode->get_shape()[0];
std::vector<size_t> data = {(size_t)batch, (size_t)blobs[0].size[1]};
auto new_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, data.data());
auto inp = std::make_shared<ngraph::op::v1::Reshape>(ieInpNode, new_shape, true);
std::vector<size_t> weight_shape{(size_t)blobs[0].size[0], (size_t)blobs[0].size[1]};
auto ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, weight_shape, blobs[0].data);
auto matmul = std::make_shared<ngraph::op::MatMul>(inp, ieWeights, false, true);
if (bias) {
auto bias_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
ngraph::Shape{(size_t)blobs[1].size[1]}, blobs[1].data);
auto fc = std::make_shared<ngraph::op::v1::Add>(matmul, bias_node, ngraph::op::AutoBroadcastType::NUMPY);
return Ptr<BackendNode>(new InfEngineNgraphNode(fc));
}
return Ptr<BackendNode>(new InfEngineNgraphNode(matmul));
}
#endif // HAVE_DNN_NGRAPH
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{

View File

@ -45,7 +45,9 @@
#include "../op_cuda.hpp" #include "../op_cuda.hpp"
#include "../op_halide.hpp" #include "../op_halide.hpp"
#include "../op_inf_engine.hpp" #include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "../op_vkcom.hpp" #include "../op_vkcom.hpp"
#include "opencv2/imgproc.hpp" #include "opencv2/imgproc.hpp"
#include "opencv2/dnn/shape_utils.hpp" #include "opencv2/dnn/shape_utils.hpp"
#include "opencv2/core/hal/hal.hpp" #include "opencv2/core/hal/hal.hpp"
@ -97,8 +99,12 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) {
return bias == (int)bias; return bias == (int)bias;
}
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
return type == CHANNEL_NRM && bias == (int)bias;
}
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
@ -457,6 +463,19 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
float alphaSize = alpha;
if (!normBySize)
alphaSize *= (type == SPATIAL_NRM ? size*size : size);
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
auto lrn = std::make_shared<ngraph::op::LRN>(ieInpNode, (double)alphaSize, (double)beta, (double)bias, (size_t)size);
return Ptr<BackendNode>(new InfEngineNgraphNode(lrn));
}
#endif // HAVE_DNN_NGRAPH
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{

@@ -43,6 +43,8 @@
#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#ifdef HAVE_OPENCL
@@ -117,7 +119,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
-if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return !zeroDev && (preferableTarget != DNN_TARGET_MYRIAD || eps <= 1e-7f);
else
#endif // HAVE_INF_ENGINE
@@ -382,6 +384,16 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
auto mvn = std::make_shared<ngraph::op::MVN>(ieInpNode, acrossChannels, normVariance, eps);
return Ptr<BackendNode>(new InfEngineNgraphNode(mvn));
}
#endif // HAVE_DNN_NGRAPH
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{

@@ -44,6 +44,7 @@
#include "layers_common.hpp"
#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#ifdef HAVE_CUDA
#include "../cuda4dnn/primitives/normalize_bbox.hpp"
@@ -69,7 +70,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
-if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
if (pnorm != 2)
return false;
@@ -342,6 +343,45 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
const size_t batch = ieInpNode->get_shape()[0];
const size_t numChannels = ieInpNode->get_shape()[1];
std::vector<int64_t> axes_data;
if (!acrossSpatial) {
axes_data.push_back(1);
} else {
axes_data.resize(ieInpNode->get_shape().size());
std::iota(axes_data.begin(), axes_data.end(), 0);
}
auto axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes_data.size()}, axes_data);
auto norm = std::make_shared<ngraph::op::NormalizeL2>(ieInpNode, axes, epsilon, ngraph::op::EpsMode::ADD);
CV_Assert(blobs.empty() || numChannels == blobs[0].total());
std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
shape[0] = blobs.empty() ? 1 : batch;
shape[1] = numChannels;
std::shared_ptr<ngraph::op::Constant> weight;
if (blobs.empty())
{
std::vector<float> ones(numChannels, 1);
weight = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), ones.data());
}
else
{
// A weight shape with more than one dimension implies channel_shared = false.
weight = std::make_shared<ngraph::op::Constant>(
ngraph::element::f32, ngraph::Shape(shape), blobs[0].data);
}
auto mul = std::make_shared<ngraph::op::v1::Multiply>(norm, weight, ngraph::op::AutoBroadcastType::NUMPY);
return Ptr<BackendNode>(new InfEngineNgraphNode(mul));
}
#endif // HAVE_DNN_NGRAPH
private:
int startAxis, endAxis;
};

@@ -14,6 +14,8 @@ Implementation of padding layer, which adds paddings to input blob.
#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include <vector>
#ifdef HAVE_CUDA
@@ -100,7 +102,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
-if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
(preferableTarget != DNN_TARGET_MYRIAD ||
(dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0));
@@ -235,6 +237,29 @@ public:
}
#endif
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<int64_t> begins(paddings.size(), 0), ends(paddings.size(), 0);
for (int i = 0; i < paddings.size(); ++i)
{
begins[i] = static_cast<int64_t>(paddings[i].first);
ends[i] = static_cast<int64_t>(paddings[i].second);
}
auto padding_below = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{begins.size()}, begins.data());
auto padding_above = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{ends.size()}, ends.data());
auto pad_mode = paddingType == "constant" ? ngraph::op::PadMode::CONSTANT : ngraph::op::PadMode::REFLECT; // SYMMETRIC
auto arg_pad_value = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{}, &paddingValue);
auto pad = paddingType == "constant" ?
std::make_shared<ngraph::op::v1::Pad>(ieInpNode, padding_below, padding_above, arg_pad_value, pad_mode) :
std::make_shared<ngraph::op::v1::Pad>(ieInpNode, padding_below, padding_above, pad_mode);
return Ptr<BackendNode>(new InfEngineNgraphNode(pad));
}
#endif
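// Illustrative note (not part of the patch): paddings[] stores (before, after) pairs per
// axis, so for a 4D NCHW input padded by 2 pixels on each spatial side,
//   paddings = {{0,0},{0,0},{2,2},{2,2}}  ->  begins = {0,0,2,2}, ends = {0,0,2,2}
// which are the two i64 constants fed to ngraph::op::v1::Pad above.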
private:
std::vector<std::pair<int, int> > paddings; // Pairs pad before, pad after.
std::vector<Range> dstRanges;

@@ -44,7 +44,9 @@
#include "layers_common.hpp"
#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "../op_vkcom.hpp" #include "../op_vkcom.hpp"
#include <float.h> #include <float.h>
#include <algorithm> #include <algorithm>
@@ -113,7 +115,7 @@ public:
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
-(backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()) ||
+((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine()) ||
(backendId == DNN_BACKEND_VKCOM && haveVulkan());
}
@@ -410,6 +412,18 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
auto tr_axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
ngraph::Shape({_order.size()}), _order.data());
auto transpose = std::make_shared<ngraph::op::Transpose>(ieInpNode, tr_axes);
return Ptr<BackendNode>(new InfEngineNgraphNode(transpose));
}
#endif // HAVE_DNN_NGRAPH
size_t _count;
std::vector<size_t> _order;

@@ -46,7 +46,15 @@
#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#ifdef HAVE_DNN_NGRAPH
#include "../ie_ngraph.hpp"
#include <ngraph/op/experimental/layers/roi_pooling.hpp>
#include <ngraph/op/experimental/layers/psroi_pooling.hpp>
#endif
#include "../op_vkcom.hpp" #include "../op_vkcom.hpp"
#include <float.h> #include <float.h>
#include <algorithm> #include <algorithm>
#include <numeric> #include <numeric>
@@ -172,7 +180,7 @@ public:
{
return type == MAX || type == AVE;
}
-else if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (computeMaxIdx)
return false;
@@ -193,6 +201,9 @@ public:
return false;
#endif
}
else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
return type != STOCHASTIC;
}
else
{
if (kernel_size.size() == 3)
@@ -482,6 +493,50 @@ public:
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert_N((inputs.size() == 1 && (type == MAX || type == AVE)) || inputs.size() == 2, nodes.size() == inputs.size());
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
ngraph::op::PadType pad_type = ngraph::op::PadType::EXPLICIT;
if (!padMode.empty())
pad_type = padMode == "VALID" ? ngraph::op::PadType::VALID : ngraph::op::PadType::SAME_UPPER;
auto rounding_type = ceilMode ? ngraph::op::RoundingType::CEIL : ngraph::op::RoundingType::FLOOR;
if (type == AVE) {
auto exclude_pad = !avePoolPaddedArea;
auto ave_pool = std::make_shared<ngraph::op::v1::AvgPool>(ieInpNode, ngraph::Strides(strides),
ngraph::Shape(pads_begin), ngraph::Shape(pads_end), ngraph::Shape(kernel_size),
exclude_pad, rounding_type, pad_type);
return Ptr<BackendNode>(new InfEngineNgraphNode(ave_pool));
}
else if (type == MAX) {
auto max_pool = std::make_shared<ngraph::op::v1::MaxPool>(ieInpNode, ngraph::Strides(strides),
ngraph::Shape(pads_begin), ngraph::Shape(pads_end), ngraph::Shape(kernel_size),
rounding_type, pad_type);
return Ptr<BackendNode>(new InfEngineNgraphNode(max_pool));
}
else if (type == ROI) {
auto& coords = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
auto roi = std::make_shared<ngraph::op::ROIPooling>(ieInpNode, coords,
ngraph::Shape{(size_t)pooledSize.height, (size_t)pooledSize.width}, spatialScale, "max");
return Ptr<BackendNode>(new InfEngineNgraphNode(roi));
}
else if (type == PSROI) {
auto& coords = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
auto psroi = std::make_shared<ngraph::op::PSROIPooling>(ieInpNode, coords,
(size_t)psRoiOutChannels, (size_t)pooledSize.width, spatialScale, 1, 1, "average");
return Ptr<BackendNode>(new InfEngineNgraphNode(psroi));
}
else
CV_Error(Error::StsNotImplemented, "Unsupported pooling type");
}
#endif // HAVE_DNN_NGRAPH
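// Illustrative summary (not part of the patch) of the mapping above:
//   AVE   -> ngraph::op::v1::AvgPool (exclude_pad = !avePoolPaddedArea)
//   MAX   -> ngraph::op::v1::MaxPool
//   ROI   -> ngraph::op::ROIPooling with method "max"
//   PSROI -> ngraph::op::PSROIPooling with method "average"
// STOCHASTIC pooling is already rejected by supportBackend() for the nGraph backend.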
class PoolingInvoker : public ParallelLoopBody
{
public:

@@ -44,7 +44,15 @@
#include "layers_common.hpp"
#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
#ifdef HAVE_DNN_NGRAPH
#include "../ie_ngraph.hpp"
#include <ngraph/op/experimental/layers/prior_box.hpp>
#include <ngraph/op/experimental/layers/prior_box_clustered.hpp>
#endif
#include "../op_vkcom.hpp" #include "../op_vkcom.hpp"
#include <float.h> #include <float.h>
#include <algorithm> #include <algorithm>
#include <cmath> #include <cmath>
@@ -279,9 +287,13 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_DNN_NGRAPH
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return _explicitSizes || _stepX == _stepY;
#endif
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
-(backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() &&
+(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && haveInfEngine() &&
( _explicitSizes || (_minSize.size() == 1 && _maxSize.size() <= 1)))
|| (backendId == DNN_BACKEND_VKCOM && haveVulkan());
}
@@ -605,6 +617,66 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(nodes.size() == 2);
auto layer = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
auto image = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
auto layer_shape = std::make_shared<ngraph::op::ShapeOf>(layer);
auto image_shape = std::make_shared<ngraph::op::ShapeOf>(image);
auto lower_bounds = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{2});
auto upper_bounds = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{4});
auto strides = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{1});
auto slice_layer = std::make_shared<ngraph::op::DynSlice>(layer_shape, lower_bounds, upper_bounds, strides);
auto slice_image = std::make_shared<ngraph::op::DynSlice>(image_shape, lower_bounds, upper_bounds, strides);
if (_explicitSizes)
{
CV_Assert_N(!_boxWidths.empty(), !_boxHeights.empty(), !_variance.empty());
CV_Assert(_boxWidths.size() == _boxHeights.size());
ngraph::op::PriorBoxClusteredAttrs attrs;
attrs.widths = _boxWidths;
attrs.heights = _boxHeights;
attrs.clip = _clip;
CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
attrs.offset = _offsetsX[0];
attrs.step_heights = _stepY;
attrs.step_widths = _stepX;
attrs.variances = _variance;
auto priorBox = std::make_shared<ngraph::op::PriorBoxClustered>(slice_layer, slice_image, attrs);
auto axis = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{0});
auto unsqueeze = std::make_shared<ngraph::op::Unsqueeze>(priorBox, axis);
return Ptr<BackendNode>(new InfEngineNgraphNode(unsqueeze));
}
else
{
ngraph::op::PriorBoxAttrs attrs;
attrs.min_size = _minSize;
attrs.max_size = _maxSize;
// doesn't work with empty aspectRatio
attrs.aspect_ratio = !_aspectRatios.empty()? _aspectRatios : std::vector<float>{1.0f};
attrs.clip = _clip;
attrs.flip = false;
attrs.variance = _variance;
CV_CheckEQ(_offsetsX.size(), (size_t)1, ""); CV_CheckEQ(_offsetsY.size(), (size_t)1, ""); CV_CheckEQ(_offsetsX[0], _offsetsY[0], "");
attrs.offset = _offsetsX[0];
attrs.step = _stepX;
attrs.scale_all_sizes = !_aspectRatios.empty();
auto priorBox = std::make_shared<ngraph::op::PriorBox>(slice_layer, slice_image, attrs);
auto axis = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{1}, std::vector<int64_t>{0});
auto unsqueeze = std::make_shared<ngraph::op::Unsqueeze>(priorBox, axis);
return Ptr<BackendNode>(new InfEngineNgraphNode(unsqueeze));
}
}
#endif // HAVE_DNN_NGRAPH
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{

@@ -8,6 +8,11 @@
#include "layers_common.hpp"
#include "../op_inf_engine.hpp"
#ifdef HAVE_DNN_NGRAPH
#include "../ie_ngraph.hpp"
#include <ngraph/op/experimental/layers/proposal.hpp>
#endif
namespace cv { namespace dnn {
class ProposalLayerImpl CV_FINAL : public ProposalLayer
@@ -87,7 +92,7 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
-(backendId == DNN_BACKEND_INFERENCE_ENGINE && preferableTarget != DNN_TARGET_MYRIAD);
+((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && preferableTarget != DNN_TARGET_MYRIAD);
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -348,6 +353,45 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(nodes.size() == 3);
ngraph::op::ProposalAttrs attr;
attr.base_size = baseSize;
attr.nms_thresh = nmsThreshold;
attr.feat_stride = featStride;
attr.min_size = 16;
attr.pre_nms_topn = keepTopBeforeNMS;
attr.post_nms_topn = keepTopAfterNMS;
std::vector<float> ratiosVec(ratios.size());
for (int i = 0; i < ratios.size(); ++i)
ratiosVec[i] = ratios.get<float>(i);
attr.ratio = ratiosVec;
std::vector<float> scalesVec(scales.size());
for (int i = 0; i < scales.size(); ++i)
scalesVec[i] = scales.get<float>(i);
attr.scale = scalesVec;
auto& class_probs = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
auto& class_logits = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
auto& image_shape = nodes[2].dynamicCast<InfEngineNgraphNode>()->node;
CV_Assert_N(image_shape->get_shape().size() == 2, image_shape->get_shape().front() == 1);
auto shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
ngraph::Shape{1},
std::vector<int64_t>{(int64_t)image_shape->get_shape().back()});
auto reshape = std::make_shared<ngraph::op::v1::Reshape>(image_shape, shape, true);
auto proposal = std::make_shared<ngraph::op::Proposal>(class_probs, class_logits, reshape, attr);
return Ptr<BackendNode>(new InfEngineNgraphNode(proposal));
}
#endif // HAVE_DNN_NGRAPH
private:
// The first half of the channels are background scores. We need only the second half.
static Mat getObjectScores(const Mat& m)

@@ -43,6 +43,12 @@
#include "../precomp.hpp"
#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
#ifdef HAVE_DNN_NGRAPH
#include "../ie_ngraph.hpp"
#include <ngraph/op/experimental/layers/reorg_yolo.hpp>
#endif
#include <opencv2/dnn/shape_utils.hpp>
#include <opencv2/dnn/all_layers.hpp>
@@ -143,7 +149,8 @@ public:
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
-backendId == DNN_BACKEND_INFERENCE_ENGINE;
+backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
#ifdef HAVE_OPENCL
@@ -207,6 +214,16 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
auto reorg = std::make_shared<ngraph::op::ReorgYolo>(ieInpNode, ngraph::Strides{(size_t)reorgStride});
return Ptr<BackendNode>(new InfEngineNgraphNode(reorg));
}
#endif // HAVE_DNN_NGRAPH
virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{

@@ -44,6 +44,8 @@
#include "layers_common.hpp"
#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#ifdef HAVE_CUDA
@@ -186,7 +188,7 @@ public:
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
-(backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
+((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine());
}
bool getMemoryShapes(const std::vector<MatShape> &inputs,
@@ -287,6 +289,21 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(outShapes.size() == 1);
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<int64_t> out(outShapes[0].begin(), outShapes[0].end());
auto shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
ngraph::Shape{out.size()}, out.data());
auto reshape = std::make_shared<ngraph::op::v1::Reshape>(ieInpNode, shape, true);
return Ptr<BackendNode>(new InfEngineNgraphNode(reshape));
}
#endif // HAVE_DNN_NGRAPH
private:
std::vector<MatShape> outShapes;
};

@@ -10,6 +10,11 @@
#include "../op_inf_engine.hpp"
#include <opencv2/imgproc.hpp>
#ifdef HAVE_DNN_NGRAPH
#include "../ie_ngraph.hpp"
#include <ngraph/op/experimental/layers/interpolate.hpp>
#endif
#ifdef HAVE_CUDA
#include "../cuda4dnn/primitives/resize.hpp"
using namespace cv::dnn::cuda4dnn;
@@ -61,7 +66,8 @@ public:
return interpolation == "nearest" || interpolation == "bilinear";
#ifdef HAVE_INF_ENGINE
-if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
{
return (interpolation == "nearest" && scaleWidth == scaleHeight) ||
(interpolation == "bilinear");
@@ -221,6 +227,35 @@ public:
return Ptr<BackendNode>();
}
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
ngraph::op::InterpolateAttrs attrs;
attrs.pads_begin.push_back(0);
attrs.pads_end.push_back(0);
attrs.axes = ngraph::AxisSet{2, 3};
attrs.align_corners = false;
if (interpolation == "nearest") {
attrs.mode = "nearest";
attrs.antialias = false;
} else if (interpolation == "bilinear") {
attrs.mode = "linear";
} else {
CV_Error(Error::StsNotImplemented, "Unsupported interpolation: " + interpolation);
}
std::vector<int64_t> shape = {outHeight, outWidth};
auto out_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, shape.data());
auto interp = std::make_shared<ngraph::op::Interpolate>(ieInpNode, out_shape, attrs);
return Ptr<BackendNode>(new InfEngineNgraphNode(interp));
}
#endif // HAVE_DNN_NGRAPH
protected:
int outWidth, outHeight, zoomFactorWidth, zoomFactorHeight;
String interpolation;
@@ -254,8 +289,12 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
#ifdef HAVE_INF_ENGINE
if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019
|| backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return true;
#endif
return backendId == DNN_BACKEND_OPENCV ||
-backendId == DNN_BACKEND_INFERENCE_ENGINE ||
backendId == DNN_BACKEND_CUDA;
}
@@ -292,6 +331,23 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
ngraph::op::InterpolateAttrs attrs;
attrs.pads_begin.push_back(0);
attrs.pads_end.push_back(0);
attrs.axes = ngraph::AxisSet{2, 3};
attrs.mode = "linear";
std::vector<int64_t> shape = {outHeight, outWidth};
auto out_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{2}, shape.data());
auto interp = std::make_shared<ngraph::op::Interpolate>(ieInpNode, out_shape, attrs);
return Ptr<BackendNode>(new InfEngineNgraphNode(interp));
}
#endif // HAVE_DNN_NGRAPH
};
Ptr<Layer> InterpLayer::create(const LayerParams& params)

@@ -14,6 +14,8 @@ Implementation of Scale layer.
#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include <opencv2/dnn/shape_utils.hpp>
#ifdef HAVE_CUDA
@@ -59,7 +61,7 @@ public:
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
backendId == DNN_BACKEND_HALIDE ||
-(backendId == DNN_BACKEND_INFERENCE_ENGINE && axis == 1);
+((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && axis == 1);
}
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
@@ -252,6 +254,34 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(!blobs.empty());
const size_t numChannels = blobs[0].total();
auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<size_t> shape(ieInpNode->get_shape().size(), 1);
shape[1] = numChannels;
auto weight = hasWeights ?
std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
ngraph::Shape(shape), blobs[0].data) :
std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
ngraph::Shape(shape), std::vector<float>(numChannels, 1).data());
auto bias = hasBias ?
std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
ngraph::Shape(shape), blobs.back().data) :
std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
ngraph::Shape(shape), std::vector<float>(numChannels, 0).data());
auto scale_node = std::make_shared<ngraph::op::v1::Multiply>(ieInpNode, weight, ngraph::op::AutoBroadcastType::NUMPY);
auto scale_shift = std::make_shared<ngraph::op::v1::Add>(scale_node, bias, ngraph::op::AutoBroadcastType::NUMPY);
return Ptr<BackendNode>(new InfEngineNgraphNode(scale_shift));
}
#endif // HAVE_DNN_NGRAPH
void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
{
scale = hasWeights ? blobs[0] : Mat();

@@ -43,6 +43,8 @@
#include "../precomp.hpp"
#include "../op_cuda.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "layers_common.hpp" #include "layers_common.hpp"
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
@@ -119,7 +121,7 @@ public:
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
-(backendId == DNN_BACKEND_INFERENCE_ENGINE &&
+((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
#ifdef HAVE_INF_ENGINE
INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
#endif
@@ -345,6 +347,35 @@ public:
}
#endif
#endif
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert_N(nodes.size() <= 2);
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
CV_Assert(sliceRanges[0].size() == ieInpNode->get_shape().size());
std::vector<int64_t> offsets, dims;
for (int i = 0; i < sliceRanges[0].size(); ++i)
{
offsets.push_back(sliceRanges[0][i].start);
dims.push_back(sliceRanges[0][i].end);
}
auto lower_bounds = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
ngraph::Shape{offsets.size()}, offsets.data());
auto upper_bounds = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
ngraph::Shape{dims.size()}, dims.data());
auto strides = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
ngraph::Shape{dims.size()}, std::vector<int64_t>((int64_t)dims.size(), 1));
auto slice = std::make_shared<ngraph::op::DynSlice>(ieInpNode, lower_bounds, upper_bounds,
strides, ngraph::AxisSet{}, ngraph::AxisSet{});
return Ptr<BackendNode>(new InfEngineNgraphNode(slice));
}
#endif // HAVE_DNN_NGRAPH
};
class CropLayerImpl CV_FINAL : public SliceLayerImpl

@@ -45,7 +45,9 @@
#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "../op_vkcom.hpp"
#include <algorithm>
#include <stdlib.h>
using std::max;
@@ -98,7 +100,7 @@ public:
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_CUDA ||
(backendId == DNN_BACKEND_HALIDE && haveHalide() && axisRaw == 1) ||
-(backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() && !logSoftMax) ||
+((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && haveInfEngine() && !logSoftMax) ||
(backendId == DNN_BACKEND_VKCOM && haveVulkan());
}
@@ -357,6 +359,17 @@ public:
}
#endif // HAVE_INF_ENGINE
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
int axis = clamp(axisRaw, ieInpNode->get_shape().size());
auto softmax = std::make_shared<ngraph::op::v1::Softmax>(ieInpNode, axis);
return Ptr<BackendNode>(new InfEngineNgraphNode(softmax));
}
#endif // HAVE_DNN_NGRAPH
int64 getFLOPS(const std::vector<MatShape> &inputs,
const std::vector<MatShape> &outputs) const CV_OVERRIDE
{

@@ -21,6 +21,54 @@ namespace cv { namespace dnn {
#ifdef HAVE_INF_ENGINE
static Backend parseInferenceEngineBackendType(const cv::String& backend)
{
CV_Assert(!backend.empty());
if (backend == CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
if (backend == CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API)
return DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019;
CV_Error(Error::StsBadArg, cv::format("Unknown IE backend: %s", backend.c_str()));
}
static const char* dumpInferenceEngineBackendType(Backend backend)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
return CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
return CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API;
CV_Error(Error::StsBadArg, cv::format("Invalid backend ID for IE: %d", backend));
}
Backend& getInferenceEngineBackendTypeParam()
{
static Backend param = parseInferenceEngineBackendType(
utils::getConfigurationParameterString("OPENCV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019_TYPE",
#ifdef HAVE_NGRAPH
CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API // future: CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH
#else
CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API
#endif
)
);
return param;
}
CV__DNN_INLINE_NS_BEGIN
cv::String getInferenceEngineBackendType()
{
return dumpInferenceEngineBackendType(getInferenceEngineBackendTypeParam());
}
cv::String setInferenceEngineBackendType(const cv::String& newBackendType)
{
Backend newBackend = parseInferenceEngineBackendType(newBackendType);
Backend& param = getInferenceEngineBackendTypeParam();
Backend old = param;
param = newBackend;
return dumpInferenceEngineBackendType(old);
}
CV__DNN_INLINE_NS_END
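// Usage sketch (illustrative, assuming an IE-enabled build; not part of the patch):
// the IE backend implementation can be switched at runtime through the new public API,
// or via the OPENCV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019_TYPE environment
// variable that is read above.
//
//   cv::String previous = cv::dnn::setInferenceEngineBackendType(
//       CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);  // returns the previously active type
//   CV_LOG_INFO(NULL, "IE backend: " << cv::dnn::getInferenceEngineBackendType());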
// For networks whose input layer has an empty name, IE generates a name like id[some_number].
// OpenCV lets users use an empty input name, so to prevent unexpected naming
// we use a predefined name instead.
@@ -161,38 +209,25 @@ private:
InferenceEngine::CNNLayer cnnLayer;
};
-class InfEngineExtension : public InferenceEngine::IExtension
-{
-public:
-virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
-virtual void Unload() noexcept {}
-virtual void Release() noexcept {}
-virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
-virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
-InferenceEngine::ResponseDesc*) noexcept
-{
-return InferenceEngine::StatusCode::OK;
-}
-InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
-const InferenceEngine::CNNLayer* cnnLayer,
-InferenceEngine::ResponseDesc* resp) noexcept
-{
-if (cnnLayer->type != kOpenCVLayersType)
-return InferenceEngine::StatusCode::NOT_IMPLEMENTED;
-factory = new InfEngineCustomLayerFactory(cnnLayer);
-return InferenceEngine::StatusCode::OK;
-}
-};
+InferenceEngine::StatusCode InfEngineExtension::getFactoryFor(
+InferenceEngine::ILayerImplFactory*& factory,
+const InferenceEngine::CNNLayer* cnnLayer,
+InferenceEngine::ResponseDesc* resp
+) noexcept
+{
+if (cnnLayer->type != kOpenCVLayersType)
+return InferenceEngine::StatusCode::NOT_IMPLEMENTED;
+factory = new InfEngineCustomLayerFactory(cnnLayer);
+return InferenceEngine::StatusCode::OK;
+}
InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer)
-: BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
+: BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019), layer(_layer) {}
InfEngineBackendNode::InfEngineBackendNode(Ptr<Layer>& cvLayer_, std::vector<Mat*>& inputs,
std::vector<Mat>& outputs,
std::vector<Mat>& internals)
-: BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(cvLayer_->name),
+: BackendNode(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019), layer(cvLayer_->name),
cvLayer(cvLayer_)
{
CV_Assert(!cvLayer->name.empty());
@@ -269,7 +304,7 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
#endif
}
-void InfEngineBackendNet::init(int targetId)
+void InfEngineBackendNet::init(Target targetId)
{
if (!hasNetOwner)
{
@@ -403,7 +438,7 @@ static InferenceEngine::Layout estimateLayout(const Mat& m)
static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std::string& name = "")
{
-std::vector<size_t> shape(&m.size[0], &m.size[0] + m.dims);
+std::vector<size_t> shape = getShape<size_t>(m);
if (m.type() == CV_32F)
return InferenceEngine::DataPtr(new InferenceEngine::Data(name,
{InferenceEngine::Precision::FP32, shape, estimateLayout(m)}));
@@ -429,7 +464,7 @@ InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<s
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout)
{
-std::vector<size_t> shape(&m.size[0], &m.size[0] + m.dims);
+std::vector<size_t> shape = getShape<size_t>(m);
return wrapToInfEngineBlob(m, shape, layout);
}
@@ -461,14 +496,14 @@ InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr)
}
InfEngineBackendWrapper::InfEngineBackendWrapper(int targetId, const cv::Mat& m)
-: BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE, targetId)
+: BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, targetId)
{
dataPtr = wrapToInfEngineDataNode(m);
blob = wrapToInfEngineBlob(m, estimateLayout(m));
}
InfEngineBackendWrapper::InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper)
-: BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE, wrapper->targetId)
+: BackendWrapper(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, wrapper->targetId)
{
Ptr<InfEngineBackendWrapper> ieWrapper = wrapper.dynamicCast<InfEngineBackendWrapper>();
CV_Assert(!ieWrapper.empty());
@@ -506,7 +541,7 @@ static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr>& getShar
return sharedPlugins;
}
#else
-static InferenceEngine::Core& getCore()
+InferenceEngine::Core& getCore()
{
static InferenceEngine::Core core;
return core;
@@ -565,7 +600,11 @@ static bool detectMyriadX_()
#else
try
{
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R3)
auto netExec = getCore().LoadNetwork(cnn, "MYRIAD", {{"VPU_PLATFORM", "VPU_2480"}});
#else
auto netExec = getCore().LoadNetwork(cnn, "MYRIAD", {{"VPU_MYRIAD_PLATFORM", "VPU_MYRIAD_2480"}});
#endif
#endif
auto infRequest = netExec.CreateInferRequest();
} catch(...) {
@@ -704,7 +743,7 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
}
catch (const std::exception& ex)
{
-CV_Error(Error::StsAssert, format("Failed to initialize Inference Engine backend: %s", ex.what()));
+CV_Error(Error::StsError, format("Failed to initialize Inference Engine backend (device = %s): %s", device_name.c_str(), ex.what()));
}
}
@@ -744,6 +783,7 @@ void InfEngineBackendNet::InfEngineReqWrapper::makePromises(const std::vector<Pt
void InfEngineBackendNet::forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
bool isAsync)
{
CV_LOG_DEBUG(NULL, "InfEngineBackendNet::forward(" << (isAsync ? "async" : "sync") << ")");
// Look for finished requests.
Ptr<InfEngineReqWrapper> reqWrapper;
for (auto& wrapper : infRequests)
@@ -791,6 +831,8 @@ void InfEngineBackendNet::forward(const std::vector<Ptr<BackendWrapper> >& outBl
infRequestPtr->SetCompletionCallback(
[](InferenceEngine::IInferRequest::Ptr request, InferenceEngine::StatusCode status)
{
CV_LOG_DEBUG(NULL, "DNN(IE): completionCallback(" << (int)status << ")");
InfEngineReqWrapper* wrapper;
request->GetUserData((void**)&wrapper, 0);
CV_Assert(wrapper && "Internal error");
@@ -916,8 +958,9 @@ bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
bool InfEngineBackendLayer::supportBackend(int backendId)
{
CV_LOG_DEBUG(NULL, "InfEngineBackendLayer::supportBackend(" << backendId << ")");
return backendId == DNN_BACKEND_DEFAULT ||
-(backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
+(backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
}
void InfEngineBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
@@ -1030,7 +1073,18 @@ cv::String getInferenceEngineVPUType()
static cv::String vpu_type = getInferenceEngineVPUType_();
return vpu_type;
}
#else // HAVE_INF_ENGINE
cv::String getInferenceEngineBackendType()
{
CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
}
cv::String setInferenceEngineBackendType(const cv::String& newBackendType)
{
CV_UNUSED(newBackendType);
CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");
}
cv::String getInferenceEngineVPUType()
{
CV_Error(Error::StsNotImplemented, "This OpenCV build doesn't include InferenceEngine support");

@@ -71,6 +71,8 @@ namespace cv { namespace dnn {
#ifdef HAVE_INF_ENGINE
Backend& getInferenceEngineBackendTypeParam();
class InfEngineBackendNet
{
public:
@@ -88,7 +90,7 @@ public:
bool isInitialized();
-void init(int targetId);
+void init(Target targetId);
void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers,
bool isAsync);
@@ -210,12 +212,45 @@ private:
InferenceEngine::CNNNetwork t_net;
};
class InfEngineExtension : public InferenceEngine::IExtension
{
public:
virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
virtual void Unload() noexcept {}
virtual void Release() noexcept {}
virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
InferenceEngine::ResponseDesc*) noexcept
{
return InferenceEngine::StatusCode::OK;
}
InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
const InferenceEngine::CNNLayer* cnnLayer,
InferenceEngine::ResponseDesc* resp) noexcept;
};
CV__DNN_INLINE_NS_BEGIN
bool isMyriadX();
CV__DNN_INLINE_NS_END
InferenceEngine::Core& getCore();
template<typename T = size_t>
static inline std::vector<T> getShape(const Mat& mat)
{
std::vector<T> result(mat.dims);
for (int i = 0; i < mat.dims; i++)
result[i] = (T)mat.size[i];
return result;
}
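// Usage sketch (illustrative, not part of the patch): getShape() converts a Mat's
// dimensions into an IE/nGraph-style shape vector, e.g.
//   cv::Mat blob(std::vector<int>{1, 3, 224, 224}, CV_32F);
//   std::vector<size_t> shape = getShape(blob);  // {1, 3, 224, 224}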
#endif // HAVE_INF_ENGINE
bool haveInfEngine();

@@ -1350,6 +1350,8 @@ void TFImporter::populateNet(Net dstNet)
setKSize(layerParams, layer);
setStrides(layerParams, layer);
setPadding(layerParams, layer);
// Test_TensorFlow_nets.EAST_text_detection/1, NGRAPH/CPU
layerParams.set("ceil_mode", false);
int id = dstNet.addLayer(name, "Pooling", layerParams);
layer_id[name] = id;

@@ -78,7 +78,7 @@
{
if (outputLayer == "detection_out")
{
-if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
// Inference Engine produces detections terminated by a row which starts from -1.
out = out.reshape(1, out.total() / 7);
@@ -146,7 +146,7 @@ TEST_P(DNNTestNetwork, Inception_5h)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
double l1 = default_l1, lInf = default_lInf;
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL))
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL))
{
l1 = 1.72e-5;
lInf = 8e-4;
@@ -162,8 +162,10 @@ TEST_P(DNNTestNetwork, Inception_5h)
TEST_P(DNNTestNetwork, ENet)
{
applyTestTag(target == DNN_TARGET_CPU ? "" : CV_TEST_TAG_MEMORY_512MB);
-if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
processNet("dnn/Enet-model-best.net", "", Size(512, 512), "l367_Deconvolution",
@@ -193,7 +195,7 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe_Different_Width_Height)
if (backend == DNN_BACKEND_HALIDE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE)
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
@@ -227,13 +229,13 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow_Different_Width_Height)
if (backend == DNN_BACKEND_HALIDE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE)
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
-applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
Mat sample = imread(findDataFile("dnn/street.png"));
@@ -282,9 +284,9 @@ TEST_P(DNNTestNetwork, OpenPose_pose_coco)
if (backend == DNN_BACKEND_HALIDE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
const float l1 = (target == DNN_TARGET_MYRIAD) ? 0.0056 : 0.0;
@@ -302,9 +304,9 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi)
if (backend == DNN_BACKEND_HALIDE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
// output range: [-0.001, 0.97]
@@ -322,9 +324,9 @@ TEST_P(DNNTestNetwork, OpenPose_pose_mpi_faster_4_stages)
if (backend == DNN_BACKEND_HALIDE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
// The same .caffemodel but modified .prototxt
@@ -339,8 +341,8 @@ TEST_P(DNNTestNetwork, OpenFace)
{
#if defined(INF_ENGINE_RELEASE)
#if INF_ENGINE_VER_MAJOR_EQ(2018050000)
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
-applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#endif
if (backend == DNN_BACKEND_HALIDE)
@@ -370,13 +372,13 @@ TEST_P(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
CV_TEST_TAG_DEBUG_LONG
);
#if defined(INF_ENGINE_RELEASE)
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
&& getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
-applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
if (backend == DNN_BACKEND_HALIDE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
@@ -416,13 +418,13 @@ TEST_P(DNNTestNetwork, FastNeuralStyle_eccv16)
if (backend == DNN_BACKEND_HALIDE)
applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#if defined(INF_ENGINE_RELEASE)
#if INF_ENGINE_VER_MAJOR_LE(2018050000)
-if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_OPENCL)
-applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#endif

@@ -112,8 +112,10 @@ TEST(Test_Caffe, read_googlenet)
TEST_P(Test_Caffe_nets, Axpy)
{
-if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
-applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
String proto = _tf("axpy.prototxt");
Net net = readNetFromCaffe(proto);
@@ -299,7 +301,7 @@ TEST_P(Reproducibility_MobileNet_SSD, Accuracy)
}
// There is something wrong with Reshape layer in Myriad plugin.
-if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
{
if (targetId == DNN_TARGET_MYRIAD || targetId == DNN_TARGET_OPENCL_FP16)
return;
@ -627,10 +629,10 @@ TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
); );
#if defined(INF_ENGINE_RELEASE) #if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)) if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16); applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD); applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
#endif #endif
@ -646,9 +648,9 @@ TEST_P(Test_Caffe_nets, FasterRCNN_zf)
(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB), (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
CV_TEST_TAG_DEBUG_LONG CV_TEST_TAG_DEBUG_LONG
); );
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16); applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD); applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
static Mat ref = (Mat_<float>(3, 7) << 0, 2, 0.90121, 120.407, 115.83, 570.586, 528.395, static Mat ref = (Mat_<float>(3, 7) << 0, 2, 0.90121, 120.407, 115.83, 570.586, 528.395,
0, 7, 0.988779, 469.849, 75.1756, 718.64, 186.762, 0, 7, 0.988779, 469.849, 75.1756, 718.64, 186.762,
@ -663,9 +665,9 @@ TEST_P(Test_Caffe_nets, RFCN)
CV_TEST_TAG_LONG, CV_TEST_TAG_LONG,
CV_TEST_TAG_DEBUG_VERYLONG CV_TEST_TAG_DEBUG_VERYLONG
); );
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16) if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16); applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD) if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD); applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
double scoreDiff = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 4e-3 : default_l1; double scoreDiff = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 4e-3 : default_l1;
double iouDiff = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 8e-2 : default_lInf; double iouDiff = (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) ? 8e-2 : default_lInf;
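
Note (editorial): the scoreDiff/iouDiff lines above show the tolerance idiom used across these detection nets: comparison bounds are widened only for reduced-precision paths. A sketch, assuming the fixture's default_l1/default_lInf members and the normAssertDetections helper from the dnn test suite:

    double scoreDiff = default_l1, iouDiff = default_lInf;  // fixture defaults
    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
    {
        scoreDiff = 4e-3;  // absorb FP16 rounding in confidences
        iouDiff = 8e-2;    // and in box coordinates
    }
    normAssertDetections(ref, out, "", 0.8, scoreDiff, iouDiff);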


@@ -11,9 +11,19 @@
 #include "opencv2/core/ocl.hpp"
 #endif
 
+// src/op_inf_engine.hpp
+#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
+#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
+#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
+#define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))
+#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
+
 #define CV_TEST_TAG_DNN_SKIP_HALIDE "dnn_skip_halide"
 #define CV_TEST_TAG_DNN_SKIP_OPENCL "dnn_skip_ocl"
 #define CV_TEST_TAG_DNN_SKIP_OPENCL_FP16 "dnn_skip_ocl_fp16"
+#define CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER "dnn_skip_ie_nn_builder"
+#define CV_TEST_TAG_DNN_SKIP_IE_NGRAPH "dnn_skip_ie_ngraph"
 #define CV_TEST_TAG_DNN_SKIP_IE "dnn_skip_ie"
 #define CV_TEST_TAG_DNN_SKIP_IE_2018R5 "dnn_skip_ie_2018r5"
 #define CV_TEST_TAG_DNN_SKIP_IE_2019R1 "dnn_skip_ie_2019r1"
@@ -32,6 +42,28 @@
 #define CV_TEST_TAG_DNN_SKIP_CUDA_FP16 "dnn_skip_cuda_fp16"
 #define CV_TEST_TAG_DNN_SKIP_CUDA_FP32 "dnn_skip_cuda_fp32"
 
+#ifdef HAVE_INF_ENGINE
+#if INF_ENGINE_VER_MAJOR_EQ(2018050000)
+# define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2018R5
+#elif INF_ENGINE_VER_MAJOR_EQ(2019010000)
+# if INF_ENGINE_RELEASE < 2019010100
+#  define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1
+# else
+#  define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1
+# endif
+#elif INF_ENGINE_VER_MAJOR_EQ(2019020000)
+# define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R2
+#elif INF_ENGINE_VER_MAJOR_EQ(2019030000)
+# define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R3
+#endif
+#endif // HAVE_INF_ENGINE
+
+#ifndef CV_TEST_TAG_DNN_SKIP_IE_VERSION
+# define CV_TEST_TAG_DNN_SKIP_IE_VERSION CV_TEST_TAG_DNN_SKIP_IE
+#endif
+
 namespace cv { namespace dnn {
 CV__DNN_INLINE_NS_BEGIN
@@ -92,9 +124,12 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
     bool withHalide = false,
     bool withCpuOCV = true,
     bool withVkCom = true,
-    bool withCUDA = true
+    bool withCUDA = true,
+    bool withNgraph = true
 );
 
+testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargetsIE();
+
 class DNNTestLayer : public TestWithParam<tuple<Backend, Target> >
 {
@@ -126,7 +161,8 @@ public:
     static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
     {
-        if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+        if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            && target == DNN_TARGET_MYRIAD)
         {
             if (inp && ref && inp->dims == 4 && ref->dims == 4 &&
                 inp->size[0] != 1 && inp->size[0] != ref->size[0])
@@ -137,7 +173,7 @@ public:
         }
     }
 
-    void expectNoFallbacks(Net& net)
+    void expectNoFallbacks(Net& net, bool raiseError = true)
     {
         // Check if all the layers are supported with current backend and target.
         // Some layers might be fused so their timings equal to zero.
@@ -146,20 +182,27 @@ public:
         std::vector<String> names = net.getLayerNames();
         CV_Assert(names.size() == timings.size());
 
+        bool hasFallbacks = false;
         for (int i = 0; i < names.size(); ++i)
         {
             Ptr<dnn::Layer> l = net.getLayer(net.getLayerId(names[i]));
             bool fused = !timings[i];
             if ((!l->supportBackend(backend) || l->preferableTarget != target) && !fused)
-                CV_Error(Error::StsNotImplemented, "Layer [" + l->name + "] of type [" +
-                         l->type + "] is expected to has backend implementation");
+            {
+                hasFallbacks = true;
+                std::cout << "FALLBACK: Layer [" << l->type << "]:[" << l->name << "] is expected to has backend implementation" << endl;
+            }
         }
+        if (hasFallbacks && raiseError)
+            CV_Error(Error::StsNotImplemented, "Implementation fallbacks are not expected in this test");
     }
 
     void expectNoFallbacksFromIE(Net& net)
     {
-        if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+        if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
             expectNoFallbacks(net);
+        if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+            expectNoFallbacks(net, false);
     }
 
     void expectNoFallbacksFromCUDA(Net& net)
@@ -178,11 +221,4 @@ protected:
 } // namespace
 
-// src/op_inf_engine.hpp
-#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
-#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
-#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
-#define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))
-#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
-
 #endif
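
Note (editorial): CV_TEST_TAG_DNN_SKIP_IE_VERSION bundles the generic IE skip tag with the tag of the release the library was built against, so call sites no longer hard-code release names. For example, in a build where INF_ENGINE_RELEASE is 2019020000 the two calls below are equivalent:

    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R2);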


@@ -23,10 +23,12 @@ void PrintTo(const cv::dnn::Backend& v, std::ostream* os)
     switch (v) {
     case DNN_BACKEND_DEFAULT: *os << "DEFAULT"; return;
     case DNN_BACKEND_HALIDE: *os << "HALIDE"; return;
-    case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE"; return;
+    case DNN_BACKEND_INFERENCE_ENGINE: *os << "DLIE*"; return;
     case DNN_BACKEND_VKCOM: *os << "VKCOM"; return;
     case DNN_BACKEND_OPENCV: *os << "OCV"; return;
     case DNN_BACKEND_CUDA: *os << "CUDA"; return;
+    case DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019: *os << "DLIE"; return;
+    case DNN_BACKEND_INFERENCE_ENGINE_NGRAPH: *os << "NGRAPH"; return;
     } // don't use "default:" to emit compiler warnings
     *os << "DNN_BACKEND_UNKNOWN(" << (int)v << ")";
 }
@@ -186,7 +188,8 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
     bool withHalide /*= false*/,
     bool withCpuOCV /*= true*/,
     bool withVkCom /*= true*/,
-    bool withCUDA /*= true*/
+    bool withCUDA /*= true*/,
+    bool withNgraph /*= true*/
 )
 {
 #ifdef HAVE_INF_ENGINE
@@ -204,14 +207,25 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
 #ifdef HAVE_INF_ENGINE
     if (withInferenceEngine)
     {
-        available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE);
+        available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
         for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
         {
             if (*i == DNN_TARGET_MYRIAD && !withVPU)
                 continue;
-            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE, *i));
+            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, *i));
         }
     }
+    if (withNgraph)
+    {
+        available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+        {
+            if (*i == DNN_TARGET_MYRIAD && !withVPU)
+                continue;
+            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, *i));
+        }
+    }
 #else
     CV_UNUSED(withInferenceEngine);
 #endif
@@ -245,6 +259,40 @@ testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTarget
     return testing::ValuesIn(targets);
 }
 
+testing::internal::ParamGenerator< tuple<Backend, Target> > dnnBackendsAndTargetsIE()
+{
+#ifdef HAVE_INF_ENGINE
+    bool withVPU = validateVPUType();
+
+    std::vector< tuple<Backend, Target> > targets;
+    std::vector< Target > available;
+
+    {
+        available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019);
+        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+        {
+            if (*i == DNN_TARGET_MYRIAD && !withVPU)
+                continue;
+            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019, *i));
+        }
+    }
+
+    {
+        available = getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+        for (std::vector< Target >::const_iterator i = available.begin(); i != available.end(); ++i)
+        {
+            if (*i == DNN_TARGET_MYRIAD && !withVPU)
+                continue;
+            targets.push_back(make_tuple(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, *i));
+        }
+    }
+
+    return testing::ValuesIn(targets);
+#else
+    return testing::ValuesIn(std::vector< tuple<Backend, Target> >());
+#endif
+}
+
 #ifdef HAVE_INF_ENGINE
 static std::string getTestInferenceEngineVPUType()
@@ -329,6 +377,7 @@ void initDNNTests()
     );
 #if defined(INF_ENGINE_RELEASE)
     registerGlobalSkipTag(
+        CV_TEST_TAG_DNN_SKIP_IE,
 #if INF_ENGINE_VER_MAJOR_EQ(2018050000)
         CV_TEST_TAG_DNN_SKIP_IE_2018R5,
 #elif INF_ENGINE_VER_MAJOR_EQ(2019010000)
@@ -341,7 +390,10 @@ void initDNNTests()
 #elif INF_ENGINE_VER_MAJOR_EQ(2019030000)
         CV_TEST_TAG_DNN_SKIP_IE_2019R3,
 #endif
-        CV_TEST_TAG_DNN_SKIP_IE
+#ifdef HAVE_DNN_NGRAPH
+        CV_TEST_TAG_DNN_SKIP_IE_NGRAPH,
+#endif
+        CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER
     );
 #endif
     registerGlobalSkipTag(
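
Note (editorial): dnnBackendsAndTargetsIE() enumerates both IE code paths (NN Builder and nGraph) across whatever targets the local plugins report. A hypothetical instantiation (the fixture name and test body are invented):

    typedef testing::TestWithParam< tuple<Backend, Target> > ExampleIETest;  // hypothetical fixture
    TEST_P(ExampleIETest, EnumeratesBothIEPaths)
    {
        Backend backendId = get<0>(GetParam());
        Target targetId = get<1>(GetParam());
        std::cout << backendId << " / " << targetId << std::endl;  // plain enums print as ints
    }
    INSTANTIATE_TEST_CASE_P(/**/, ExampleIETest, dnnBackendsAndTargetsIE());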


@@ -261,11 +261,11 @@ TEST_P(Test_Darknet_nets, YoloVoc)
     applyTestTag(CV_TEST_TAG_LONG, CV_TEST_TAG_MEMORY_1GB);
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
 #endif
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
             && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);  // need to update check function
 #endif
@@ -301,7 +301,7 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
     applyTestTag(CV_TEST_TAG_MEMORY_512MB);
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
             && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);  // need to update check function
 #endif
@@ -331,15 +331,23 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
 #ifdef HAVE_INF_ENGINE
 static const std::chrono::milliseconds async_timeout(10000);
 
-typedef testing::TestWithParam<tuple<std::string, Target> > Test_Darknet_nets_async;
+typedef testing::TestWithParam<tuple<std::string, tuple<Backend, Target> > > Test_Darknet_nets_async;
 TEST_P(Test_Darknet_nets_async, Accuracy)
 {
-    if (INF_ENGINE_VER_MAJOR_LT(2019020000))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    Backend backendId = get<0>(get<1>(GetParam()));
+    Target targetId = get<1>(get<1>(GetParam()));
+
+    if (INF_ENGINE_VER_MAJOR_LT(2019020000) && backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     applyTestTag(CV_TEST_TAG_MEMORY_512MB);
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
 
     std::string prefix = get<0>(GetParam());
-    int target = get<1>(GetParam());
+
+    if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        throw SkipTestException("No support for async forward");
 
     const int numInputs = 2;
     std::vector<Mat> inputs(numInputs);
@@ -352,7 +360,8 @@ TEST_P(Test_Darknet_nets_async, Accuracy)
     Net netSync = readNet(findDataFile("dnn/" + prefix + ".cfg"),
                           findDataFile("dnn/" + prefix + ".weights", false));
-    netSync.setPreferableTarget(target);
+    netSync.setPreferableBackend(backendId);
+    netSync.setPreferableTarget(targetId);
 
     // Run synchronously.
     std::vector<Mat> refs(numInputs);
@@ -364,7 +373,8 @@ TEST_P(Test_Darknet_nets_async, Accuracy)
     Net netAsync = readNet(findDataFile("dnn/" + prefix + ".cfg"),
                            findDataFile("dnn/" + prefix + ".weights", false));
-    netAsync.setPreferableTarget(target);
+    netAsync.setPreferableBackend(backendId);
+    netAsync.setPreferableTarget(targetId);
 
     // Run asynchronously. To make test more robust, process inputs in the reversed order.
     for (int i = numInputs - 1; i >= 0; --i)
@@ -381,7 +391,7 @@ TEST_P(Test_Darknet_nets_async, Accuracy)
 INSTANTIATE_TEST_CASE_P(/**/, Test_Darknet_nets_async, Combine(
     Values("yolo-voc", "tiny-yolo-voc", "yolov3"),
-    ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
+    dnnBackendsAndTargets()
 ));
 #endif
@@ -408,7 +418,7 @@ TEST_P(Test_Darknet_nets, YOLOv3)
     std::string weights_file = "yolov3.weights";
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD &&
         getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
     {
         scoreDiff = 0.04;
@@ -422,16 +432,16 @@ TEST_P(Test_Darknet_nets, YOLOv3)
     }
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
         if (INF_ENGINE_VER_MAJOR_LE(2018050000) && target == DNN_TARGET_OPENCL)
-            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
         else if (INF_ENGINE_VER_MAJOR_EQ(2019020000))
         {
             if (target == DNN_TARGET_OPENCL)
-                applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+                applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
             if (target == DNN_TARGET_OPENCL_FP16)
-                applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+                applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
         }
         else if (target == DNN_TARGET_MYRIAD &&
                  getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
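
Note (editorial): the async test body elided by the hunks above uses the dnn async API. A condensed sketch, assuming an IE backend is available (forwardAsync() requires one; paths are placeholders):

    Net netSync = readNet("example.cfg", "example.weights");
    netSync.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
    Net netAsync = readNet("example.cfg", "example.weights");
    netAsync.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);

    Mat input = blobFromImage(imread("example.png"), 1.0, Size(416, 416));
    netSync.setInput(input);
    Mat ref = netSync.forward().clone();  // synchronous reference

    netAsync.setInput(input);
    AsyncArray futureOut = netAsync.forwardAsync();
    Mat out;
    ASSERT_TRUE(futureOut.get(out, async_timeout));  // wait with a deadline
    normAssert(ref, out);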


@@ -165,7 +165,7 @@ TEST_P(Deconvolution, Accuracy)
     Target targetId = get<1>(get<7>(GetParam()));
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
         && inChannels == 6 && outChannels == 4 && group == 1
         && kernel == Size(1, 3) && pad == Size(1, 0)
@@ -278,7 +278,7 @@ TEST_P(AvePooling, Accuracy)
     Target targetId = get<1>(get<4>(GetParam()));
 #if defined(INF_ENGINE_RELEASE)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
         && kernel == Size(1, 1) && (stride == Size(1, 1) || stride == Size(2, 2)))
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
@@ -324,29 +324,34 @@ TEST_P(MaxPooling, Accuracy)
     Target targetId = get<1>(get<5>(GetParam()));
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
         && inSize == Size(7, 6) && kernel == Size(3, 2)
         && (stride == Size(1, 1) || stride == Size(2, 2))
         && (pad == Size(0, 1) || pad == Size(1, 1))
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
         && (kernel == Size(2, 2) || kernel == Size(3, 2))
         && stride == Size(1, 1) && (pad == Size(0, 0) || pad == Size(0, 1))
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
         && (stride == Size(1, 1) || stride == Size(2, 2))
         && (pad == Size(0, 1) || pad == Size(1, 1))
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+#if defined(INF_ENGINE_RELEASE)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && stride != Size(1, 1) && pad != Size(0, 0))
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
 #endif
 
     LayerParams lp;
@@ -386,7 +391,7 @@ TEST_P(FullyConnected, Accuracy)
     bool hasBias = get<3>(GetParam());
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && (targetId == DNN_TARGET_OPENCL_FP16 ||
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (targetId == DNN_TARGET_OPENCL_FP16 ||
        (targetId == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X))) {
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
@@ -447,8 +452,10 @@ INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, SoftMax, Combine(
 //////////////////////////////////////////////////////////////////////////////
 TEST_P(Test_Halide_layers, MaxPoolUnpool)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
 
     LayerParams pool;
     pool.set("pool", "max");
@@ -555,8 +562,8 @@ TEST_P(ReLU, Accuracy)
     Target targetId = get<1>(get<1>(GetParam()));
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019020000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD && negativeSlope < 0)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_2019R3, CV_TEST_TAG_DNN_SKIP_IE_2019R2, CV_TEST_TAG_DNN_SKIP_IE);
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD && negativeSlope < 0)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 
     LayerParams lp;
@@ -666,17 +673,17 @@ TEST_P(Concat, Accuracy)
     Target targetId = get<1>(get<2>(GetParam()));
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD
         && inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5);  // crash
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);  // crash
 #endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_CPU
         && inSize == Vec3i(1, 4, 5) && numChannels == Vec3i(1, 6, 2)
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);  // TODO: IE_CPU
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);  // TODO: IE_CPU
 #endif
 
     Net net;
@@ -748,20 +755,25 @@ TEST_P(Eltwise, Accuracy)
     Target targetId = get<1>(get<4>(GetParam()));
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD &&
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD &&
         inSize == Vec3i(1, 4, 5))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && numConv > 1)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && numConv > 1)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 #if defined(INF_ENGINE_RELEASE)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_OPENCL &&
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_OPENCL &&
         op == "sum" && numConv == 1 && !weighted)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+#endif
+#if defined(INF_ENGINE_RELEASE)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && numConv > 1)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 
     Net net;
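
Note (editorial): these parameterized cases all follow a single-layer pattern: build a one-layer Net from LayerParams and compare the backend's output against the OpenCV reference. A sketch of the construction step (parameter values arbitrary):

    LayerParams lp;
    lp.set("pool", "max");
    lp.set("kernel_w", 3);
    lp.set("kernel_h", 3);
    lp.set("stride_w", 2);
    lp.set("stride_h", 2);
    lp.type = "Pooling";
    lp.name = "testPool";

    Net net;
    int id = net.addLayer(lp.name, lp.type, lp);
    net.connect(0, 0, id, 0);  // wire the data input (layer 0) into the pooling layer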


@@ -254,14 +254,16 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
     infRequest.Infer();
 }
 
-void runCV(Target target, const std::string& xmlPath, const std::string& binPath,
+void runCV(Backend backendId, Target targetId, const std::string& xmlPath, const std::string& binPath,
            const std::map<std::string, cv::Mat>& inputsMap,
            std::map<std::string, cv::Mat>& outputsMap)
 {
     Net net = readNet(xmlPath, binPath);
     for (auto& it : inputsMap)
         net.setInput(it.second, it.first);
-    net.setPreferableTarget(target);
+    net.setPreferableBackend(backendId);
+    net.setPreferableTarget(targetId);
 
     std::vector<String> outNames = net.getUnconnectedOutLayersNames();
     std::vector<Mat> outs;
@@ -275,14 +277,26 @@ void runCV(Target target, const std::string& xmlPath, const std::string& binPath
     }
 }
 
-typedef TestWithParam<tuple<Target, std::string> > DNNTestOpenVINO;
+typedef TestWithParam<tuple< tuple<Backend, Target>, std::string> > DNNTestOpenVINO;
 TEST_P(DNNTestOpenVINO, models)
 {
     initDLDTDataPath();
 
-    Target target = (dnn::Target)(int)get<0>(GetParam());
+    const Backend backendId = get<0>(get<0>(GetParam()));
+    const Target targetId = get<1>(get<0>(GetParam()));
+
+    if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        throw SkipTestException("No support for async forward");
+
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
+    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+    else
+        FAIL() << "Unknown backendId";
+
     std::string modelName = get<1>(GetParam());
-    bool isFP16 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD);
+    bool isFP16 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD);
 
     const std::map<std::string, OpenVINOModelTestCaseInfo>& models = getOpenVINOTestModels();
     const auto it = models.find(modelName);
@@ -296,10 +310,10 @@ TEST_P(DNNTestOpenVINO, models)
     std::map<std::string, cv::Mat> inputsMap;
     std::map<std::string, cv::Mat> ieOutputsMap, cvOutputsMap;
     // Single Myriad device cannot be shared across multiple processes.
-    if (target == DNN_TARGET_MYRIAD)
+    if (targetId == DNN_TARGET_MYRIAD)
         resetMyriadDevice();
-    runIE(target, xmlPath, binPath, inputsMap, ieOutputsMap);
-    runCV(target, xmlPath, binPath, inputsMap, cvOutputsMap);
+    runIE(targetId, xmlPath, binPath, inputsMap, ieOutputsMap);
+    runCV(backendId, targetId, xmlPath, binPath, inputsMap, cvOutputsMap);
 
     EXPECT_EQ(ieOutputsMap.size(), cvOutputsMap.size());
     for (auto& srcIt : ieOutputsMap)
@@ -314,7 +328,7 @@ TEST_P(DNNTestOpenVINO, models)
 INSTANTIATE_TEST_CASE_P(/**/,
     DNNTestOpenVINO,
-    Combine(testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE)),
+    Combine(dnnBackendsAndTargetsIE(),
             testing::ValuesIn(getOpenVINOTestModelsList())
     )
 );
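
Note (editorial): the IE integration type appears to be a global switch here, which is why the tests call setInferenceEngineBackendType() before building any network. Condensed (IR file names are placeholders):

    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
    Net net = readNet("model.xml", "model.bin");
    net.setPreferableBackend(backendId);
    net.setPreferableTarget(targetId);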


@@ -146,8 +146,11 @@ TEST_P(Test_Caffe_layers, DeConvolution)
 TEST_P(Test_Caffe_layers, InnerProduct)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+
     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
     testLayerUsingCaffeModels("layer_inner_product", true);
@@ -238,12 +241,20 @@ TEST_P(Test_Caffe_layers, Concat)
 {
 #if defined(INF_ENGINE_RELEASE)
 #if INF_ENGINE_VER_MAJOR_GE(2019010000) && INF_ENGINE_VER_MAJOR_LT(2019020000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #elif INF_ENGINE_VER_MAJOR_EQ(2019020000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
+        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH &&
+        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
     testLayerUsingCaffeModels("layer_concat");
     testLayerUsingCaffeModels("layer_concat_optim", true, false);
@@ -252,8 +263,9 @@ TEST_P(Test_Caffe_layers, Concat)
 TEST_P(Test_Caffe_layers, Fused_Concat)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 
     checkBackend();
@@ -297,14 +309,15 @@ TEST_P(Test_Caffe_layers, Fused_Concat)
 TEST_P(Test_Caffe_layers, Eltwise)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
     testLayerUsingCaffeModels("layer_eltwise");
 }
 
 TEST_P(Test_Caffe_layers, PReLU)
 {
-    testLayerUsingCaffeModels("layer_prelu", true);
+    double lInf = (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.021 : 0.0;
+    testLayerUsingCaffeModels("layer_prelu", true, true, 0.0, lInf);
 }
 
 // TODO: fix an unstable test case
@@ -320,8 +333,10 @@ TEST_P(Test_Caffe_layers, layer_prelu_fc)
 TEST_P(Test_Caffe_layers, Reshape_Split_Slice)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
 
     Net net = readNetFromCaffe(_tf("reshape_and_slice_routines.prototxt"));
     ASSERT_FALSE(net.empty());
@@ -342,8 +357,8 @@ TEST_P(Test_Caffe_layers, Reshape_Split_Slice)
 TEST_P(Test_Caffe_layers, Conv_Elu)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE <= 2018050000
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 
     Net net = readNetFromTensorflow(_tf("layer_elu_model.pb"));
@@ -556,29 +571,38 @@ TEST_F(Layer_RNN_Test, get_set_test)
     EXPECT_EQ(shape(outputs[1]), shape(nT, nS, nH));
 }
 
-TEST(Layer_Test_ROIPooling, Accuracy)
+TEST_P(Test_Caffe_layers, ROIPooling_Accuracy)
 {
     Net net = readNetFromCaffe(_tf("net_roi_pooling.prototxt"));
+    ASSERT_FALSE(net.empty());
 
     Mat inp = blobFromNPY(_tf("net_roi_pooling.input.npy"));
     Mat rois = blobFromNPY(_tf("net_roi_pooling.rois.npy"));
     Mat ref = blobFromNPY(_tf("net_roi_pooling.npy"));
 
+    checkBackend(&inp, &ref);
+
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
+
     net.setInput(inp, "input");
     net.setInput(rois, "rois");
-    net.setPreferableBackend(DNN_BACKEND_OPENCV);
 
     Mat out = net.forward();
 
-    normAssert(out, ref);
+    double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1e-3 : 1e-5;
+    double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1e-3 : 1e-4;
+    normAssert(out, ref, "", l1, lInf);
 }
 
 TEST_P(Test_Caffe_layers, FasterRCNN_Proposal)
 {
     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     if(backend == DNN_BACKEND_CUDA)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* Proposal layer is unsupported */
@@ -812,16 +836,21 @@ TEST_P(Test_Caffe_layers, PriorBox_repeated)
     randu(shape, -1.0f, 1.0f);
     net.setInput(inp, "data");
     net.setInput(shape, "shape");
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
 
     Mat out = net.forward();
     Mat ref = blobFromNPY(_tf("priorbox_output.npy"));
-    normAssert(out, ref, "");
+
+    double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1e-3 : 1e-5;
+    double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1e-3 : 1e-4;
+    normAssert(out, ref, "", l1, lInf);
 }
 
 // Test PriorBoxLayer in case of no aspect ratios (just squared proposals).
 TEST_P(Test_Caffe_layers, PriorBox_squares)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     LayerParams lp;
     lp.name = "testPriorBox";
     lp.type = "PriorBox";
@@ -975,10 +1004,21 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_DWconv_Prelu, Combine(Values(3, 6), Val
 // Using Intel's Model Optimizer generate .xml and .bin files:
 // ./ModelOptimizer -w /path/to/caffemodel -d /path/to/prototxt \
 //                  -p FP32 -i -b ${batch_size} -o /path/to/output/folder
-typedef testing::TestWithParam<Target> Layer_Test_Convolution_DLDT;
+typedef testing::TestWithParam<tuple<Backend, Target> > Layer_Test_Convolution_DLDT;
 TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
 {
-    Target targetId = GetParam();
+    const Backend backendId = get<0>(GetParam());
+    const Target targetId = get<1>(GetParam());
+
+    if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        throw SkipTestException("No support for async forward");
+
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
+    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+    else
+        FAIL() << "Unknown backendId";
+
     std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
     Net netDefault = readNet(_tf("layer_convolution.caffemodel"), _tf("layer_convolution.prototxt"));
@@ -991,6 +1031,7 @@ TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
     Mat outDefault = netDefault.forward();
 
     net.setInput(inp);
+    net.setPreferableBackend(backendId);
     net.setPreferableTarget(targetId);
 
     Mat out = net.forward();
@@ -1006,10 +1047,22 @@ TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
 TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
 {
-    Target targetId = GetParam();
+    const Backend backendId = get<0>(GetParam());
+    const Target targetId = get<1>(GetParam());
+
+    if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        throw SkipTestException("No support for async forward");
+
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
+    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+    else
+        FAIL() << "Unknown backendId";
 
-    Mat inp = blobFromNPY(_tf("blob.npy"));
-    Mat inputs[] = {Mat(inp.dims, inp.size, CV_8U), Mat()};
+    int blobSize[] = {2, 6, 75, 113};
+    Mat inputs[] = {Mat(4, &blobSize[0], CV_8U), Mat()};
     randu(inputs[0], 0, 255);
     inputs[0].convertTo(inputs[1], CV_32F);
@@ -1019,6 +1072,7 @@ TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
     for (int i = 0; i < 2; ++i)
     {
         Net net = readNet(_tf("layer_convolution" + suffix + ".xml"), _tf("layer_convolution" + suffix + ".bin"));
+        net.setPreferableBackend(backendId);
         net.setPreferableTarget(targetId);
         net.setInput(inputs[i]);
         outs[i] = net.forward();
@@ -1030,7 +1084,19 @@ TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
 TEST_P(Layer_Test_Convolution_DLDT, multithreading)
 {
-    Target targetId = GetParam();
+    const Backend backendId = get<0>(GetParam());
+    const Target targetId = get<1>(GetParam());
+
+    if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        throw SkipTestException("No support for async forward");
+
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
+    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+    else
+        FAIL() << "Unknown backendId";
+
     std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
     std::string xmlPath = _tf("layer_convolution" + suffix + ".xml");
     std::string binPath = _tf("layer_convolution" + suffix + ".bin");
@@ -1040,7 +1106,9 @@ TEST_P(Layer_Test_Convolution_DLDT, multithreading)
     firstNet.setInput(inp);
     secondNet.setInput(inp);
+    firstNet.setPreferableBackend(backendId);
     firstNet.setPreferableTarget(targetId);
+    secondNet.setPreferableBackend(backendId);
     secondNet.setPreferableTarget(targetId);
 
     Mat out1, out2;
@@ -1058,7 +1126,8 @@ TEST_P(Layer_Test_Convolution_DLDT, multithreading)
 }
 
 INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Convolution_DLDT,
-    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE)));
+    dnnBackendsAndTargetsIE()
+);
 // 1. Create a .prototxt file with the following network:
 // layer {
@@ -1117,17 +1186,18 @@ std::vector< std::vector<int> > list_sizes{ {1, 2, 3}, {3, 2, 1}, {5, 5, 5}, {13
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs_3dim, Combine(
     Values(CV_8U, CV_32F), Values(CV_8U, CV_32F),
-    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE)),
+    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)),
     testing::ValuesIn(list_sizes)
 ));
 
-typedef testing::TestWithParam<tuple<int, int, Target> > Test_DLDT_two_inputs;
+typedef testing::TestWithParam<tuple<int, int, tuple<Backend, Target> > > Test_DLDT_two_inputs;
 TEST_P(Test_DLDT_two_inputs, as_backend)
 {
     static const float kScale = 0.5f;
     static const float kScaleInv = 1.0f / kScale;
 
-    Target targetId = get<2>(GetParam());
+    Backend backendId = get<0>(get<2>(GetParam()));
+    Target targetId = get<1>(get<2>(GetParam()));
 
     Net net;
     LayerParams lp;
@@ -1146,7 +1216,7 @@ TEST_P(Test_DLDT_two_inputs, as_backend)
     net.setInputsNames({"data", "second_input"});
     net.setInput(firstInp, "data", kScale);
     net.setInput(secondInp, "second_input", kScaleInv);
-    net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
+    net.setPreferableBackend(backendId);
     net.setPreferableTarget(targetId);
     Mat out = net.forward();
@@ -1160,7 +1230,7 @@ TEST_P(Test_DLDT_two_inputs, as_backend)
 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs, Combine(
     Values(CV_8U, CV_32F), Values(CV_8U, CV_32F),
-    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
+    dnnBackendsAndTargets()
 ));
 
 class UnsupportedLayer : public Layer
@@ -1181,10 +1251,11 @@ public:
     virtual void forward(cv::InputArrayOfArrays inputs, cv::OutputArrayOfArrays outputs, cv::OutputArrayOfArrays internals) CV_OVERRIDE {}
 };
 
-TEST(Test_DLDT, fused_output)
+typedef DNNTestLayer Test_DLDT_layers;
+
+static void test_dldt_fused_output(Backend backend, Target target)
 {
     static const int kNumChannels = 3;
-    CV_DNN_REGISTER_LAYER_CLASS(Unsupported, UnsupportedLayer);
     Net net;
     {
         LayerParams lp;
@@ -1208,13 +1279,31 @@ TEST(Test_DLDT, fused_output)
         LayerParams lp;
         net.addLayerToPrev("unsupported_layer", "Unsupported", lp);
     }
-    net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
+    net.setPreferableBackend(backend);
+    net.setPreferableTarget(target);
     net.setInput(Mat({1, 1, 1, 1}, CV_32FC1, Scalar(1)));
-    ASSERT_NO_THROW(net.forward());
+    net.forward();
+}
+
+TEST_P(Test_DLDT_layers, fused_output)
+{
+    CV_DNN_REGISTER_LAYER_CLASS(Unsupported, UnsupportedLayer);
+    try
+    {
+        test_dldt_fused_output(backend, target);
+    }
+    catch (const std::exception& e)
+    {
+        ADD_FAILURE() << "Exception: " << e.what();
+    }
+    catch(...)
+    {
+        ADD_FAILURE() << "Unknown exception";
+    }
     LayerFactory::unregisterLayer("Unsupported");
 }
 
-TEST(Test_DLDT, multiple_networks)
+TEST_P(Test_DLDT_layers, multiple_networks)
 {
     Net nets[2];
     for (int i = 0; i < 2; ++i)
@@ -1229,7 +1318,8 @@ TEST(Test_DLDT, multiple_networks)
         lp.name = format("testConv_%d", i);
         lp.blobs.push_back(Mat({1, 1, 1, 1}, CV_32F, Scalar(1 + i)));
         nets[i].addLayerToPrev(lp.name, lp.type, lp);
-        nets[i].setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
+        nets[i].setPreferableBackend(backend);
+        nets[i].setPreferableTarget(target);
         nets[i].setInput(Mat({1, 1, 1, 1}, CV_32FC1, Scalar(1)));
     }
     Mat out_1 = nets[0].forward();
@@ -1238,6 +1328,9 @@ TEST(Test_DLDT, multiple_networks)
     out_1 = nets[0].forward();
     normAssert(2 * out_1, out_2);
 }
+
+INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_layers, dnnBackendsAndTargets());
+
 #endif // HAVE_INF_ENGINE
 
 // Test a custom layer.
@@ -1353,7 +1446,7 @@ TEST_P(Test_Caffe_layers, Interp)
 TEST_P(Test_Caffe_layers, DISABLED_Interp)  // requires patched protobuf (available in OpenCV source tree only)
 #endif
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
 
 // Test a custom layer.
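
Note (editorial): the fused_output test above registers a layer type only for the duration of one test. The general register/unregister pattern, with a hypothetical identity layer that relies on the base Layer shape inference:

    class MyIdentityLayer : public Layer  // hypothetical custom layer
    {
    public:
        MyIdentityLayer(const LayerParams& params) : Layer(params) {}
        virtual void forward(cv::InputArrayOfArrays inputs, cv::OutputArrayOfArrays outputs,
                             cv::OutputArrayOfArrays internals) CV_OVERRIDE
        {
            std::vector<Mat> inps, outs;
            inputs.getMatVector(inps);
            outputs.getMatVector(outs);
            inps[0].copyTo(outs[0]);  // pass the input through unchanged
        }
    };

    CV_DNN_REGISTER_LAYER_CLASS(MyIdentity, MyIdentityLayer);
    // ... build and run a Net that references layer type "MyIdentity" ...
    LayerFactory::unregisterLayer("MyIdentity");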


@@ -371,21 +371,34 @@ static const std::chrono::milliseconds async_timeout(10000);
 // This test runs network in synchronous mode for different inputs and then
 // runs the same model asynchronously for the same inputs.
-typedef testing::TestWithParam<tuple<int, Target> > Async;
+typedef testing::TestWithParam<tuple<int, tuple<Backend, Target> > > Async;

-TEST_P(Async, set_and_forward_single)
+TEST_P(Async, model_optimizer_pipeline_set_and_forward_single)
 {
     const int dtype = get<0>(GetParam());
-    const int target = get<1>(GetParam());
+    const Backend backendId = get<0>(get<1>(GetParam()));
+    const Target targetId = get<1>(get<1>(GetParam()));

-    const std::string suffix = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? "_fp16" : "";
+    if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        throw SkipTestException("No support for async forward");
+
+    const std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
     const std::string& model = findDataFile("dnn/layers/layer_convolution" + suffix + ".bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution" + suffix + ".xml");

+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
+    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+    else
+        FAIL() << "Unknown backendId";
+
     Net netSync = readNet(model, proto);
-    netSync.setPreferableTarget(target);
+    netSync.setPreferableBackend(backendId);
+    netSync.setPreferableTarget(targetId);

     Net netAsync = readNet(model, proto);
-    netAsync.setPreferableTarget(target);
+    netAsync.setPreferableBackend(backendId);
+    netAsync.setPreferableTarget(targetId);

     // Generate inputs.
     const int numInputs = 10;
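Note on the pattern both Async tests exercise: forwardAsync() returns immediately with a cv::AsyncArray, and get() blocks until the result is ready or the timeout expires. A hedged sketch of the core sync-vs-async check, assuming nets already configured as in the hunks above:

    #include <opencv2/dnn.hpp>
    #include <chrono>
    #include <vector>

    static void checkAsyncMatchesSync(cv::dnn::Net& netSync, cv::dnn::Net& netAsync,
                                      const std::vector<cv::Mat>& inputs)
    {
        const std::chrono::milliseconds timeout(10000);
        for (size_t i = 0; i < inputs.size(); ++i)
        {
            netSync.setInput(inputs[i]);
            cv::Mat ref = netSync.forward().clone();  // clone: forward() may reuse its buffer

            netAsync.setInput(inputs[i]);
            cv::AsyncArray out = netAsync.forwardAsync();  // returns without waiting
            cv::Mat result;
            CV_Assert(out.valid());
            CV_Assert(out.get(result, timeout));  // blocks up to the timeout
            CV_Assert(cv::norm(ref, result, cv::NORM_INF) <= 1e-5);
        }
    }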
@@ -418,21 +431,33 @@ TEST_P(Async, set_and_forward_single)
     }
 }

-TEST_P(Async, set_and_forward_all)
+TEST_P(Async, model_optimizer_pipeline_set_and_forward_all)
 {
     const int dtype = get<0>(GetParam());
-    const int target = get<1>(GetParam());
+    const Backend backendId = get<0>(get<1>(GetParam()));
+    const Target targetId = get<1>(get<1>(GetParam()));

-    const std::string suffix = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? "_fp16" : "";
+    if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        throw SkipTestException("No support for async forward");
+
+    const std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
     const std::string& model = findDataFile("dnn/layers/layer_convolution" + suffix + ".bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution" + suffix + ".xml");

+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
+    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+    else
+        FAIL() << "Unknown backendId";
+
     Net netSync = readNet(model, proto);
-    netSync.setPreferableTarget(target);
+    netSync.setPreferableBackend(backendId);
+    netSync.setPreferableTarget(targetId);

     Net netAsync = readNet(model, proto);
-    netAsync.setPreferableTarget(target);
+    netAsync.setPreferableBackend(backendId);
+    netAsync.setPreferableTarget(targetId);

     // Generate inputs.
     const int numInputs = 10;
@@ -469,25 +494,136 @@ TEST_P(Async, set_and_forward_all)
     }
 }

+TEST_P(Async, create_layer_pipeline_set_and_forward_all)
+{
+    const int dtype = get<0>(GetParam());
+    const Backend backendId = get<0>(get<1>(GetParam()));
+    const Target targetId = get<1>(get<1>(GetParam()));
+
+    if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        throw SkipTestException("No support for async forward");
+
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
+    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+    else
+        FAIL() << "Unknown backendId";
+
+    Net netSync;
+    Net netAsync;
+    {
+        int inChannels = 4;
+        int outChannels = 12;
+        int group = 3;
+        Size inSize(113, 75);
+        Size kernel(4, 5);
+        Size stride(2, 3);
+        Size pad(0, 1);
+        Size dilation(1, 1);
+        bool hasBias = true;
+
+        int sz[] = {outChannels, inChannels / group, kernel.height, kernel.width};
+        Mat weights(4, &sz[0], CV_32F);
+        randu(weights, -1.0f, 1.0f);
+
+        LayerParams lp;
+        lp.set("kernel_w", kernel.width);
+        lp.set("kernel_h", kernel.height);
+        lp.set("pad_w", pad.width);
+        lp.set("pad_h", pad.height);
+        lp.set("stride_w", stride.width);
+        lp.set("stride_h", stride.height);
+        lp.set("dilation_w", dilation.width);
+        lp.set("dilation_h", dilation.height);
+        lp.set("num_output", outChannels);
+        lp.set("group", group);
+        lp.set("bias_term", hasBias);
+        lp.type = "Convolution";
+        lp.name = "testLayer";
+        lp.blobs.push_back(weights);
+        if (hasBias)
+        {
+            Mat bias(1, outChannels, CV_32F);
+            randu(bias, -1.0f, 1.0f);
+            lp.blobs.push_back(bias);
+        }
+        int inpSz[] = {1, inChannels, inSize.height, inSize.width};
+        Mat input(4, &inpSz[0], CV_32F);
+
+        netSync.addLayerToPrev(lp.name, lp.type, lp);
+        netAsync.addLayerToPrev(lp.name, lp.type, lp);
+    }
+
+    netSync.setPreferableBackend(backendId);
+    netSync.setPreferableTarget(targetId);
+    netAsync.setPreferableBackend(backendId);
+    netAsync.setPreferableTarget(targetId);
+
+    // Generate inputs.
+    const int numInputs = 10;
+    std::vector<Mat> inputs(numInputs);
+    int blobSize[] = {1, 4, 75, 113};
+    for (int i = 0; i < numInputs; ++i)
+    {
+        inputs[i].create(4, &blobSize[0], dtype);
+        randu(inputs[i], 0, 255);
+    }
+
+    // Run synchronously.
+    std::vector<Mat> refs(numInputs);
+    for (int i = 0; i < numInputs; ++i)
+    {
+        netSync.setInput(inputs[i]);
+        refs[i] = netSync.forward().clone();
+    }
+
+    // Run asynchronously. To make test more robust, process inputs in the reversed order.
+    std::vector<AsyncArray> outs(numInputs);
+    for (int i = numInputs - 1; i >= 0; --i)
+    {
+        netAsync.setInput(inputs[i]);
+        outs[i] = netAsync.forwardAsync();
+    }
+
+    for (int i = numInputs - 1; i >= 0; --i)
+    {
+        ASSERT_TRUE(outs[i].valid());
+        Mat result;
+        EXPECT_TRUE(outs[i].get(result, async_timeout));
+        normAssert(refs[i], result, format("Index: %d", i).c_str(), 0, 0);
+    }
+}
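For reference, the convolution parameters in the new test pin down its output geometry (a worked check, not part of the diff), using the standard formula out = (in + 2*pad - dilation*(kernel - 1) - 1) / stride + 1:

    // width:  (113 + 2*0 - 1*(4 - 1) - 1) / 2 + 1 = 55
    // height: ( 75 + 2*1 - 1*(5 - 1) - 1) / 3 + 1 = 25
    // so every forward() above yields a 1 x 12 x 25 x 55 blob
    // (12 = num_output, grouped 3 ways over the 4 input channels).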
 INSTANTIATE_TEST_CASE_P(/**/, Async, Combine(
     Values(CV_32F, CV_8U),
-    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
+    dnnBackendsAndTargetsIE()
 ));

-typedef testing::TestWithParam<Target> Test_Model_Optimizer;
+typedef testing::TestWithParam<tuple<Backend, Target> > Test_Model_Optimizer;
 TEST_P(Test_Model_Optimizer, forward_two_nets)
 {
-    const int target = GetParam();
+    const Backend backendId = get<0>(GetParam());
+    const Target targetId = get<1>(GetParam());

-    const std::string suffix = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? "_fp16" : "";
+    const std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
     const std::string& model = findDataFile("dnn/layers/layer_convolution" + suffix + ".bin");
     const std::string& proto = findDataFile("dnn/layers/layer_convolution" + suffix + ".xml");

+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_API);
+    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        setInferenceEngineBackendType(CV_DNN_BACKEND_INFERENCE_ENGINE_NGRAPH);
+    else
+        FAIL() << "Unknown backendId";
+
     Net net0 = readNet(model, proto);
-    net0.setPreferableTarget(target);
+    net0.setPreferableTarget(targetId);

     Net net1 = readNet(model, proto);
-    net1.setPreferableTarget(target);
+    net1.setPreferableTarget(targetId);

     // Generate inputs.
     int blobSize[] = {2, 6, 75, 113};
@@ -506,7 +642,7 @@ TEST_P(Test_Model_Optimizer, forward_two_nets)
     normAssert(ref0, ref2, 0, 0);
 }

 INSTANTIATE_TEST_CASE_P(/**/, Test_Model_Optimizer,
-    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
+    dnnBackendsAndTargetsIE()
 );

 #endif  // HAVE_INF_ENGINE
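dnnBackendsAndTargetsIE() comes from OpenCV's dnn test common code; its body is not shown in this diff. A hedged sketch of what such a generator can look like (an illustrative assumption, not the real implementation):

    // Builds (backend, target) pairs for both Inference Engine API flavours,
    // limited to targets that are actually available at runtime.
    static testing::internal::ParamGenerator<std::tuple<Backend, Target> > backendsAndTargetsIE_sketch()
    {
        std::vector<std::tuple<Backend, Target> > params;
        for (Backend be : {DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019,
                           DNN_BACKEND_INFERENCE_ENGINE_NGRAPH})
            for (Target t : getAvailableTargets(be))  // queries the installed runtime
                params.push_back(std::make_tuple(be, t));
        return testing::ValuesIn(params);
    }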
View File
@@ -101,8 +101,7 @@ TEST_P(Test_ONNX_layers, Convolution)
 TEST_P(Test_ONNX_layers, Convolution3D)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("Test is enabled starts from 2019R1");
+    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
     if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
         throw SkipTestException("Only CPU and CUDA is supported");
@@ -113,10 +112,10 @@ TEST_P(Test_ONNX_layers, Convolution3D)
 TEST_P(Test_ONNX_layers, Two_convolution)
 {
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
 #endif
     // Reference output values are in range [-0.855, 0.611]
     testONNXModels("two_convolution");
@@ -133,11 +132,15 @@ TEST_P(Test_ONNX_layers, Deconvolution)
 TEST_P(Test_ONNX_layers, Deconvolution3D)
 {
-#if defined(INF_ENGINE_RELEASE)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
+    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
-    if ((backend != DNN_BACKEND_INFERENCE_ENGINE || target != DNN_TARGET_CPU) && backend != DNN_BACKEND_CUDA)
-        throw SkipTestException("Only DLIE backend on CPU, and CUDA is supported");
+    if (backend == DNN_BACKEND_CUDA)
+    {
+        // ok
+    }
+    else if (backend == DNN_BACKEND_OPENCV || target != DNN_TARGET_CPU)
+        throw SkipTestException("Only DLIE backend on CPU is supported");
     testONNXModels("deconv3d");
     testONNXModels("deconv3d_bias");
     testONNXModels("deconv3d_pad");
@@ -173,8 +176,17 @@ TEST_P(Test_ONNX_layers, ReduceMean)
 TEST_P(Test_ONNX_layers, ReduceMean3D)
 {
-    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-        throw SkipTestException("Only CPU and CUDA is supported");
+    if (backend == DNN_BACKEND_CUDA)
+    {
+        // ok
+    }
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
+    else if (target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");
     testONNXModels("reduce_mean3d");
 }
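The INF_ENGINE_VER_MAJOR_* guards used throughout compare the packed INF_ENGINE_RELEASE value (e.g. 2019010000 for 2019 R1, 2018050000 for 2018 R5). A hedged sketch of the comparison, assuming the packing convention used by OpenCV's Inference Engine wrapper:

    // Sketch only; the real macros live in OpenCV's IE integration header.
    // Dividing by 10000 strips the patch digits, leaving a comparable
    // year+release number: 2019010000 / 10000 == 201901.
    #define SKETCH_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) <  ((ver) / 10000))
    #define SKETCH_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))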
@@ -185,11 +197,11 @@ TEST_P(Test_ONNX_layers, MaxPooling_Sigmoid)
 TEST_P(Test_ONNX_layers, Concatenation)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
-        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
-        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
-        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     }
     testONNXModels("concatenation");
 }
@@ -197,10 +209,12 @@ TEST_P(Test_ONNX_layers, Concatenation)
 TEST_P(Test_ONNX_layers, Eltwise3D)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    throw SkipTestException("Test is enabled starts from 2019R1");
+    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
-        throw SkipTestException("Only CPU on DLIE backend is supported");
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
     testONNXModels("eltwise3d");
 }
@@ -212,30 +226,54 @@ TEST_P(Test_ONNX_layers, AveragePooling)
 TEST_P(Test_ONNX_layers, MaxPooling3D)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    throw SkipTestException("Test is enabled starts from 2019R1");
+    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
-    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-        throw SkipTestException("Only CPU and CUDA is supported");
+    if (backend == DNN_BACKEND_CUDA)
+    {
+        // ok
+    }
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
+    else if (target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");
     testONNXModels("max_pool3d", npy, 0, 0, false, false);
 }

 TEST_P(Test_ONNX_layers, AvePooling3D)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    throw SkipTestException("Test is enabled starts from 2019R1");
+    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
-    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-        throw SkipTestException("Only CPU and CUDA is supported");
+    if (backend == DNN_BACKEND_CUDA)
+    {
+        // ok
+    }
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
+    else if (target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");
     testONNXModels("ave_pool3d");
 }

 TEST_P(Test_ONNX_layers, PoolConv3D)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    throw SkipTestException("Test is enabled starts from 2019R1");
+    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
-    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-        throw SkipTestException("Only CPU and CUDA is supported");
+    if (backend == DNN_BACKEND_CUDA)
+    {
+        // ok
+    }
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
+    else if (target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");
    testONNXModels("pool_conv_3d");
 }
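All of the 3D tests above encode the same support matrix; the expanded if/else-if chain is equivalent to this predicate (a sketch, not code from the diff):

    // True when a 3D layer test should actually run: CUDA on any target,
    // or any backend on CPU. IE backends on non-CPU targets are tag-skipped
    // rather than silently rejected, which is the point of the rewrite.
    static bool runs3DLayerTest(Backend backend, Target target)
    {
        return backend == DNN_BACKEND_CUDA || target == DNN_TARGET_CPU;
    }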
@@ -246,22 +284,22 @@ TEST_P(Test_ONNX_layers, BatchNormalization)
 TEST_P(Test_ONNX_layers, BatchNormalization3D)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
-        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
-        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
-        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     }
     testONNXModels("batch_norm_3d");
 }

 TEST_P(Test_ONNX_layers, Transpose)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
-        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
-        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
-        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     }
     testONNXModels("transpose");
 }
@@ -270,17 +308,17 @@ TEST_P(Test_ONNX_layers, Multiplication)
 {
     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     testONNXModels("mul");
 }

 TEST_P(Test_ONNX_layers, Constant)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
     testONNXModels("constant");
 }
@@ -346,10 +384,10 @@ TEST_P(Test_ONNX_layers, Div)
 TEST_P(Test_ONNX_layers, DynamicReshape)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
-        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
-        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
+        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     }
     testONNXModels("dynamic_reshape");
 }
@@ -386,8 +424,10 @@ TEST_P(Test_ONNX_layers, Softmax)
 TEST_P(Test_ONNX_layers, Split_EltwiseMax)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     testONNXModels("split_max");
 }
@@ -429,8 +469,8 @@ TEST_P(Test_ONNX_nets, Squeezenet)
 TEST_P(Test_ONNX_nets, Googlenet)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);

     const String model = _tf("models/googlenet.onnx", false);
@@ -459,9 +499,9 @@ TEST_P(Test_ONNX_nets, CaffeNet)
 {
     applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019030000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2019R3);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
     testONNXModels("caffenet", pb);
 }
@@ -470,9 +510,9 @@ TEST_P(Test_ONNX_nets, RCNN_ILSVRC13)
 {
     applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019030000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
            && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_2019R3);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
     // Reference output values are in range [-4.992, -1.161]
     testONNXModels("rcnn_ilsvrc13", pb, 0.0045);
@@ -514,12 +554,12 @@ TEST_P(Test_ONNX_nets, ResNet101_DUC_HDC)
     applyTestTag(CV_TEST_TAG_VERYLONG);
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R1, CV_TEST_TAG_DNN_SKIP_IE_2019R1_1);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
 #endif
     if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_OPENCL)
     {
@@ -537,15 +577,15 @@ TEST_P(Test_ONNX_nets, TinyYolov2)
     if (cvtest::skipUnstableTests)
         throw SkipTestException("Skip unstable test");
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019
         && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
     )
-        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);

-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
 #endif

     // output range: [-11; 8]
@@ -572,11 +612,11 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)
         (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB),
         CV_TEST_TAG_DEBUG_LONG
     );
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
-        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
-        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
-        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     }

     double l1 = default_l1;
@@ -586,7 +626,7 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)
         l1 = 0.009;
         lInf = 0.035;
     }
-    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU) {
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_CPU) {
         l1 = 4.6e-5;
         lInf = 1.9e-4;
     }
@@ -596,10 +636,10 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)
 TEST_P(Test_ONNX_nets, Emotion_ferplus)
 {
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
 #endif

     double l1 = default_l1;
@@ -608,12 +648,12 @@ TEST_P(Test_ONNX_nets, Emotion_ferplus)
     // Output values are in range [-2.011, 2.111]
     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
         l1 = 0.007;
-    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_OPENCL_FP16)
     {
         l1 = 0.021;
         lInf = 0.034;
     }
-    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL)) {
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (target == DNN_TARGET_CPU || target == DNN_TARGET_OPENCL)) {
         l1 = 2.4e-4;
         lInf = 6e-4;
     }
@@ -636,7 +676,7 @@ TEST_P(Test_ONNX_nets, DenseNet121)
 TEST_P(Test_ONNX_nets, Inception_v1)
 {
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
 #endif
     testONNXModels("inception_v1", pb);
@@ -644,11 +684,11 @@ TEST_P(Test_ONNX_nets, Inception_v1)
 TEST_P(Test_ONNX_nets, Shufflenet)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
-        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
-        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
-        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     }
     testONNXModels("shufflenet", pb);
 }
@@ -656,10 +696,18 @@ TEST_P(Test_ONNX_nets, Shufflenet)
 TEST_P(Test_ONNX_nets, Resnet34_kinetics)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    throw SkipTestException("Test is enabled starts from 2019R1");
+    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
-    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-        throw SkipTestException("Only CPU and CUDA is supported");
+    if (backend == DNN_BACKEND_CUDA)
+    {
+        // ok
+    }
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
+    else if (target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");

     String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx", false);
     Mat image0 = imread(findDataFile("dnn/dog416.png"));
@@ -679,6 +727,9 @@ TEST_P(Test_ONNX_nets, Resnet34_kinetics)
     lp.set("order", DictValue::arrayInt<int*>(&order[0], 4));
     permute.addLayerToPrev("perm", "Permute", lp);

+    permute.setPreferableBackend(backend);
+    permute.setPreferableTarget(target);
+
     permute.setInput(blob0);
     Mat input0 = permute.forward().clone();
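The last hunk matters because the helper net that permutes the input now also runs on the tested backend, so its backend/target must be set before the first forward(). A hedged sketch of the same helper-net trick with an illustrative NHWC-to-NCHW order (the actual order used by the kinetics test sits outside this hunk):

    #include <opencv2/dnn.hpp>
    using namespace cv;
    using namespace cv::dnn;

    // Builds a throwaway single-layer Net whose only job is to permute axes.
    static Mat toNCHW(const Mat& nhwcBlob)
    {
        Net permute;
        LayerParams lp;
        int order[] = {0, 3, 1, 2};  // N,H,W,C -> N,C,H,W
        lp.set("order", DictValue::arrayInt<int*>(&order[0], 4));
        permute.addLayerToPrev("perm", "Permute", lp);
        permute.setInput(nhwcBlob);
        return permute.forward().clone();  // clone before the Net goes out of scope
    }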
View File
@@ -134,10 +134,19 @@ TEST_P(Test_TensorFlow_layers, conv)
 TEST_P(Test_TensorFlow_layers, Convolution3D)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    throw SkipTestException("Test is enabled starts from 2019R1");
+    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
-    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-        throw SkipTestException("Only CPU and CUDA is supported");
+    if (backend == DNN_BACKEND_CUDA)
+    {
+        // ok
+    }
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
+    else if (target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");
     runTensorFlowNet("conv3d");
 }
@@ -147,12 +156,12 @@ TEST_P(Test_TensorFlow_layers, padding)
     runTensorFlowNet("spatial_padding");
     runTensorFlowNet("mirror_pad");
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019020000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
         if (target == DNN_TARGET_MYRIAD)
-            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2019R3, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
         if (target == DNN_TARGET_OPENCL_FP16)
-            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_2019R3, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
     }
 #endif
     runTensorFlowNet("keras_pad_concat");
@@ -199,11 +208,11 @@ TEST_P(Test_TensorFlow_layers, batch_norm)
 TEST_P(Test_TensorFlow_layers, batch_norm3D)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
     {
-        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
-        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
-        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
         throw SkipTestException("");
     }
     runTensorFlowNet("batch_norm3d");
@@ -211,8 +220,10 @@ TEST_P(Test_TensorFlow_layers, batch_norm3D)
 TEST_P(Test_TensorFlow_layers, slim_batch_norm)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     // Output values range: [-40.0597, 207.827]
     double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.041 : default_l1;
     double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.33 : default_lInf;
@@ -229,8 +240,10 @@ TEST_P(Test_TensorFlow_layers, pooling)
 TEST_P(Test_TensorFlow_layers, max_pool_grad)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     runTensorFlowNet("max_pool_grad");
 }
@@ -239,10 +252,10 @@ TEST_P(Test_TensorFlow_layers, ave_pool_same)
 {
     // Reference output values are in range [-0.519531, 0.112976]
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
     runTensorFlowNet("ave_pool_same");
 }
@@ -250,20 +263,38 @@ TEST_P(Test_TensorFlow_layers, ave_pool_same)
 TEST_P(Test_TensorFlow_layers, MaxPooling3D)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    throw SkipTestException("Test is enabled starts from 2019R1");
+    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
-    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-        throw SkipTestException("Only CPU and CUDA is supported");
+    if (backend == DNN_BACKEND_CUDA)
+    {
+        // ok
+    }
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
+    else if (target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");
     runTensorFlowNet("max_pool3d");
 }

 TEST_P(Test_TensorFlow_layers, AvePooling3D)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    throw SkipTestException("Test is enabled starts from 2019R1");
+    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
-    if (target != DNN_TARGET_CPU && backend != DNN_BACKEND_CUDA)
-        throw SkipTestException("Only CPU and CUDA is supported");
+    if (backend == DNN_BACKEND_CUDA)
+    {
+        // ok
+    }
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
+    else if (target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");
     runTensorFlowNet("ave_pool3d");
 }
@@ -296,8 +327,10 @@ TEST_P(Test_TensorFlow_layers, matmul)
 TEST_P(Test_TensorFlow_layers, reshape)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     runTensorFlowNet("shift_reshape_no_reorder");
     runTensorFlowNet("reshape_no_reorder");
     runTensorFlowNet("reshape_reduce");
@@ -307,10 +340,10 @@ TEST_P(Test_TensorFlow_layers, reshape)
 TEST_P(Test_TensorFlow_layers, flatten)
 {
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
 #endif

     runTensorFlowNet("flatten", true);
@@ -325,8 +358,8 @@ TEST_P(Test_TensorFlow_layers, unfused_flatten)
 TEST_P(Test_TensorFlow_layers, leaky_relu)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_OPENCL)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
     runTensorFlowNet("leaky_relu_order1");
     runTensorFlowNet("leaky_relu_order2");
@@ -336,10 +369,10 @@ TEST_P(Test_TensorFlow_layers, leaky_relu)
 TEST_P(Test_TensorFlow_layers, l2_normalize)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif

     runTensorFlowNet("l2_normalize");
@@ -349,14 +382,15 @@ TEST_P(Test_TensorFlow_layers, l2_normalize)
 TEST_P(Test_TensorFlow_layers, l2_normalize_3d)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019
         && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
     )
-        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
 #endif

     runTensorFlowNet("l2_normalize_3d");
@@ -367,11 +401,11 @@ class Test_TensorFlow_nets : public DNNTestLayer {};
 TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
 {
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
     {
 #if INF_ENGINE_VER_MAJOR_GE(2019020000)
         if (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
     }
 #endif
@@ -406,9 +440,9 @@ TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
 {
     applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD &&
         getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif

     checkBackend();
@@ -460,7 +494,7 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
     float detectionConfThresh = (target == DNN_TARGET_MYRIAD) ? 0.35 : 0.3;
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD &&
         getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
     {
         scoreDiff = 0.061;
@@ -483,16 +517,22 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
     static std::string names[] = {"faster_rcnn_inception_v2_coco_2018_01_28",
                                   "faster_rcnn_resnet50_coco_2018_01_28"};

-    checkBackend();
 #ifdef INF_ENGINE_RELEASE
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
         (INF_ENGINE_VER_MAJOR_LT(2019020000) || target != DNN_TARGET_CPU))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
+    // segfault: inference-engine/thirdparty/clDNN/src/gpu/detection_output_cpu.cpp:111:
+    // Assertion `prior_height > 0' failed.
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+
     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);

-    double scoresDiff = backend == DNN_BACKEND_INFERENCE_ENGINE ? 2.9e-5 : 1e-5;
+    checkBackend();
+    double scoresDiff = backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ? 2.9e-5 : 1e-5;
     for (int i = 0; i < 2; ++i)
     {
         std::string proto = findDataFile("dnn/" + names[i] + ".pbtxt");
@@ -515,8 +555,9 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
 TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD_PPN)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
     checkBackend();
     std::string proto = findDataFile("dnn/ssd_mobilenet_v1_ppn_coco.pbtxt");
@@ -586,12 +627,12 @@ TEST_P(Test_TensorFlow_nets, EAST_text_detection)
     );

 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);

-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16 &&
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_OPENCL_FP16 &&
         INF_ENGINE_VER_MAJOR_EQ(2019020000))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif

     checkBackend();
@@ -624,7 +665,7 @@ TEST_P(Test_TensorFlow_nets, EAST_text_detection)
     double l1_geometry = default_l1, lInf_geometry = default_lInf;
     if (target == DNN_TARGET_OPENCL_FP16)
     {
-        lInf_scores = backend == DNN_BACKEND_INFERENCE_ENGINE ? 0.16 : 0.11;
+        lInf_scores = backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ? 0.16 : 0.11;
         l1_geometry = 0.28; lInf_geometry = 5.94;
     }
     else if (target == DNN_TARGET_MYRIAD)
@@ -684,8 +725,10 @@ TEST_P(Test_TensorFlow_layers, lstm)
 {
     if (backend == DNN_BACKEND_CUDA)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_CUDA); /* not supported */
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
     runTensorFlowNet("lstm", true);
@@ -694,12 +737,18 @@ TEST_P(Test_TensorFlow_layers, lstm)
 TEST_P(Test_TensorFlow_layers, split)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD &&
         getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     runTensorFlowNet("split");
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+}
+
+TEST_P(Test_TensorFlow_layers, split_equals)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     runTensorFlowNet("split_equals");
 }
@@ -711,9 +760,10 @@ TEST_P(Test_TensorFlow_layers, resize_nearest_neighbor)
 TEST_P(Test_TensorFlow_layers, slice)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
         (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     runTensorFlowNet("slice_4d");
     runTensorFlowNet("strided_slice");
 }
@@ -727,10 +777,10 @@ TEST_P(Test_TensorFlow_layers, softmax)
 TEST_P(Test_TensorFlow_layers, slim_softmax_v2)
 {
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD &&
         getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
 #endif
     runTensorFlowNet("slim_softmax_v2");
 }
@@ -743,8 +793,10 @@ TEST_P(Test_TensorFlow_layers, relu6)
 TEST_P(Test_TensorFlow_layers, subpixel)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     runTensorFlowNet("subpixel");
 }
@@ -763,10 +815,10 @@ TEST_P(Test_TensorFlow_layers, resize_bilinear)
 TEST_P(Test_TensorFlow_layers, squeeze)
 {
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2
     )
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
 #endif
     int inpShapes[][4] = {{1, 3, 4, 2}, {1, 3, 1, 2}, {1, 3, 4, 1}, {1, 3, 4, 1}};  // TensorFlow's shape (NHWC)
     int outShapes[][3] = {{3, 4, 2}, {1, 3, 2}, {1, 3, 4}, {1, 3, 4}};
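Reading the squeeze fixtures above (a hedged gloss; which axis each case squeezes is configured in the test body, outside this hunk):

    // {1, 3, 4, 2} -> drop axis 0 -> {3, 4, 2}
    // {1, 3, 1, 2} -> drop axis 2 -> {1, 3, 2}
    // {1, 3, 4, 1} -> drop axis 3 -> {1, 3, 4}
    // Each input loses exactly one singleton axis, mapping the 4-D
    // NHWC shape to the expected 3-D output shape.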
View File
@@ -100,7 +100,7 @@ public:
         lInf = lInf ? lInf : default_lInf;
         normAssert(outRef, outBlobs[0], "", l1, lInf);
-        if (check2ndBlob && backend != DNN_BACKEND_INFERENCE_ENGINE)
+        if (check2ndBlob && backend == DNN_BACKEND_OPENCV)
         {
             Mat out2 = outBlobs[1];
             Mat ref2 = readTorchBlob(_tf(prefix + "_output_2" + suffix), isBinary);
@@ -136,8 +136,8 @@ TEST_P(Test_Torch_layers, run_reshape_change_batch_size)
 TEST_P(Test_Torch_layers, run_reshape)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     runTorchNet("net_reshape_batch");
     runTorchNet("net_reshape_channels", "", false, true);
 }
@@ -209,8 +209,8 @@ TEST_P(Test_Torch_layers, net_lp_pooling)
 TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     runTorchNet("net_conv_gemm_lrn", "", false, true, true,
                 target == DNN_TARGET_OPENCL_FP16 ? 0.046 : 0.0,
                 target == DNN_TARGET_OPENCL_FP16 ? 0.023 : 0.0);
@@ -237,9 +237,10 @@ TEST_P(Test_Torch_layers, net_padding)
 TEST_P(Test_Torch_layers, net_non_spatial)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
         (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
     runTorchNet("net_non_spatial", "", false, true);
 }
@@ -253,9 +254,10 @@ TEST_P(Test_Torch_layers, run_paralel)
 TEST_P(Test_Torch_layers, net_residual)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018050000
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && (target == DNN_TARGET_OPENCL ||
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && (target == DNN_TARGET_OPENCL ||
         target == DNN_TARGET_OPENCL_FP16))
-        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
     runTorchNet("net_residual", "", false, true);
 }
@@ -265,8 +267,8 @@ class Test_Torch_nets : public DNNTestLayer {};
 TEST_P(Test_Torch_nets, OpenFace_accuracy)
 {
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
 #endif
     checkBackend();
@@ -341,11 +343,11 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
     checkBackend();
     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
         throw SkipTestException("");
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
     {
-        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
-        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL);
-        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+        if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_OPENCL) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+        if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
         throw SkipTestException("");
     }
@@ -397,17 +399,17 @@ TEST_P(Test_Torch_nets, ENet_accuracy)
 TEST_P(Test_Torch_nets, FastNeuralStyle_accuracy)
 {
 #if defined INF_ENGINE_RELEASE
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD
         && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
 #endif
     checkBackend();
 #if defined(INF_ENGINE_RELEASE)
 #if INF_ENGINE_RELEASE <= 2018050000
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2018R5);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_OPENCL)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 #endif


@@ -219,6 +219,8 @@ static inline void applyTestTag(const std::string& tag1, const std::string& tag2
 { applyTestTag_(tag1); applyTestTag_(tag2); applyTestTag_(tag3); checkTestTags(); }
 static inline void applyTestTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4)
 { applyTestTag_(tag1); applyTestTag_(tag2); applyTestTag_(tag3); applyTestTag_(tag4); checkTestTags(); }
+static inline void applyTestTag(const std::string& tag1, const std::string& tag2, const std::string& tag3, const std::string& tag4, const std::string& tag5)
+{ applyTestTag_(tag1); applyTestTag_(tag2); applyTestTag_(tag3); applyTestTag_(tag4); applyTestTag_(tag5); checkTestTags(); }
 /** Append global skip test tags
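The new five-tag overload added above follows the same delegation scheme as the existing ones: each tag is recorded through applyTestTag_(), and a single checkTestTags() at the end decides whether the current test must be skipped. A hedged usage sketch; this exact tag combination is hypothetical, though every constant already appears elsewhere in this test suite:

// Hypothetical call site (not from this patch): one skip that records the
// generic IE failure, the target, the VPU flavour, the backend
// implementation, and the fact that the failure is version-specific.
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE,
             CV_TEST_TAG_DNN_SKIP_IE_MYRIAD,
             CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X,
             CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER,
             CV_TEST_TAG_DNN_SKIP_IE_VERSION);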