diff --git a/modules/dnn/src/ie_ngraph.cpp b/modules/dnn/src/ie_ngraph.cpp
index 7cac0c3593..f02ef45ef1 100644
--- a/modules/dnn/src/ie_ngraph.cpp
+++ b/modules/dnn/src/ie_ngraph.cpp
@@ -128,7 +128,6 @@ public:
         return true;
     }
 
-private:
     std::map<std::string, InferenceEngine::Parameter> params;
 };
 
@@ -136,15 +135,28 @@ private:
 class InfEngineNgraphCustomLayer : public InferenceEngine::ILayerExecImpl
 {
 public:
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+    explicit InfEngineNgraphCustomLayer(const std::shared_ptr<ngraph::Node>& _node)
+    {
+        node = std::dynamic_pointer_cast<NgraphCustomOp>(_node);
+        CV_Assert(node);
+        std::string implStr = node->params["impl"];
+        std::istringstream iss(implStr);
+#else
     explicit InfEngineNgraphCustomLayer(const InferenceEngine::CNNLayer& layer) : cnnLayer(layer)
     {
         std::istringstream iss(layer.GetParamAsString("impl"));
+#endif
         size_t ptr;
         iss >> ptr;
         cvLayer = (Layer*)ptr;
 
         std::vector<std::vector<size_t> > shapes;
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+        strToShapes(node->params["internals"], shapes);
+#else
         strToShapes(layer.GetParamAsString("internals"), shapes);
+#endif
         internals.resize(shapes.size());
         for (int i = 0; i < shapes.size(); ++i)
             internals[i].create(std::vector<int>(shapes[i].begin(), shapes[i].end()), CV_32F);
@@ -180,6 +192,29 @@ public:
     {
         std::vector<InferenceEngine::DataConfig> inDataConfig;
         std::vector<InferenceEngine::DataConfig> outDataConfig;
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+        InferenceEngine::SizeVector order;
+        size_t offset = std::numeric_limits<size_t>::max();
+        for (int i = 0; i < node->get_input_size(); ++i)
+        {
+            InferenceEngine::DataConfig conf;
+            auto shape = node->input_value(i).get_shape();
+            order.resize(shape.size());
+            std::iota(order.begin(), order.end(), 0);
+            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order, offset});
+            inDataConfig.push_back(conf);
+        }
+
+        for (int i = 0; i < node->get_output_size(); ++i)
+        {
+            InferenceEngine::DataConfig conf;
+            auto shape = node->output(i).get_shape();
+            order.resize(shape.size());
+            std::iota(order.begin(), order.end(), 0);
+            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order, offset});
+            outDataConfig.push_back(conf);
+        }
+#else
         for (auto& it : cnnLayer.insData)
         {
             InferenceEngine::DataConfig conf;
@@ -193,6 +228,7 @@ public:
             conf.desc = it->getTensorDesc();
             outDataConfig.push_back(conf);
         }
+#endif
 
         InferenceEngine::LayerConfig layerConfig;
         layerConfig.inConfs = inDataConfig;
@@ -209,12 +245,16 @@ public:
     }
 
 private:
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+    std::shared_ptr<NgraphCustomOp> node;
+#else
     InferenceEngine::CNNLayer cnnLayer;
+#endif
     dnn::Layer* cvLayer;
     std::vector<Mat> internals;
 };
-
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
 class InfEngineNgraphCustomLayerFactory : public InferenceEngine::ILayerImplFactory {
 public:
     explicit InfEngineNgraphCustomLayerFactory(const InferenceEngine::CNNLayer* layer) : cnnLayer(*layer)
     {
@@ -233,17 +273,29 @@ public:
 private:
     InferenceEngine::CNNLayer cnnLayer;
 };
+#endif
 
 class InfEngineNgraphExtension : public InferenceEngine::IExtension
 {
 public:
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
+    void Unload() noexcept override {}
+    void Release() noexcept override { delete this; }
+    void GetVersion(const InferenceEngine::Version*&) const noexcept override {}
+
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+    std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override {
+        return {"CPU"};
+    }
+
+    InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node,
+                                                       const std::string& implType) override {
+        if (std::dynamic_pointer_cast<NgraphCustomOp>(node) && implType == "CPU") {
+            return std::make_shared<InfEngineNgraphCustomLayer>(node);
+        }
+        return nullptr;
+    }
+#else
     virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
-#endif
-
-    virtual void Unload() noexcept {}
-    virtual void Release() noexcept {}
-    virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
 
     virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
                                                           InferenceEngine::ResponseDesc*) noexcept
@@ -260,6 +312,7 @@ public:
         factory = new InfEngineNgraphCustomLayerFactory(cnnLayer);
         return InferenceEngine::StatusCode::OK;
     }
+#endif
 };
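Note: the 2020.2+ code path above only provides the nGraph-node-based extension; registering it with the Inference Engine CPU plugin happens elsewhere in ie_ngraph.cpp and is not part of these hunks. A minimal sketch of that registration, assuming an InferenceEngine::Core instance named ie and the InfEngineNgraphExtension class from this patch; the exact surrounding setup is illustrative, not taken from this diff:

    // Sketch only: attach the custom-op extension to the CPU plugin so that
    // NgraphCustomOp nodes in a loaded network resolve to InfEngineNgraphCustomLayer.
    InferenceEngine::Core ie;
    ie.AddExtension(std::make_shared<InfEngineNgraphExtension>(), "CPU");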