opencv/modules/dnn/src/ie_ngraph.hpp
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#ifndef __OPENCV_DNN_IE_NGRAPH_HPP__
#define __OPENCV_DNN_IE_NGRAPH_HPP__

#include "op_inf_engine.hpp"

#ifdef HAVE_DNN_NGRAPH
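// ngraph headers trip MSVC warnings C4245 (signed/unsigned conversion) and
// C4268 ('const' static data zero-initialized by a compiler-generated default
// constructor); suppress them for the duration of the include only.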
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4245)
#pragma warning(disable : 4268)
#endif
#include <ngraph/ngraph.hpp>
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif  // HAVE_DNN_NGRAPH

namespace cv { namespace dnn {

#ifdef HAVE_DNN_NGRAPH

class InfEngineNgraphNode;

class InfEngineNgraphNet
{
public:
    InfEngineNgraphNet(detail::NetImplBase& netImpl);
    InfEngineNgraphNet(detail::NetImplBase& netImpl, InferenceEngine::CNNNetwork& net);

    void addOutput(const Ptr<InfEngineNgraphNode>& node);

    bool isInitialized();

    void init(Target targetId);

    void forward(const std::vector<Ptr<BackendWrapper> >& outBlobsWrappers, bool isAsync);

    void initPlugin(InferenceEngine::CNNNetwork& net);

    ngraph::ParameterVector setInputs(const std::vector<cv::Mat>& inputs, const std::vector<std::string>& names);

    void addBlobs(const std::vector<cv::Ptr<BackendWrapper> >& ptrs);

    void createNet(Target targetId);
    void setNodePtr(std::shared_ptr<ngraph::Node>* ptr);

    void reset();

//private:
    detail::NetImplBase& netImpl_;

    void release();
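    // Connected-component helpers: dfs() walks the ngraph graph depth-first,
    // and getNumComponents() groups the collected nodes into `components`,
    // so that disconnected subgraphs can be handled separately.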
    int getNumComponents();
    void dfs(std::shared_ptr<ngraph::Node>& node, std::vector<std::shared_ptr<ngraph::Node>>& comp,
             std::unordered_map<std::string, bool>& used);

    ngraph::ParameterVector inputs_vec;
    std::shared_ptr<ngraph::Function> ngraph_function;
    std::vector<std::vector<std::shared_ptr<ngraph::Node>>> components;
    std::unordered_map<std::string, std::shared_ptr<ngraph::Node>* > all_nodes;

    InferenceEngine::ExecutableNetwork netExec;
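    // Blobs are keyed by tensor name. With OpenVINO >= 2022.1 the new API's
    // ov::Tensor is used; earlier releases go through InferenceEngine::Blob.
    // A minimal sketch of wrapping an NCHW float cv::Mat `m` without a copy
    // (illustrative only; the shape here is hypothetical):
    //
    //   #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
    //   ov::Tensor t(ov::element::f32, ov::Shape{1, 3, 224, 224}, m.data);
    //   #else
    //   InferenceEngine::TensorDesc desc(InferenceEngine::Precision::FP32,
    //           {1, 3, 224, 224}, InferenceEngine::Layout::NCHW);
    //   auto t = InferenceEngine::make_shared_blob<float>(desc, (float*)m.data);
    //   #endif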
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
    std::map<std::string, ov::Tensor> allBlobs;
#else
    InferenceEngine::BlobMap allBlobs;
#endif
    std::string device_name;
    bool isInit = false;

    struct NgraphReqWrapper
    {
        NgraphReqWrapper() : isReady(true) {}

        void makePromises(const std::vector<Ptr<BackendWrapper> >& outs);

        InferenceEngine::InferRequest req;
        std::vector<cv::AsyncPromise> outProms;
        std::vector<std::string> outsNames;
        bool isReady;
    };
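    // Async flow (per the declarations above): forward() picks an idle wrapper
    // (isReady == true) or creates one, makePromises() pairs every output with
    // a cv::AsyncPromise, and the InferRequest completion callback fulfils the
    // promises and marks the wrapper ready for reuse.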
    std::vector<Ptr<NgraphReqWrapper> > infRequests;

    InferenceEngine::CNNNetwork cnn;
    bool hasNetOwner;
    std::unordered_map<std::string, InfEngineNgraphNode*> requestedOutputs;
};
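
// A rough sketch of the call sequence the dnn Net implementation is expected
// to drive (hypothetical driver code; the real call sites live in the layer
// and net_impl code, not in this header):
//
//   InfEngineNgraphNet ieNet(netImpl);
//   ieNet.addOutput(outNode);            // register each requested output
//   ieNet.createNet(DNN_TARGET_CPU);     // assemble the ngraph::Function
//   ieNet.init(DNN_TARGET_CPU);          // compile for the selected plugin
//   ieNet.addBlobs(wrappers);            // bind input/output memory
//   ieNet.forward(outWrappers, /*isAsync=*/false);
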
class InfEngineNgraphNode : public BackendNode
{
public:
    InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& nodes, Ptr<Layer>& layer,
                        std::vector<Mat*>& inputs, std::vector<Mat>& outputs,
                        std::vector<Mat>& internals);

    InfEngineNgraphNode(std::shared_ptr<ngraph::Node>&& _node);
    InfEngineNgraphNode(const std::shared_ptr<ngraph::Node>& _node);

    void setName(const std::string& name);
    // The nGraph node that produces the outputs of this layer.
    std::shared_ptr<ngraph::Node> node;
    Ptr<InfEngineNgraphNet> net;
    Ptr<dnn::Layer> cvLayer;
};
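
// A minimal sketch of how a layer typically builds its backend node
// (illustrative; `input` stands for the ngraph output of the preceding node):
//
//   auto relu = std::make_shared<ngraph::op::Relu>(input);
//   return Ptr<BackendNode>(new InfEngineNgraphNode(relu));
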
class NgraphBackendWrapper : public BackendWrapper
{
public:
    NgraphBackendWrapper(int targetId, const Mat& m);
    NgraphBackendWrapper(Ptr<BackendWrapper> wrapper);
    ~NgraphBackendWrapper();

    static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);
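    // Host<->backend synchronization: setHostDirty() marks the cv::Mat side as
    // the up-to-date copy; copyToHost() pulls data back out of `blob`.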
    virtual void copyToHost() CV_OVERRIDE;
    virtual void setHostDirty() CV_OVERRIDE;

    Mat* host;
    std::string name;
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2022_1)
    ov::Tensor blob;
#else
    InferenceEngine::Blob::Ptr blob;
#endif
    AsyncArray futureMat;
};
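
// A minimal usage sketch (hypothetical; real wrappers are created by the dnn
// Net implementation when the nGraph backend is selected):
//
//   cv::Mat m = cv::Mat::zeros(1, 3, CV_32F);
//   cv::Ptr<BackendWrapper> w(new NgraphBackendWrapper(DNN_TARGET_CPU, m));
//   w->setHostDirty();  // host data changed; the backend copy is now stale
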
// This is a fake class used to run networks from Model Optimizer. Objects of
// this class simulate the responses of layers that are imported by OpenCV and
// supported by Inference Engine. The main difference is that they do not
// perform a forward pass themselves.
class NgraphBackendLayer : public Layer
{
public:
    NgraphBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {}

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE;

    virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                         OutputArrayOfArrays internals) CV_OVERRIDE;

    virtual bool supportBackend(int backendId) CV_OVERRIDE;

private:
    InferenceEngine::CNNNetwork t_net;
};
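
// Note: getMemoryShapes() is expected to infer output shapes by reshaping the
// wrapped t_net with the requested input shapes (the implementation lives in
// ie_ngraph.cpp), and forward() is never meant to run; execution happens
// entirely inside Inference Engine.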
#endif  // HAVE_DNN_NGRAPH

}}  // namespace cv::dnn

#endif  // __OPENCV_DNN_IE_NGRAPH_HPP__