// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
#ifndef __OPENCV_DNN_OP_INF_ENGINE_HPP__
#define __OPENCV_DNN_OP_INF_ENGINE_HPP__
#include "opencv2/core/cvdef.h"
#include "opencv2/core/cvstd.hpp"
#include "opencv2/dnn.hpp"
#include "opencv2/dnn/utils/inference_engine.hpp"
#ifdef HAVE_INF_ENGINE
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsuggest-override"
#endif
#include <inference_engine.hpp>
#if defined(__GNUC__) && __GNUC__ >= 5
//#pragma GCC diagnostic pop
#endif
#define INF_ENGINE_RELEASE_2018R3 2018030000
#define INF_ENGINE_RELEASE_2018R4 2018040000
#define INF_ENGINE_RELEASE_2018R5 2018050000
#ifndef INF_ENGINE_RELEASE
#warning("IE version has not been provided via the command line. Using 2018R5 by default")
#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2018R5
#endif
#define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LT(ver) (((INF_ENGINE_RELEASE) / 10000) < ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
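// INF_ENGINE_RELEASE encodes versions as YYYYRRPPPP (year, release, patch), so the
// comparisons above divide by 10000 to compare only the year/release part. For example,
// INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R4) holds for any 2018R4+ build.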
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
#include <ie_builders.hpp>
#endif
#endif // HAVE_INF_ENGINE
namespace cv { namespace dnn {
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2018R5)
class InfEngineBackendNet : public InferenceEngine::ICNNNetwork
{
public:
InfEngineBackendNet();
InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
virtual void Release() noexcept CV_OVERRIDE;
void setPrecision(InferenceEngine::Precision p) noexcept;
virtual InferenceEngine::Precision getPrecision() noexcept;
virtual InferenceEngine::Precision getPrecision() const noexcept;
virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) noexcept /*CV_OVERRIDE*/;
virtual void getOutputsInfo(InferenceEngine::OutputsDataMap &out) const noexcept /*CV_OVERRIDE*/;
virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) noexcept /*CV_OVERRIDE*/;
virtual void getInputsInfo(InferenceEngine::InputsDataMap &inputs) const noexcept /*CV_OVERRIDE*/;
virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) noexcept;
virtual InferenceEngine::InputInfo::Ptr getInput(const std::string &inputName) const noexcept;
virtual InferenceEngine::StatusCode serialize(const std::string &xmlPath, const std::string &binPath, InferenceEngine::ResponseDesc* resp) const noexcept;
virtual void getName(char *pName, size_t len) noexcept;
virtual void getName(char *pName, size_t len) const noexcept;
virtual const std::string& getName() const noexcept;
virtual size_t layerCount() noexcept;
virtual size_t layerCount() const noexcept;
virtual InferenceEngine::DataPtr& getData(const char *dname) noexcept CV_OVERRIDE;
virtual void addLayer(const InferenceEngine::CNNLayerPtr &layer) noexcept CV_OVERRIDE;
virtual InferenceEngine::StatusCode addOutput(const std::string &layerName,
                                              size_t outputIndex = 0,
                                              InferenceEngine::ResponseDesc *resp = nullptr) noexcept;
virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                   InferenceEngine::CNNLayerPtr &out,
                                                   InferenceEngine::ResponseDesc *resp) noexcept;
virtual InferenceEngine::StatusCode getLayerByName(const char *layerName,
                                                   InferenceEngine::CNNLayerPtr &out,
                                                   InferenceEngine::ResponseDesc *resp) const noexcept;
virtual void setTargetDevice(InferenceEngine::TargetDevice device) noexcept CV_OVERRIDE;
virtual InferenceEngine::TargetDevice getTargetDevice() noexcept;
virtual InferenceEngine::TargetDevice getTargetDevice() const noexcept;
virtual InferenceEngine::StatusCode setBatchSize(const size_t size) noexcept CV_OVERRIDE;
virtual InferenceEngine::StatusCode setBatchSize(size_t size, InferenceEngine::ResponseDesc* responseDesc) noexcept;
virtual size_t getBatchSize() const noexcept CV_OVERRIDE;
virtual InferenceEngine::StatusCode AddExtension(const InferenceEngine::IShapeInferExtensionPtr& extension, InferenceEngine::ResponseDesc* resp) noexcept CV_OVERRIDE;
virtual InferenceEngine::StatusCode reshape(const InputShapes& inputShapes, InferenceEngine::ResponseDesc* resp) noexcept CV_OVERRIDE;
void init(int targetId);
void addBlobs(const std::vector<Ptr<BackendWrapper> >& wrappers);
void forward();
bool isInitialized();
private:
std::vector<InferenceEngine::CNNLayerPtr> layers;
InferenceEngine::InputsDataMap inputs;
InferenceEngine::OutputsDataMap outputs;
InferenceEngine::BlobMap inpBlobs;
InferenceEngine::BlobMap outBlobs;
InferenceEngine::BlobMap allBlobs;
InferenceEngine::TargetDevice targetDevice;
InferenceEngine::Precision precision;
InferenceEngine::InferenceEnginePluginPtr enginePtr;
InferenceEngine::InferencePlugin plugin;
InferenceEngine::ExecutableNetwork netExec;
InferenceEngine::InferRequest infRequest;
// For models imported from the Model Optimizer we need to manage their lifetime.
InferenceEngine::CNNNetwork netOwner;
// There is no way to check if netOwner is initialized or not, so we use
// a separate flag to determine if the model has been loaded from IR.
bool hasNetOwner;
std::string name;
void initPlugin(InferenceEngine::ICNNNetwork& net);
};
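// A minimal usage sketch of the pre-R5 path (illustrative only; cnnLayer and wrappers
// are hypothetical variables, not part of this header):
//
//   Ptr<InfEngineBackendNet> ieNet(new InfEngineBackendNet());
//   ieNet->addLayer(cnnLayer);        // register a converted InferenceEngine::CNNLayerPtr
//   ieNet->addBlobs(wrappers);        // bind the input/output blobs
//   if (!ieNet->isInitialized())
//       ieNet->init(DNN_TARGET_CPU);  // pick a plugin and build the executable network
//   ieNet->forward();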
#else // IE >= R5
class InfEngineBackendNet
{
public:
InfEngineBackendNet();
InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
void addLayer(InferenceEngine::Builder::Layer& layer);
void addOutput(const std::string& name);
void connect(const std::vector<Ptr<BackendWrapper> >& inputs,
             const std::vector<Ptr<BackendWrapper> >& outputs,
             const std::string& layerName);
bool isInitialized();
void init(int targetId);
void forward();
void initPlugin(InferenceEngine::ICNNNetwork& net);
void addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs);
private:
InferenceEngine::Builder::Network netBuilder;
InferenceEngine::InferenceEnginePluginPtr enginePtr;
InferenceEngine::InferencePlugin plugin;
InferenceEngine::ExecutableNetwork netExec;
InferenceEngine::InferRequest infRequest;
InferenceEngine::BlobMap allBlobs;
InferenceEngine::BlobMap inpBlobs;
InferenceEngine::BlobMap outBlobs;
InferenceEngine::TargetDevice targetDevice;
InferenceEngine::CNNNetwork cnn;
bool hasNetOwner;
std::map<std::string, int> layers;
std::vector<std::string> requestedOutputs;
std::set<int> unconnectedLayersIds;
};
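// A minimal usage sketch of the R5 builder path (illustrative only; ieLayer, inputs and
// outputs are hypothetical variables, and "my_layer" is a hypothetical layer name):
//
//   InfEngineBackendNet net;
//   net.addLayer(ieLayer);                     // an InferenceEngine::Builder::Layer
//   net.connect(inputs, outputs, "my_layer");
//   net.addOutput("my_layer");
//   net.addBlobs(inputs);
//   net.addBlobs(outputs);
//   net.init(DNN_TARGET_CPU);
//   net.forward();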
#endif // IE < R5
class InfEngineBackendNode : public BackendNode
{
public:
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);
#else
InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& layer);
#endif
void connect(std::vector<Ptr<BackendWrapper> >& inputs,
             std::vector<Ptr<BackendWrapper> >& outputs);
// Inference Engine network object that allows us to obtain the outputs of this layer.
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
InferenceEngine::Builder::Layer layer;
Ptr<InfEngineBackendNet> net;
#else
InferenceEngine::CNNLayerPtr layer;
Ptr<InfEngineBackendNet> net;
#endif
};
class InfEngineBackendWrapper : public BackendWrapper
{
public:
InfEngineBackendWrapper(int targetId, const Mat& m);
InfEngineBackendWrapper(Ptr<BackendWrapper> wrapper);
~InfEngineBackendWrapper();
static Ptr<BackendWrapper> create(Ptr<BackendWrapper> wrapper);
virtual void copyToHost() CV_OVERRIDE;
virtual void setHostDirty() CV_OVERRIDE;
InferenceEngine::DataPtr dataPtr;
InferenceEngine::Blob::Ptr blob;
};
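// A minimal sketch of wrapping a host Mat for the IE backend (illustrative only; the
// target and the Mat shape are arbitrary):
//
//   Mat m(1, 1000, CV_32F);
//   Ptr<BackendWrapper> w(new InfEngineBackendWrapper(DNN_TARGET_CPU, m));
//   InferenceEngine::DataPtr data = infEngineDataNode(w);  // declared below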
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, InferenceEngine::Layout layout = InferenceEngine::Layout::ANY);
InferenceEngine::Blob::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape, InferenceEngine::Layout layout);
InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);
Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
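// A minimal round-trip sketch for the helpers above (illustrative only; the Mat is a
// hypothetical weights buffer):
//
//   Mat weights(1, 256, CV_32F);
//   InferenceEngine::Blob::Ptr blob = wrapToInfEngineBlob(weights);  // wrap the Mat as an IE blob
//   InferenceEngine::Blob::Ptr fp16 = convertFp16(blob);             // FP32 -> FP16, new allocation
//   Mat back = infEngineBlobToMat(blob);                             // expose the blob contents as a Mat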
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
#endif
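// Illustrative use of addConstantData with the R5 builder API (hypothetical ieLayer and
// weightsMat; only available when the guard above is satisfied):
//
//   addConstantData("weights", wrapToInfEngineBlob(weightsMat, InferenceEngine::Layout::ANY), ieLayer);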
// This is a fake class used to run networks from the Model Optimizer. Objects of this
// class simulate responses of layers as if they were imported by OpenCV and supported by
// Inference Engine. The main difference is that they do not perform a forward pass.
class InfEngineBackendLayer : public Layer
{
public:
InfEngineBackendLayer(const InferenceEngine::CNNNetwork &t_net_) : t_net(t_net_) {}
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                             const int requiredOutputs,
                             std::vector<MatShape> &outputs,
                             std::vector<MatShape> &internals) const CV_OVERRIDE;
virtual void forward(InputArrayOfArrays inputs, OutputArrayOfArrays outputs,
                     OutputArrayOfArrays internals) CV_OVERRIDE;
virtual bool supportBackend(int backendId) CV_OVERRIDE;
private:
InferenceEngine::CNNNetwork t_net;
};
CV__DNN_EXPERIMENTAL_NS_BEGIN
bool isMyriadX();
CV__DNN_EXPERIMENTAL_NS_END
#endif // HAVE_INF_ENGINE
bool haveInfEngine();
void forwardInfEngine(Ptr<BackendNode>& node);
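// Typical guarded use of the two helpers above (illustrative only; "node" stands for a
// Ptr<BackendNode> created for the Inference Engine backend):
//
//   if (haveInfEngine())
//       forwardInfEngine(node);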
}} // namespace dnn, namespace cv
#endif // __OPENCV_DNN_OP_INF_ENGINE_HPP__