Remove deprecated Inference Engine CPU extensions
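
Builds against OpenVINO 2020.2 and newer now use only the ngraph-based custom
layer API (getImplTypes/getImplementation). The deprecated CNNLayer-based
pieces (the CNNLayer constructor of InfEngineNgraphCustomLayer, the
InfEngineNgraphCustomLayerFactory class, and the getPrimitiveTypes/getFactoryFor
extension methods) stay available only behind
INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2) guards.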

commit 3bdd7bfbea
parent ee5ff71d55
Author: Dmitry Kurtaev
Date:   2020-06-22 21:35:52 +03:00

@@ -128,7 +128,6 @@ public:
        return true;
    }
private:
    std::map<std::string, InferenceEngine::Parameter> params;
};
@@ -136,15 +135,28 @@ private:
class InfEngineNgraphCustomLayer : public InferenceEngine::ILayerExecImpl
{
public:
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
    explicit InfEngineNgraphCustomLayer(const std::shared_ptr<ngraph::Node>& _node)
    {
        node = std::dynamic_pointer_cast<NgraphCustomOp>(_node);
        CV_Assert(node);
        std::string implStr = node->params["impl"];
        std::istringstream iss(implStr);
#else
    explicit InfEngineNgraphCustomLayer(const InferenceEngine::CNNLayer& layer) : cnnLayer(layer)
    {
        std::istringstream iss(layer.GetParamAsString("impl"));
#endif
        size_t ptr;
        iss >> ptr;
        cvLayer = (Layer*)ptr;
        std::vector<std::vector<size_t> > shapes;
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
        strToShapes(node->params["internals"], shapes);
#else
        strToShapes(layer.GetParamAsString("internals"), shapes);
#endif
        internals.resize(shapes.size());
        for (int i = 0; i < shapes.size(); ++i)
            internals[i].create(std::vector<int>(shapes[i].begin(), shapes[i].end()), CV_32F);
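
Note: the constructor above recovers a raw cv::dnn::Layer* that was earlier
serialized into the node's "impl" string parameter. A minimal, self-contained
sketch of that round trip, with DummyLayer as a hypothetical stand-in for
cv::dnn::Layer:

    #include <cassert>
    #include <sstream>
    #include <string>

    struct DummyLayer { int id; };

    int main()
    {
        DummyLayer layer = { 42 };

        // Producer side: encode the raw pointer as a decimal string,
        // the value that ends up in the "impl" parameter.
        std::ostringstream oss;
        oss << (size_t)(void*)&layer;
        std::string implStr = oss.str();

        // Consumer side (what the constructor above does): parse it back.
        std::istringstream iss(implStr);
        size_t ptr = 0;
        iss >> ptr;
        DummyLayer* recovered = (DummyLayer*)ptr;

        assert(recovered == &layer && recovered->id == 42);
        return 0;
    }

This only works because producer and consumer run in the same process; the
encoded pointer is meaningless outside the process that created it.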
@@ -180,6 +192,29 @@ public:
    {
        std::vector<InferenceEngine::DataConfig> inDataConfig;
        std::vector<InferenceEngine::DataConfig> outDataConfig;
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
        InferenceEngine::SizeVector order;
        size_t offset = std::numeric_limits<size_t>::max();
        for (int i = 0; i < node->get_input_size(); ++i)
        {
            InferenceEngine::DataConfig conf;
            auto shape = node->input_value(i).get_shape();
            order.resize(shape.size());
            std::iota(order.begin(), order.end(), 0);
            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order, offset});
            inDataConfig.push_back(conf);
        }
        for (int i = 0; i < node->get_output_size(); ++i)
        {
            InferenceEngine::DataConfig conf;
            auto shape = node->output(i).get_shape();
            order.resize(shape.size());
            std::iota(order.begin(), order.end(), 0);
            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order, offset});
            outDataConfig.push_back(conf);
        }
#else
        for (auto& it : cnnLayer.insData)
        {
            InferenceEngine::DataConfig conf;
@@ -193,6 +228,7 @@ public:
            conf.desc = it->getTensorDesc();
            outDataConfig.push_back(conf);
        }
#endif
        InferenceEngine::LayerConfig layerConfig;
        layerConfig.inConfs = inDataConfig;
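
Note: both loops in the 2020.2+ branch build the same kind of descriptor:
std::iota fills order with the identity permutation {0, 1, ..., rank-1}, i.e.
a plain row-major layout with no dimension blocking, and the offset is set to
the maximum size_t value. A sketch of that construction factored into a helper,
assuming the Inference Engine headers of that era (denseFp32Desc is a made-up
name):

    #include <inference_engine.hpp>
    #include <limits>
    #include <numeric>

    static InferenceEngine::TensorDesc denseFp32Desc(const InferenceEngine::SizeVector& shape)
    {
        // Identity permutation: dimension i stays at position i,
        // e.g. {0, 1, 2, 3} for a 4D NCHW blob.
        InferenceEngine::SizeVector order(shape.size());
        std::iota(order.begin(), order.end(), 0);

        // {shape, order, offset} invokes the InferenceEngine::BlockingDesc
        // constructor, describing a dense, unblocked tensor; the offset value
        // simply mirrors the code above.
        const size_t offset = std::numeric_limits<size_t>::max();
        return InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32,
                                           shape, {shape, order, offset});
    }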
@@ -209,12 +245,16 @@ public:
    }
private:
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
    std::shared_ptr<NgraphCustomOp> node;
#else
    InferenceEngine::CNNLayer cnnLayer;
#endif
    dnn::Layer* cvLayer;
    std::vector<Mat> internals;
};
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
class InfEngineNgraphCustomLayerFactory : public InferenceEngine::ILayerImplFactory {
public:
    explicit InfEngineNgraphCustomLayerFactory(const InferenceEngine::CNNLayer* layer) : cnnLayer(*layer)
@@ -233,17 +273,29 @@ public:
private:
    InferenceEngine::CNNLayer cnnLayer;
};
#endif
class InfEngineNgraphExtension : public InferenceEngine::IExtension
{
public:
#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
    void Unload() noexcept override {}
    void Release() noexcept override { delete this; }
    void GetVersion(const InferenceEngine::Version*&) const noexcept override {}
#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
    std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override {
        return {"CPU"};
    }
    InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override {
        if (std::dynamic_pointer_cast<NgraphCustomOp>(node) && implType == "CPU") {
            return std::make_shared<InfEngineNgraphCustomLayer>(node);
        }
        return nullptr;
    }
#else
    virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
#endif
    virtual void Unload() noexcept {}
    virtual void Release() noexcept {}
    virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
    virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
                                                          InferenceEngine::ResponseDesc*) noexcept
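
Note: on 2020.2+ the deprecated getPrimitiveTypes/getFactoryFor pair is
compiled out and custom kernels are discovered through the two ngraph-based
methods above instead. A sketch of that lookup from the caller's side (an
assumption about how the CPU plugin queries the extension, not code from this
commit):

    // node: a std::shared_ptr<ngraph::Node> taken from the graph being compiled.
    auto ext = std::make_shared<InfEngineNgraphExtension>();
    for (const std::string& implType : ext->getImplTypes(node))
    {
        if (implType != "CPU")
            continue;
        // Non-null only when node is actually a NgraphCustomOp.
        InferenceEngine::ILayerImpl::Ptr impl = ext->getImplementation(node, implType);
        if (impl)
        {
            // The plugin then drives the ILayerExecImpl interface:
            // getSupportedConfigurations(), init(), execute().
        }
    }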
@@ -260,6 +312,7 @@ public:
        factory = new InfEngineNgraphCustomLayerFactory(cnnLayer);
        return InferenceEngine::StatusCode::OK;
    }
#endif
};
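
Note: the extension still has to be handed to the runtime; elsewhere in the
ngraph backend it is registered with the CPU plugin roughly as follows (a
sketch of the calling side, assuming an InferenceEngine::Core instance named
ie):

    ie.AddExtension(std::make_shared<InfEngineNgraphExtension>(), "CPU");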