Merge pull request #17634 from dkurt:dnn_ie_cpu_ext_update_master

commit 365a10f58c
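
Note: this change makes the custom-layer (CPU extension) glue of OpenCV's nGraph based Inference Engine backend build against both the pre-2020.2 and the 2020.2+ OpenVINO APIs. The old ILayerImplFactory path is kept behind INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2), while newer releases obtain the custom kernel directly from the extension through getImplTypes and getImplementation. As a rough orientation, the sketch below shows how such an extension is typically handed to an InferenceEngine::Core instance; the AddExtension call, the wrapper function name and the "CPU" device string are assumptions for illustration, not part of this commit.

// Sketch only: assumes the InfEngineNgraphExtension class from the diff below
// is in scope and that the usual InferenceEngine::Core API of the 2020.x
// releases is available.
#include <memory>
#include <inference_engine.hpp>

void registerOpenCVCustomLayers(InferenceEngine::Core& ie)
{
    // Hand the extension (custom nGraph op plus its CPU kernel) to the CPU plugin.
    ie.AddExtension(std::make_shared<InfEngineNgraphExtension>(), "CPU");
}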
@@ -128,7 +128,6 @@ public:
         return true;
     }
 
-private:
     std::map<std::string, InferenceEngine::Parameter> params;
 };
 
@@ -136,15 +135,28 @@ private:
 class InfEngineNgraphCustomLayer : public InferenceEngine::ILayerExecImpl
 {
 public:
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+    explicit InfEngineNgraphCustomLayer(const std::shared_ptr<ngraph::Node>& _node)
+    {
+        node = std::dynamic_pointer_cast<NgraphCustomOp>(_node);
+        CV_Assert(node);
+        std::string implStr = node->params["impl"];
+        std::istringstream iss(implStr);
+#else
     explicit InfEngineNgraphCustomLayer(const InferenceEngine::CNNLayer& layer) : cnnLayer(layer)
     {
         std::istringstream iss(layer.GetParamAsString("impl"));
+#endif
         size_t ptr;
         iss >> ptr;
         cvLayer = (Layer*)ptr;
 
         std::vector<std::vector<size_t> > shapes;
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+        strToShapes(node->params["internals"], shapes);
+#else
         strToShapes(layer.GetParamAsString("internals"), shapes);
+#endif
         internals.resize(shapes.size());
         for (int i = 0; i < shapes.size(); ++i)
             internals[i].create(std::vector<int>(shapes[i].begin(), shapes[i].end()), CV_32F);
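
Note: both constructor branches above recover the same dnn::Layer pointer. The first hunk removes the private: specifier so that NgraphCustomOp::params is readable here, and the "impl" entry of that map holds the layer pointer printed as a decimal string, which iss >> ptr parses back. A minimal standalone sketch of that round trip follows; encodeImpl and decodeImpl are names chosen for illustration, not OpenCV functions.

// Standalone illustration of the pointer-as-string round trip used above.
#include <cassert>
#include <sstream>
#include <string>

static std::string encodeImpl(const void* layer)
{
    std::ostringstream oss;
    oss << reinterpret_cast<size_t>(layer);    // pointer stored as decimal text
    return oss.str();
}

static void* decodeImpl(const std::string& implStr)
{
    std::istringstream iss(implStr);
    size_t ptr = 0;
    iss >> ptr;                                // mirrors "iss >> ptr" in the patch
    return reinterpret_cast<void*>(ptr);
}

int main()
{
    int dummy = 0;
    assert(decodeImpl(encodeImpl(&dummy)) == &dummy);
    return 0;
}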
@@ -180,6 +192,29 @@ public:
     {
         std::vector<InferenceEngine::DataConfig> inDataConfig;
         std::vector<InferenceEngine::DataConfig> outDataConfig;
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+        InferenceEngine::SizeVector order;
+        size_t offset = std::numeric_limits<size_t>::max();
+        for (int i = 0; i < node->get_input_size(); ++i)
+        {
+            InferenceEngine::DataConfig conf;
+            auto shape = node->input_value(i).get_shape();
+            order.resize(shape.size());
+            std::iota(order.begin(), order.end(), 0);
+            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order, offset});
+            inDataConfig.push_back(conf);
+        }
+
+        for (int i = 0; i < node->get_output_size(); ++i)
+        {
+            InferenceEngine::DataConfig conf;
+            auto shape = node->output(i).get_shape();
+            order.resize(shape.size());
+            std::iota(order.begin(), order.end(), 0);
+            conf.desc = InferenceEngine::TensorDesc(InferenceEngine::Precision::FP32, shape, {shape, order, offset});
+            outDataConfig.push_back(conf);
+        }
+#else
         for (auto& it : cnnLayer.insData)
         {
             InferenceEngine::DataConfig conf;
@@ -193,6 +228,7 @@ public:
             conf.desc = it->getTensorDesc();
             outDataConfig.push_back(conf);
         }
+#endif
 
         InferenceEngine::LayerConfig layerConfig;
         layerConfig.inConfs = inDataConfig;
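
Note: in the 2020.2+ branch added above, every input and output of the node is described as a dense FP32 tensor. The order vector filled by std::iota is the identity permutation 0..rank-1, i.e. a plain row-major layout over the node's shape, and {shape, order, offset} supplies the layout description expected by the InferenceEngine::TensorDesc constructor. A standalone illustration of just the ordering step is given below; the example shape is invented.

// Builds the same identity blocking order as the patch, without Inference Engine.
#include <iostream>
#include <numeric>
#include <vector>

int main()
{
    std::vector<size_t> shape = {1, 3, 224, 224};  // hypothetical NCHW shape
    std::vector<size_t> order(shape.size());
    std::iota(order.begin(), order.end(), 0);      // order becomes {0, 1, 2, 3}
    for (size_t d : order)
        std::cout << d << ' ';                     // prints: 0 1 2 3
    std::cout << '\n';
    return 0;
}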
@@ -209,12 +245,16 @@ public:
     }
 
 private:
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+    std::shared_ptr<NgraphCustomOp> node;
+#else
     InferenceEngine::CNNLayer cnnLayer;
+#endif
     dnn::Layer* cvLayer;
     std::vector<Mat> internals;
 };
 
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
 class InfEngineNgraphCustomLayerFactory : public InferenceEngine::ILayerImplFactory {
 public:
     explicit InfEngineNgraphCustomLayerFactory(const InferenceEngine::CNNLayer* layer) : cnnLayer(*layer)
@@ -233,17 +273,29 @@ public:
 private:
     InferenceEngine::CNNLayer cnnLayer;
 };
+#endif
 
 
 class InfEngineNgraphExtension : public InferenceEngine::IExtension
 {
 public:
-#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
+    void Unload() noexcept override {}
+    void Release() noexcept override { delete this; }
+    void GetVersion(const InferenceEngine::Version*&) const noexcept override {}
+
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_2)
+    std::vector<std::string> getImplTypes(const std::shared_ptr<ngraph::Node>& node) override {
+        return {"CPU"};
+    }
+
+    InferenceEngine::ILayerImpl::Ptr getImplementation(const std::shared_ptr<ngraph::Node>& node, const std::string& implType) override {
+        if (std::dynamic_pointer_cast<NgraphCustomOp>(node) && implType == "CPU") {
+            return std::make_shared<InfEngineNgraphCustomLayer>(node);
+        }
+        return nullptr;
+    }
+#else
     virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
-#endif
-    virtual void Unload() noexcept {}
-    virtual void Release() noexcept {}
-    virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
 
     virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
                                                           InferenceEngine::ResponseDesc*) noexcept
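
Note: the rewritten InfEngineNgraphExtension marks Unload, Release and GetVersion as explicit overrides, and Release now deletes the extension object itself instead of doing nothing; the SetLogCallback, getPrimitiveTypes and getFactoryFor plumbing remains only for releases older than 2020.2. A tiny standalone sketch of the self-deleting Release pattern follows; SelfReleasing is an invented stand-in, not an Inference Engine type.

// Demonstrates the "Release() { delete this; }" ownership pattern used above.
#include <iostream>

struct SelfReleasing
{
    void Release() noexcept { delete this; }   // object frees itself, as in the patch
    ~SelfReleasing() { std::cout << "released\n"; }
};

int main()
{
    SelfReleasing* ext = new SelfReleasing();
    ext->Release();   // after this call the pointer must not be used again
    return 0;
}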
@@ -260,6 +312,7 @@ public:
         factory = new InfEngineNgraphCustomLayerFactory(cnnLayer);
         return InferenceEngine::StatusCode::OK;
     }
+#endif
 };
 
 