mirror of https://github.com/opencv/opencv.git

commit b92c3182ab (parent 0b9d075958)

Blank and L2-normalization layers from Intel's Inference Engine
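Reviewer context, not part of the patch: layers that report support for DNN_BACKEND_INFERENCE_ENGINE (as the two layers below now do) are scheduled onto IE when the user opts in via Net::setPreferableBackend; layers that do not support the backend fall back to the default OpenCV implementation. A minimal usage sketch; the model file names are placeholders:

    #include <opencv2/dnn.hpp>

    int main()
    {
        // Placeholder model files; any Caffe model using these layers works.
        cv::dnn::Net net = cv::dnn::readNetFromCaffe("deploy.prototxt", "model.caffemodel");
        // Request the Inference Engine backend that this commit extends.
        net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
        net.setInput(cv::dnn::blobFromImage(cv::Mat::zeros(300, 300, CV_8UC3)));
        cv::Mat out = net.forward();
        return 0;
    }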
@@ -40,6 +40,7 @@
 //
 //M*/
 #include "../precomp.hpp"
+#include "../op_inf_engine.hpp"
 
 namespace cv
 {
@@ -53,6 +54,12 @@ public:
         setParamsFrom(params);
     }
 
+    virtual bool supportBackend(int backendId) CV_OVERRIDE
+    {
+        return backendId == DNN_BACKEND_DEFAULT ||
+               backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine();
+    }
+
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
                          const int requiredOutputs,
                          std::vector<MatShape> &outputs,
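Note (reviewer comment, not in the patch): C++ operator precedence makes the condition parse as backendId == DNN_BACKEND_DEFAULT || (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine()), which is the intended meaning, though explicit parentheses would silence -Wparentheses-style warnings.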
@@ -104,6 +111,19 @@ public:
             if (outputs[i].data != inputs[i]->data)
                 inputs[i]->copyTo(outputs[i]);
     }
+
+    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+    {
+#ifdef HAVE_INF_ENGINE
+        InferenceEngine::LayerParams lp;
+        lp.name = name;
+        lp.type = "Split";
+        lp.precision = InferenceEngine::Precision::FP32;
+        std::shared_ptr<InferenceEngine::SplitLayer> ieLayer(new InferenceEngine::SplitLayer(lp));
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif  // HAVE_INF_ENGINE
+        return Ptr<BackendNode>();
+    }
 };
 
 Ptr<Layer> BlankLayer::create(const LayerParams& params)
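Apparent rationale (not stated in the diff): Blank is OpenCV's identity/pass-through layer, and this Inference Engine release seems to have no dedicated identity primitive, so the layer is expressed as an IE "Split" which, with a single output branch, simply forwards its input unchanged.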
@@ -42,6 +42,7 @@
 
 #include "../precomp.hpp"
 #include "layers_common.hpp"
+#include "../op_inf_engine.hpp"
 
 namespace cv { namespace dnn {
 
@@ -60,6 +61,13 @@ public:
         CV_Assert(pnorm > 0);
     }
 
+    virtual bool supportBackend(int backendId) CV_OVERRIDE
+    {
+        return backendId == DNN_BACKEND_DEFAULT ||
+               backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine() &&
+               pnorm == 2 && !blobs.empty();
+    }
+
     bool getMemoryShapes(const std::vector<MatShape> &inputs,
                          const int requiredOutputs,
                          std::vector<MatShape> &outputs,
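Note: the IE path is kept narrow here; only true L2 normalization (pnorm == 2) with trained scale weights (!blobs.empty()) is offloaded, and everything else stays on the default backend.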
@@ -228,6 +236,28 @@ public:
         }
     }
+
+    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+    {
+#ifdef HAVE_INF_ENGINE
+        InferenceEngine::LayerParams lp;
+        lp.name = name;
+        lp.type = "Normalize";
+        lp.precision = InferenceEngine::Precision::FP32;
+        std::shared_ptr<InferenceEngine::CNNLayer> ieLayer(new InferenceEngine::CNNLayer(lp));
+
+        CV_Assert(!blobs.empty());
+
+        ieLayer->params["eps"] = format("%f", epsilon);
+        ieLayer->params["across_spatial"] = acrossSpatial ? "1" : "0";
+        ieLayer->params["channel_shared"] = blobs[0].total() == 1 ? "1" : "0";
+
+        const int numChannels = blobs[0].total();
+        ieLayer->blobs["weights"] = wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C);
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif  // HAVE_INF_ENGINE
+        return Ptr<BackendNode>();
+    }
 
 private:
     int startAxis, endAxis;
 };
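For reference, a rough sketch (not part of the patch) of the computation these parameters describe, for the across_spatial=0, channel_shared=0 case on a single C x H x W image; the exact epsilon placement is an assumption borrowed from Caffe's Normalize convention:

    #include <opencv2/core.hpp>
    #include <cmath>

    // Illustrative reference: per-pixel L2 normalization across channels with a
    // per-channel scale (weights has C entries, playing the role of blobs[0]).
    void l2NormalizeRef(const cv::Mat& src, const cv::Mat& weights, float eps, cv::Mat& dst)
    {
        CV_Assert(src.dims == 3 && src.type() == CV_32F);  // C x H x W
        const int C = src.size[0], H = src.size[1], W = src.size[2];
        dst.create(src.dims, src.size.p, CV_32F);
        for (int y = 0; y < H; ++y)
            for (int x = 0; x < W; ++x)
            {
                float sum = eps;  // assumption: eps added under the square root
                for (int c = 0; c < C; ++c)
                {
                    const float v = src.at<float>(c, y, x);
                    sum += v * v;
                }
                const float norm = std::sqrt(sum);
                for (int c = 0; c < C; ++c)
                    dst.at<float>(c, y, x) = src.at<float>(c, y, x)
                                             * weights.at<float>(c) / norm;
            }
    }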
@@ -18,6 +18,11 @@ namespace cv { namespace dnn {
 
 #ifdef HAVE_INF_ENGINE
 
+static int infEngineVersion()
+{
+    return std::atoi(InferenceEngine::GetInferenceEngineVersion()->buildNumber);
+}
+
 InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::CNNLayerPtr& _layer)
     : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
 
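Note: InferenceEngine::GetInferenceEngineVersion()->buildNumber is a string, so the new helper parses it with std::atoi; build 5855 is used below as the cutoff for API differences in newer IE releases (layout-aware Data construction, and the CPU-extensions loading in initPlugin).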
@@ -58,9 +63,23 @@ static InferenceEngine::DataPtr wrapToInfEngineDataNode(const Mat& m, const std:
 {
     std::vector<size_t> reversedShape(&m.size[0], &m.size[0] + m.dims);
     std::reverse(reversedShape.begin(), reversedShape.end());
-    return InferenceEngine::DataPtr(
-        new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32)
-    );
+    if (infEngineVersion() > 5855)
+    {
+        InferenceEngine::Layout l = InferenceEngine::Layout::ANY;
+        if (m.dims == 4)
+            l = InferenceEngine::Layout::NCHW;
+        else if (m.dims == 2)
+            l = InferenceEngine::Layout::NC;
+        return InferenceEngine::DataPtr(
+            new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32, l)
+        );
+    }
+    else
+    {
+        return InferenceEngine::DataPtr(
+            new InferenceEngine::Data(name, reversedShape, InferenceEngine::Precision::FP32)
+        );
+    }
 }
 
 InferenceEngine::TBlob<float>::Ptr wrapToInfEngineBlob(const Mat& m, const std::vector<size_t>& shape,
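Note (an inference from the code, not stated in the diff): the shape is reversed presumably because this IE release stores blob dimensions in minor-to-major order; on builds newer than 5855 the layout is also passed explicitly, mapping 4-D Mats to NCHW and 2-D Mats to NC, with ANY as the fallback.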
@@ -336,10 +355,9 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
 
     InferenceEngine::StatusCode status;
     InferenceEngine::ResponseDesc resp;
-    const InferenceEngine::Version* v = InferenceEngine::GetInferenceEngineVersion();
 
     plugin = InferenceEngine::PluginDispatcher({""}).getSuitablePlugin(targetDevice);
-    if (std::atoi(v->buildNumber) > 5855)
+    if (infEngineVersion() > 5855 && targetDevice == InferenceEngine::TargetDevice::eCPU)
     {
 #ifdef _WIN32
         InferenceEngine::IExtensionPtr extension =