Mirror of https://github.com/opencv/opencv.git
Fix multiple networks with Intel's Inference Engine backend
commit ab389142af
parent 1822e85f4a
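Previously each cv::dnn network created its own Inference Engine plugin and ran inference through plugin->Infer() with manual StatusCode/ResponseDesc checks. With this change, plugins are cached in a static per-device map shared by all networks, and every network keeps its own InferencePlugin wrapper, ExecutableNetwork, and InferRequest, so several networks can be initialized and run in the same process.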
@@ -361,10 +361,20 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
 {
     CV_Assert(!isInitialized());
 
-    InferenceEngine::StatusCode status;
-    InferenceEngine::ResponseDesc resp;
-
-    plugin = InferenceEngine::PluginDispatcher({""}).getSuitablePlugin(targetDevice);
+    static std::map<std::string, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
+    std::string deviceName = InferenceEngine::getDeviceName(targetDevice);
+    auto pluginIt = sharedPlugins.find(deviceName);
+    if (pluginIt != sharedPlugins.end())
+    {
+        enginePtr = pluginIt->second;
+    }
+    else
+    {
+        enginePtr = InferenceEngine::PluginDispatcher({""}).getSuitablePlugin(targetDevice);
+        sharedPlugins[deviceName] = enginePtr;
+    }
+    plugin = InferenceEngine::InferencePlugin(enginePtr);
+
     if (targetDevice == InferenceEngine::TargetDevice::eCPU)
     {
 #ifdef _WIN32
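For illustration only, a minimal standalone sketch of the caching pattern introduced above; Plugin, PluginPtr, and getSharedPlugin are hypothetical stand-ins for InferenceEnginePluginPtr, the PluginDispatcher lookup, and the static sharedPlugins map:

#include <iostream>
#include <map>
#include <memory>
#include <string>

// Hypothetical stand-in for an expensive-to-load backend plugin
// (InferenceEnginePluginPtr in the real code).
struct Plugin
{
    explicit Plugin(const std::string& device) : device(device) {}
    std::string device;
};
using PluginPtr = std::shared_ptr<Plugin>;

// Mirrors the static sharedPlugins map in initPlugin(): the first
// network asking for a device loads the plugin, later networks reuse it.
PluginPtr getSharedPlugin(const std::string& deviceName)
{
    static std::map<std::string, PluginPtr> sharedPlugins;
    auto it = sharedPlugins.find(deviceName);
    if (it != sharedPlugins.end())
        return it->second;                               // cache hit: share the instance
    PluginPtr p = std::make_shared<Plugin>(deviceName);  // expensive load, done once
    sharedPlugins[deviceName] = p;
    return p;
}

int main()
{
    PluginPtr a = getSharedPlugin("CPU");
    PluginPtr b = getSharedPlugin("CPU");
    std::cout << (a == b) << std::endl;  // prints 1: both networks share one plugin
    return 0;
}

Note that, like the code in this commit, the static map is not guarded by a mutex, so concurrent first-time initialization from multiple threads is not addressed here.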
@@ -374,18 +384,17 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
         InferenceEngine::IExtensionPtr extension =
             InferenceEngine::make_so_pointer<InferenceEngine::IExtension>("libcpu_extension.so");
 #endif // _WIN32
-        status = plugin->AddExtension(extension, &resp);
-        if (status != InferenceEngine::StatusCode::OK)
-            CV_Error(Error::StsAssert, resp.msg);
+        plugin.AddExtension(extension);
     }
-    status = plugin->LoadNetwork(net, &resp);
-    if (status != InferenceEngine::StatusCode::OK)
-        CV_Error(Error::StsAssert, resp.msg);
+    netExec = plugin.LoadNetwork(net, {});
+    infRequest = netExec.CreateInferRequest();
+    infRequest.SetInput(inpBlobs);
+    infRequest.SetOutput(outBlobs);
 }
 
 bool InfEngineBackendNet::isInitialized()
 {
-    return (bool)plugin;
+    return (bool)enginePtr;
 }
 
 void InfEngineBackendNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs)
@@ -399,10 +408,7 @@ void InfEngineBackendNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs
 
 void InfEngineBackendNet::forward()
 {
-    InferenceEngine::ResponseDesc resp;
-    InferenceEngine::StatusCode status = plugin->Infer(inpBlobs, outBlobs, &resp);
-    if (status != InferenceEngine::StatusCode::OK)
-        CV_Error(Error::StsAssert, resp.msg);
+    infRequest.Infer();
 }
 
 Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
@@ -89,7 +89,10 @@ private:
     InferenceEngine::BlobMap allBlobs;
     InferenceEngine::TargetDevice targetDevice;
     InferenceEngine::Precision precision;
-    InferenceEngine::InferenceEnginePluginPtr plugin;
+    InferenceEngine::InferenceEnginePluginPtr enginePtr;
+    InferenceEngine::InferencePlugin plugin;
+    InferenceEngine::ExecutableNetwork netExec;
+    InferenceEngine::InferRequest infRequest;
 
     void initPlugin(InferenceEngine::ICNNNetwork& net);
 };
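Taken together, the source and header hunks replace the status-code calls (plugin->LoadNetwork(net, &resp), plugin->Infer(...)) with the exception-throwing wrapper flow. A condensed sketch of that flow, assuming the 2018-era Inference Engine API this commit targets; the helper function initAndRun and its parameter passing are illustrative, not part of the commit:

#include <inference_engine.hpp>

// Condensed flow sketch of the new initialization path; error
// reporting is via exceptions instead of StatusCode/ResponseDesc.
void initAndRun(InferenceEngine::ICNNNetwork& net,
                InferenceEngine::TargetDevice targetDevice,
                InferenceEngine::BlobMap& inpBlobs,
                InferenceEngine::BlobMap& outBlobs)
{
    InferenceEngine::InferenceEnginePluginPtr enginePtr =
        InferenceEngine::PluginDispatcher({""}).getSuitablePlugin(targetDevice);
    InferenceEngine::InferencePlugin plugin(enginePtr);  // throwing C++ wrapper

    InferenceEngine::ExecutableNetwork netExec = plugin.LoadNetwork(net, {});
    InferenceEngine::InferRequest infRequest = netExec.CreateInferRequest();
    infRequest.SetInput(inpBlobs);   // blobs are bound once here...
    infRequest.SetOutput(outBlobs);
    infRequest.Infer();              // ...so each forward() only re-runs the request
}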
@@ -887,6 +887,31 @@ TEST(Test_DLDT, fused_output)
     ASSERT_NO_THROW(net.forward());
     LayerFactory::unregisterLayer("Unsupported");
 }
 
+TEST(Test_DLDT, multiple_networks)
+{
+    Net nets[2];
+    for (int i = 0; i < 2; ++i)
+    {
+        nets[i].setInputsNames(std::vector<String>(1, format("input_%d", i)));
+
+        LayerParams lp;
+        lp.set("kernel_size", 1);
+        lp.set("num_output", 1);
+        lp.set("bias_term", false);
+        lp.type = "Convolution";
+        lp.name = format("testConv_%d", i);
+        lp.blobs.push_back(Mat({1, 1, 1, 1}, CV_32F, Scalar(1 + i)));
+        nets[i].addLayerToPrev(lp.name, lp.type, lp);
+        nets[i].setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
+        nets[i].setInput(Mat({1, 1, 1, 1}, CV_32FC1, Scalar(1)));
+    }
+    Mat out_1 = nets[0].forward();
+    Mat out_2 = nets[1].forward();
+    // After the second model is initialized we try to receive an output from the first network again.
+    out_1 = nets[0].forward();
+    normAssert(2 * out_1, out_2);
+}
 #endif // HAVE_INF_ENGINE
 
 // Test a custom layer.
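The new multiple_networks test builds two single-layer 1x1 convolution networks whose kernel weights are 1 and 2, feeds both the same unit input, and re-runs nets[0].forward() after nets[1] has been initialized. With one shared plugin but per-network ExecutableNetwork/InferRequest state, the first network must still produce its own result, which normAssert(2 * out_1, out_2) verifies.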