Add a mutex for shared Inference Engine plugins

Dmitry Kurtaev 2019-01-31 16:10:59 +03:00
parent 2f5af1bd33
commit bc4e471847
2 changed files with 43 additions and 2 deletions
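
Why the lock matters: initPlugin() can be entered from several threads at once, since each cv::dnn::Net creates or reuses a plugin for its target device, and both the find() and the insert() on the shared plugin map were previously unsynchronized. The patch moves the map behind a construct-on-first-use accessor and takes the global initialization mutex around every access. Below is a minimal sketch of the same pattern; std::mutex and std::lock_guard stand in for OpenCV's internal AutoLock and getInitializationMutex(), and Plugin, getInitMutex() and getOrCreatePlugin() are simplified placeholders rather than the real types:

#include <map>
#include <memory>
#include <mutex>
#include <string>

struct Plugin {};  // stand-in for InferenceEngine::InferenceEnginePluginPtr

// Construct-on-first-use: the map is created on the first call, which
// avoids static initialization order problems between translation units.
static std::map<std::string, std::shared_ptr<Plugin> >& getSharedPlugins()
{
    static std::map<std::string, std::shared_ptr<Plugin> > sharedPlugins;
    return sharedPlugins;
}

static std::mutex& getInitMutex()
{
    static std::mutex m;
    return m;
}

std::shared_ptr<Plugin> getOrCreatePlugin(const std::string& device)
{
    // The lock covers both the find() and the insert(): without it, two
    // threads could create duplicate plugins for one device, or one thread
    // could rebalance the map while another is traversing it.
    std::lock_guard<std::mutex> lock(getInitMutex());
    auto& plugins = getSharedPlugins();
    auto it = plugins.find(device);
    if (it != plugins.end())
        return it->second;
    auto plugin = std::make_shared<Plugin>();
    plugins[device] = plugin;
    return plugin;
}

A function-local static also sidesteps the static initialization order problem: a namespace-scope sharedPlugins map is not guaranteed to be constructed before a Net built during static initialization of another translation unit touches it.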

modules/dnn/src/op_inf_engine.cpp

@@ -622,7 +622,11 @@ void InfEngineBackendNet::init(int targetId)
 #endif // IE < R5
 
-static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
+static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr>& getSharedPlugins()
+{
+    static std::map<InferenceEngine::TargetDevice, InferenceEngine::InferenceEnginePluginPtr> sharedPlugins;
+    return sharedPlugins;
+}
 
 void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
 {
     CV_Assert(!isInitialized());
@@ -630,6 +634,8 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::ICNNNetwork& net)
     try
     {
+        AutoLock lock(getInitializationMutex());
+        auto& sharedPlugins = getSharedPlugins();
         auto pluginIt = sharedPlugins.find(targetDevice);
         if (pluginIt != sharedPlugins.end())
         {
@@ -797,7 +803,8 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN
 void resetMyriadDevice()
 {
 #ifdef HAVE_INF_ENGINE
-    sharedPlugins.erase(InferenceEngine::TargetDevice::eMYRIAD);
+    AutoLock lock(getInitializationMutex());
+    getSharedPlugins().erase(InferenceEngine::TargetDevice::eMYRIAD);
 #endif // HAVE_INF_ENGINE
 }
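
resetMyriadDevice() mutates the same map, so it now takes the same lock; erasing the MYRIAD entry while another thread is inside initPlugin() would otherwise be a data race. A sketch of the interleaving the shared mutex rules out, reusing the hypothetical helpers from the sketch above (the two sketches compile together as one file):

#include <thread>

void resetDevice(const std::string& device)
{
    // Same mutex as getOrCreatePlugin(): erase() must not run while
    // another thread is reading from or inserting into the map.
    std::lock_guard<std::mutex> lock(getInitMutex());
    getSharedPlugins().erase(device);
}

int main()
{
    // Without the shared lock these two calls would race on the map.
    std::thread t1([]{ getOrCreatePlugin("MYRIAD"); });
    std::thread t2([]{ resetDevice("MYRIAD"); });
    t1.join();
    t2.join();
    return 0;
}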

modules/dnn/test/test_layers.cpp

@@ -46,6 +46,10 @@
 #include <opencv2/dnn/all_layers.hpp>
 #include <opencv2/dnn/layer.details.hpp> // CV_DNN_REGISTER_LAYER_CLASS
 
+#ifdef HAVE_INF_ENGINE
+#include <thread>
+#endif
+
 namespace opencv_test { namespace {
 
 template<typename TString>
@@ -970,6 +974,36 @@ TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
     if (targetId != DNN_TARGET_MYRIAD)
         normAssert(outs[0], outs[1]);
 }
+
+TEST_P(Layer_Test_Convolution_DLDT, multithreading)
+{
+    Target targetId = GetParam();
+    std::string suffix = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? "_fp16" : "";
+    std::string xmlPath = _tf("layer_convolution" + suffix + ".xml");
+    std::string binPath = _tf("layer_convolution" + suffix + ".bin");
+
+    Net firstNet = readNet(xmlPath, binPath);
+    Net secondNet = readNet(xmlPath, binPath);
+
+    Mat inp = blobFromNPY(_tf("blob.npy"));
+    firstNet.setInput(inp);
+    secondNet.setInput(inp);
+    firstNet.setPreferableTarget(targetId);
+    secondNet.setPreferableTarget(targetId);
+
+    Mat out1, out2;
+    std::thread t1([&]{out1 = firstNet.forward();});
+    std::thread t2([&]{out2 = secondNet.forward();});
+
+    t1.join();
+    t2.join();
+
+    Mat ref = blobFromNPY(_tf("layer_convolution.npy"));
+    double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 1.5e-3 : 1e-5;
+    double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 1.8e-2 : 1e-4;
+    normAssert(out1, ref, "first thread", l1, lInf);
+    normAssert(out2, ref, "second thread", l1, lInf);
+}
 
 INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Convolution_DLDT,
     testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE)));
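
What the new test pins down, reduced to the public API: two independent Net objects may now call forward() concurrently, because plugin creation and lookup are serialized behind the scenes. A single Net instance is still not meant to be shared across threads. A standalone sketch with placeholder model paths and a constant input blob (paths and shapes are assumptions, not from the commit):

#include <opencv2/dnn.hpp>
#include <thread>

int main()
{
    using namespace cv;
    using namespace cv::dnn;

    // Placeholder model paths: any Inference Engine IR pair readNet() accepts.
    Net first = readNet("model.xml", "model.bin");
    Net second = readNet("model.xml", "model.bin");

    // Constant NCHW blob as a stand-in input; the shape is an assumption.
    int dims[] = {1, 3, 224, 224};
    Mat input = Mat::ones(4, dims, CV_32F);

    first.setInput(input);
    second.setInput(input);

    // Each thread owns its own Net; only plugin setup is shared state.
    Mat outFirst, outSecond;
    std::thread t1([&]{ outFirst = first.forward(); });
    std::thread t2([&]{ outSecond = second.forward(); });
    t1.join();
    t2.join();
    return 0;
}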