Merge pull request #13387 from dkurt:dnn_minor_ie_fixes
Commit: eb1f7797e4

In summary, this PR: adds a hasNetOwner flag so InfEngineBackendNet knows when it wraps a CNNNetwork loaded from Intel's IR format, and delegates getPrecision() to that network; parameterizes the DLDT layer tests over the available Inference Engine targets, expecting a throw when an FP32 IR model runs on Myriad; widens several version guards from INF_ENGINE_RELEASE == 2018040000 to >= 2018040000; and switches Test_DLDT_two_inputs to 4D inputs with relaxed tolerances for FP16 targets.
@@ -152,6 +152,7 @@ InfEngineBackendNet::InfEngineBackendNet()
 {
     targetDevice = InferenceEngine::TargetDevice::eCPU;
     precision = InferenceEngine::Precision::FP32;
+    hasNetOwner = false;
 }

 InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net)
@@ -162,6 +163,7 @@ InfEngineBackendNet::InfEngineBackendNet(InferenceEngine::CNNNetwork& net)
     outputs = net.getOutputsInfo();
     layers.resize(net.layerCount());  // A hack to execute InfEngineBackendNet::layerCount correctly.
     netOwner = net;
+    hasNetOwner = true;
 }

 void InfEngineBackendNet::Release() noexcept
@@ -178,12 +180,12 @@ void InfEngineBackendNet::setPrecision(InferenceEngine::Precision p) noexcept

 InferenceEngine::Precision InfEngineBackendNet::getPrecision() noexcept
 {
-    return precision;
+    return hasNetOwner ? netOwner.getPrecision() : precision;
 }

 InferenceEngine::Precision InfEngineBackendNet::getPrecision() const noexcept
 {
-    return precision;
+    return hasNetOwner ? netOwner.getPrecision() : precision;
 }

 // Assume that outputs of network is unconnected blobs.
@@ -134,6 +134,9 @@ private:
     InferenceEngine::InferRequest infRequest;
     // In case of models from Model Optimizer we need to manage their lifetime.
     InferenceEngine::CNNNetwork netOwner;
+    // There is no way to check if netOwner is initialized or not so we use
+    // a separate flag to determine if the model has been loaded from IR.
+    bool hasNetOwner;

     std::string name;

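Since InferenceEngine::CNNNetwork exposes no way to ask whether it holds a loaded model, the commit pairs the netOwner member with an explicit hasNetOwner flag, set in each constructor and consulted by getPrecision() above. A minimal sketch of the same owner-flag pattern, with a hypothetical Handle/Wrapper pair standing in for the real classes:

    #include <string>

    // Stand-in for a handle type that offers no "is initialized?" query,
    // like InferenceEngine::CNNNetwork in this diff.
    struct Handle
    {
        std::string precision = "FP32";
        std::string getPrecision() const { return precision; }
    };

    class Wrapper
    {
    public:
        Wrapper() : hasOwner(false) {}  // nothing owned yet
        explicit Wrapper(const Handle& h) : owner(h), hasOwner(true) {}

        // Mirrors: return hasNetOwner ? netOwner.getPrecision() : precision;
        std::string getPrecision() const
        {
            return hasOwner ? owner.getPrecision() : precision;
        }

    private:
        Handle owner;           // default-constructed until a model is loaded
        bool hasOwner;          // the flag this commit introduces
        std::string precision = "FP32";
    };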
@@ -471,6 +471,7 @@ TEST(Test_Caffe, shared_weights)

     net.setInput(blob_1, "input_1");
     net.setInput(blob_2, "input_2");
+    net.setPreferableBackend(DNN_BACKEND_OPENCV);

     Mat sum = net.forward();

@@ -306,7 +306,7 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
     // batch size 1
     testDarknetModel(config_file, weights_file, ref.rowRange(0, 2), scoreDiff, iouDiff);

-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_MYRIAD)
 #endif
     // batch size 2
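Switching the guard from == to >= widens each skip from exactly OpenVINO 2018R4 to 2018R4 and everything newer. This works because INF_ENGINE_RELEASE packs the version into a plain integer (2018040000 for 2018 R4, per the value used here), so ordinary comparison orders releases chronologically. A small standalone illustration with an assumed 2018 R5 value:

    #include <cassert>

    // Assumed encoding: <year><2-digit release><4-digit patch>;
    // 2018040000 is OpenVINO 2018 R4, so pretend we build against 2018 R5.
    #define INF_ENGINE_RELEASE 2018050000

    int main()
    {
        // The old "== 2018040000" guard stops matching on R5, re-enabling
        // tests that apparently still fail there; ">=" keeps them skipped.
        assert(!(INF_ENGINE_RELEASE == 2018040000));
        assert(INF_ENGINE_RELEASE >= 2018040000);
        return 0;
    }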
@@ -166,7 +166,7 @@ TEST_P(Deconvolution, Accuracy)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU &&
         dilation.width == 2 && dilation.height == 2)
         throw SkipTestException("");
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_CPU &&
         hasBias && group != 1)
         throw SkipTestException("Test is disabled for OpenVINO 2018R4");
@@ -137,7 +137,7 @@ TEST_P(Test_Caffe_layers, Convolution)

 TEST_P(Test_Caffe_layers, DeConvolution)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU)
         throw SkipTestException("Test is disabled for OpenVINO 2018R4");
 #endif
@@ -918,8 +918,11 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_DWconv_Prelu, Combine(Values(3, 6), Val
 // Using Intel's Model Optimizer generate .xml and .bin files:
 // ./ModelOptimizer -w /path/to/caffemodel -d /path/to/prototxt \
 //                  -p FP32 -i -b ${batch_size} -o /path/to/output/folder
-TEST(Layer_Test_Convolution_DLDT, Accuracy)
+typedef testing::TestWithParam<Target> Layer_Test_Convolution_DLDT;
+TEST_P(Layer_Test_Convolution_DLDT, Accuracy)
 {
+    Target targetId = GetParam();
+
     Net netDefault = readNet(_tf("layer_convolution.caffemodel"), _tf("layer_convolution.prototxt"));
     Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin"));

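The TEST to TEST_P conversion follows GoogleTest's value-parameterized pattern: define the suite via testing::TestWithParam<T>, read the current value with GetParam(), and list the values with INSTANTIATE_TEST_CASE_P (added further down in this diff). A self-contained example with hypothetical test names:

    #include <gtest/gtest.h>

    // Suite parameterized over an int, analogous to
    // testing::TestWithParam<Target> above.
    typedef testing::TestWithParam<int> SquareTest;

    TEST_P(SquareTest, NonNegative)
    {
        int x = GetParam();     // the value for this instantiation
        EXPECT_GE(x * x, 0);
    }

    // Runs SquareTest.NonNegative once per listed value.
    INSTANTIATE_TEST_CASE_P(/**/, SquareTest, testing::Values(-2, 0, 3));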
@@ -930,6 +933,10 @@ TEST(Layer_Test_Convolution_DLDT, Accuracy)
     Mat outDefault = netDefault.forward();

     net.setInput(inp);
+    net.setPreferableTarget(targetId);
+
+    if (targetId != DNN_TARGET_MYRIAD)
+    {
         Mat out = net.forward();

         normAssert(outDefault, out);
@@ -937,10 +944,18 @@ TEST(Layer_Test_Convolution_DLDT, Accuracy)
         std::vector<int> outLayers = net.getUnconnectedOutLayers();
         ASSERT_EQ(net.getLayer(outLayers[0])->name, "output_merge");
         ASSERT_EQ(net.getLayer(outLayers[0])->type, "Concat");
+    }
+    else
+    {
+        // An assertion is expected because the model is in FP32 format but
+        // Myriad plugin supports only FP16 models.
+        ASSERT_ANY_THROW(net.forward());
+    }
 }

-TEST(Layer_Test_Convolution_DLDT, setInput_uint8)
+TEST_P(Layer_Test_Convolution_DLDT, setInput_uint8)
 {
+    Target targetId = GetParam();
     Mat inp = blobFromNPY(_tf("blob.npy"));

     Mat inputs[] = {Mat(inp.dims, inp.size, CV_8U), Mat()};
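Every parameterized test in this commit now branches on DNN_TARGET_MYRIAD: the test IR files are generated in FP32 (see the ModelOptimizer comment above), and the Myriad plugin accepts only FP16 models, so forward() must fail there. ASSERT_ANY_THROW is used rather than ASSERT_THROW, presumably because the exact exception type is an Inference Engine implementation detail. A toy standalone demonstration of the macro:

    #include <gtest/gtest.h>
    #include <stdexcept>

    // Stand-in for net.forward() on a target that rejects the model's
    // precision, as the Myriad plugin rejects FP32 IR files.
    static void forwardOnMyriad()
    {
        throw std::runtime_error("FP16 model required");
    }

    TEST(AssertAnyThrowDemo, Fp32ModelOnMyriad)
    {
        // Passes if the call throws anything at all, regardless of type.
        ASSERT_ANY_THROW(forwardOnMyriad());
    }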
@@ -951,12 +966,25 @@ TEST(Layer_Test_Convolution_DLDT, setInput_uint8)
     for (int i = 0; i < 2; ++i)
     {
         Net net = readNet(_tf("layer_convolution.xml"), _tf("layer_convolution.bin"));
+        net.setPreferableTarget(targetId);
         net.setInput(inputs[i]);
+        if (targetId != DNN_TARGET_MYRIAD)
+        {
             outs[i] = net.forward();
             ASSERT_EQ(outs[i].type(), CV_32F);
         }
+        else
+        {
+            // An assertion is expected because the model is in FP32 format but
+            // Myriad plugin supports only FP16 models.
+            ASSERT_ANY_THROW(net.forward());
+        }
+    }
+    if (targetId != DNN_TARGET_MYRIAD)
         normAssert(outs[0], outs[1]);
 }
+INSTANTIATE_TEST_CASE_P(/**/, Layer_Test_Convolution_DLDT,
+    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE)));

 // 1. Create a .prototxt file with the following network:
 // layer {
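The new instantiation takes its target list from getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE) instead of a hard-coded set, so the suite runs once per Inference Engine target actually usable on the test machine and produces no cases at all where the backend is absent. A sketch of how testing::ValuesIn consumes such a list (stub helper and enum values assumed; the real helper lives in the dnn test support code):

    #include <cstdio>
    #include <vector>

    enum Target { DNN_TARGET_CPU, DNN_TARGET_OPENCL, DNN_TARGET_MYRIAD };

    // Hypothetical stand-in: report whichever targets this machine supports.
    static std::vector<Target> getAvailableTargetsStub()
    {
        return { DNN_TARGET_CPU };  // pretend only CPU is present
    }

    int main()
    {
        // testing::ValuesIn walks a container exactly like this loop,
        // producing one test instantiation per element.
        for (Target t : getAvailableTargetsStub())
            std::printf("would instantiate tests for target %d\n", (int)t);
        return 0;
    }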
@@ -980,14 +1008,17 @@ TEST(Layer_Test_Convolution_DLDT, setInput_uint8)
 // net.save('/path/to/caffemodel')
 //
 // 3. Convert using ModelOptimizer.
-typedef testing::TestWithParam<tuple<int, int> > Test_DLDT_two_inputs;
+typedef testing::TestWithParam<tuple<int, int, Target> > Test_DLDT_two_inputs;
 TEST_P(Test_DLDT_two_inputs, as_IR)
 {
     int firstInpType = get<0>(GetParam());
     int secondInpType = get<1>(GetParam());
-    // TODO: It looks like a bug in Inference Engine.
+    Target targetId = get<2>(GetParam());

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018040000
     if (secondInpType == CV_8U)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starts from OpenVINO 2018R4");
+#endif
+
     Net net = readNet(_tf("net_two_inputs.xml"), _tf("net_two_inputs.bin"));
     int inpSize[] = {1, 2, 3};
@@ -998,11 +1029,21 @@ TEST_P(Test_DLDT_two_inputs, as_IR)

     net.setInput(firstInp, "data");
     net.setInput(secondInp, "second_input");
+    net.setPreferableTarget(targetId);
+    if (targetId != DNN_TARGET_MYRIAD)
+    {
         Mat out = net.forward();

         Mat ref;
         cv::add(firstInp, secondInp, ref, Mat(), CV_32F);
         normAssert(out, ref);
+    }
+    else
+    {
+        // An assertion is expected because the model is in FP32 format but
+        // Myriad plugin supports only FP16 models.
+        ASSERT_ANY_THROW(net.forward());
+    }
 }

 TEST_P(Test_DLDT_two_inputs, as_backend)
@@ -1010,6 +1051,8 @@ TEST_P(Test_DLDT_two_inputs, as_backend)
     static const float kScale = 0.5f;
     static const float kScaleInv = 1.0f / kScale;

+    Target targetId = get<2>(GetParam());
+
     Net net;
     LayerParams lp;
     lp.type = "Eltwise";
@@ -1018,9 +1061,9 @@ TEST_P(Test_DLDT_two_inputs, as_backend)
     int eltwiseId = net.addLayerToPrev(lp.name, lp.type, lp);  // connect to a first input
     net.connect(0, 1, eltwiseId, 1);  // connect to a second input

-    int inpSize[] = {1, 2, 3};
-    Mat firstInp(3, &inpSize[0], get<0>(GetParam()));
-    Mat secondInp(3, &inpSize[0], get<1>(GetParam()));
+    int inpSize[] = {1, 2, 3, 4};
+    Mat firstInp(4, &inpSize[0], get<0>(GetParam()));
+    Mat secondInp(4, &inpSize[0], get<1>(GetParam()));
     randu(firstInp, 0, 255);
     randu(secondInp, 0, 255);

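The eltwise test inputs grow from three to four dimensions. The diff does not state why, but {1, 2, 3, 4} matches the N x C x H x W blob layout the Inference Engine backend expects, so 3D inputs were likely never valid for it. Building such a blob directly with OpenCV:

    #include <opencv2/core.hpp>

    int main()
    {
        // A 4D dense Mat in N x C x H x W order, like the test inputs above.
        int sz[] = {1, 2, 3, 4};
        cv::Mat blob(4, sz, CV_32F, cv::Scalar(0));
        CV_Assert(blob.dims == 4 && blob.total() == 1 * 2 * 3 * 4);
        return 0;
    }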
@@ -1028,15 +1071,20 @@ TEST_P(Test_DLDT_two_inputs, as_backend)
     net.setInput(firstInp, "data", kScale);
     net.setInput(secondInp, "second_input", kScaleInv);
     net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);
+    net.setPreferableTarget(targetId);
     Mat out = net.forward();

     Mat ref;
     addWeighted(firstInp, kScale, secondInp, kScaleInv, 0, ref, CV_32F);
-    normAssert(out, ref);
+    // Output values are in range [0, 637.5].
+    double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.06 : 1e-6;
+    double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.3 : 1e-5;
+    normAssert(out, ref, "", l1, lInf);
 }

 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs, Combine(
-    Values(CV_8U, CV_32F), Values(CV_8U, CV_32F)
+    Values(CV_8U, CV_32F), Values(CV_8U, CV_32F),
+    testing::ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
 ));

 class UnsupportedLayer : public Layer
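The relaxed thresholds follow from the value range quoted in the new comment: inputs are uniform in [0, 255] and combined by addWeighted with kScale = 0.5 and kScaleInv = 2.0, so outputs peak at 255 * 0.5 + 255 * 2.0 = 637.5. FP16 carries roughly three decimal digits of precision, so absolute errors of a few tenths at magnitudes near 600 are expected on DNN_TARGET_OPENCL_FP16 and DNN_TARGET_MYRIAD; hence l1 = 0.06 and lInf = 0.3 there versus 1e-6 and 1e-5 for full-precision targets. The bound, checked standalone:

    #include <cassert>

    int main()
    {
        const float kScale = 0.5f, kScaleInv = 1.0f / kScale;
        // Worked bound behind the "[0, 637.5]" comment in the diff.
        float maxOut = 255.0f * kScale + 255.0f * kScaleInv;
        assert(maxOut == 637.5f);   // 127.5 + 510.0, exact in float
        return 0;
    }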
@@ -136,7 +136,7 @@ TEST_P(Test_Torch_layers, run_reshape_change_batch_size)

 TEST_P(Test_Torch_layers, run_reshape)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("Test is disabled for OpenVINO 2018R4");
 #endif
@@ -172,7 +172,7 @@ TEST_P(Test_Torch_layers, run_depth_concat)

 TEST_P(Test_Torch_layers, run_deconv)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018040000
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2018040000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         throw SkipTestException("Test is disabled for OpenVINO 2018R4");
 #endif