Merge pull request #15249 from dkurt:dnn_ie_tests_2019r2

Alexander Alekhin 2019-08-12 16:32:38 +00:00
commit e0cb01e2cf
3 changed files with 17 additions and 31 deletions


@@ -123,9 +123,12 @@ PERF_TEST_P_(DNNTestNetwork, SSD)
 PERF_TEST_P_(DNNTestNetwork, OpenFace)
 {
-    if (backend == DNN_BACKEND_HALIDE ||
-        (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD))
+    if (backend == DNN_BACKEND_HALIDE)
         throw SkipTestException("");
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+        throw SkipTestException("");
+#endif
     processNet("dnn/openface_nn4.small2.v1.t7", "", "",
             Mat(cv::Size(96, 96), CV_32FC3));
 }
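Note on the gating used above: the rewritten OpenFace perf test keeps the Halide skip unconditional but limits the Myriad skip to OpenVINO 2018 R5 builds, so newer Inference Engine releases exercise the Myriad target. INF_ENGINE_RELEASE packs the release number (2018050000 is 2018 R5, 2019020000 is 2019 R2) and the INF_ENGINE_VER_MAJOR_* macros compare against it at preprocessing time. The sketch below is a minimal, self-contained illustration of that gating; the macro definitions are written from scratch here rather than copied from OpenCV, so treat their exact form as an assumption.

#include <cstdio>

#ifndef INF_ENGINE_RELEASE
#define INF_ENGINE_RELEASE 2019020000  // assume a build against OpenVINO 2019 R2
#endif

// Version gates: compare only the <year><release> part of the packed number.
#define INF_ENGINE_VER_MAJOR_EQ(ver) (((INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_GE(ver) (((INF_ENGINE_RELEASE) / 10000) >= ((ver) / 10000))
#define INF_ENGINE_VER_MAJOR_LE(ver) (((INF_ENGINE_RELEASE) / 10000) <= ((ver) / 10000))

int main()
{
#if INF_ENGINE_VER_MAJOR_EQ(2018050000)
    std::printf("2018 R5 build: the Myriad skip above is compiled in\n");
#else
    std::printf("post-2018 R5 build: OpenFace runs on the Myriad target\n");
#endif
    return 0;
}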
@@ -185,16 +188,6 @@ PERF_TEST_P_(DNNTestNetwork, Inception_v2_SSD_TensorFlow)
 {
     if (backend == DNN_BACKEND_HALIDE)
         throw SkipTestException("");
-#if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
-        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-        throw SkipTestException("Test is disabled for MyriadX");
-#endif
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("Test is disabled for Myriad in OpenVINO 2019R2");
-#endif
     processNet("dnn/ssd_inception_v2_coco_2017_11_17.pb", "ssd_inception_v2_coco_2017_11_17.pbtxt", "",
             Mat(cv::Size(300, 300), CV_32FC3));
 }
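Both perf hunks rely on the same skip mechanism: throwing SkipTestException from the test body before processNet() runs makes the harness report that backend/target combination as skipped rather than failed. The real class is cvtest::SkipTestException from OpenCV's ts module; the mini-runner below is only a toy stand-in to show the behaviour.

#include <cstdio>
#include <stdexcept>
#include <string>

// Illustrative stand-in for cvtest::SkipTestException.
struct SkipTestException : std::runtime_error
{
    explicit SkipTestException(const std::string& msg) : std::runtime_error(msg) {}
};

// Tiny runner: a skip exception is reported separately from a failure.
template <typename Body>
void runCase(const char* name, Body body)
{
    try { body(); std::printf("[  OK  ] %s\n", name); }
    catch (const SkipTestException& e) { std::printf("[ SKIP ] %s %s\n", name, e.what()); }
    catch (const std::exception& e)    { std::printf("[ FAIL ] %s %s\n", name, e.what()); }
}

int main()
{
    const bool backendIsHalide = true;  // stand-in for the parametrized backend
    runCase("DNNTestNetwork.OpenFace", [&] {
        if (backendIsHalide)
            throw SkipTestException("");
        // processNet("dnn/openface_nn4.small2.v1.t7", ...) would run here
    });
    return 0;
}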


@@ -357,11 +357,9 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
 #if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
     {
-#if INF_ENGINE_VER_MAJOR_EQ(2019010000)
+#if INF_ENGINE_VER_MAJOR_GE(2019020000)
         if (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
-#else
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
 #endif
     }
 #endif
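The accuracy tests use a second mechanism: applyTestTag() attaches tags such as CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X to the running case so the harness can skip or filter it, and after this hunk MobileNet_SSD is only tagged on MyriadX with OpenVINO 2019 R2 or newer. A hedged sketch of tag-based skipping follows; apart from the applyTestTag name and the tag string, everything in it is invented for illustration and does not mirror OpenCV's ts internals.

#include <cstdio>
#include <set>
#include <string>

// Illustrative only: a global set of "skip" tags plus a flag for the current test.
// In the real harness the tags to skip come from the test runner's configuration.
static std::set<std::string> g_skipTags = { "dnn_skip_ie_myriad_x" };
static bool g_currentTestSkipped = false;

// Stand-in for applyTestTag(): tagging a case with a tag that is in the skip
// set marks the current test as skipped.
static void applyTestTag(const std::string& tag)
{
    if (g_skipTags.count(tag))
        g_currentTestSkipped = true;
}

int main()
{
    const bool targetIsMyriadX = true;  // stand-in for getInferenceEngineVPUType() == ..._MYRIAD_X
    if (targetIsMyriadX)
        applyTestTag("dnn_skip_ie_myriad_x");  // CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X in the real tests
    std::printf(g_currentTestSkipped ? "MobileNet_SSD: skipped by tag\n"
                                     : "MobileNet_SSD: running\n");
    return 0;
}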
@@ -395,16 +393,10 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
 TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)
 {
     applyTestTag(target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB);
-#if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-    {
-#if INF_ENGINE_VER_MAJOR_LE(2019010000)
-        if (getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
-#else
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
-#endif
-    }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2019010000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
+        getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
 #endif
     checkBackend();
@@ -456,12 +448,13 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
     float detectionConfThresh = (target == DNN_TARGET_MYRIAD) ? 0.35 : 0.3;
 #if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
-        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
-    )
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
+        getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
+    {
         scoreDiff = 0.061;
         iouDiff = 0.12;
         detectionConfThresh = 0.36;
+    }
 #endif
     normAssertDetections(ref, out, "", detectionConfThresh, scoreDiff, iouDiff);
     expectNoFallbacksFromIE(net);
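The MobileNet_v1_SSD hunk braces the whole tolerance block so that scoreDiff, iouDiff and detectionConfThresh are all relaxed together on MyriadX before normAssertDetections() compares the output against the reference detections by confidence and box overlap. To make the iouDiff = 0.12 tolerance concrete, here is a toy intersection-over-union check; it is not OpenCV's normAssertDetections(), which also matches detections by class id and score.

#include <algorithm>
#include <cstdio>

// Toy axis-aligned box and IoU computation.
struct Box { float x1, y1, x2, y2; };

static float iou(const Box& a, const Box& b)
{
    const float ix1 = std::max(a.x1, b.x1), iy1 = std::max(a.y1, b.y1);
    const float ix2 = std::min(a.x2, b.x2), iy2 = std::min(a.y2, b.y2);
    const float iw = std::max(0.f, ix2 - ix1), ih = std::max(0.f, iy2 - iy1);
    const float inter = iw * ih;
    const float uni = (a.x2 - a.x1) * (a.y2 - a.y1) + (b.x2 - b.x1) * (b.y2 - b.y1) - inter;
    return uni > 0.f ? inter / uni : 0.f;
}

int main()
{
    const Box ref  = {0.10f, 0.10f, 0.60f, 0.60f};  // reference detection (illustrative values)
    const Box pred = {0.11f, 0.11f, 0.61f, 0.61f};  // network output (illustrative values)
    const float iouDiff = 0.12f;                    // tolerance used for MyriadX above
    const float overlap = iou(ref, pred);
    std::printf("IoU = %.3f -> %s\n", overlap,
                (1.f - overlap) <= iouDiff ? "within tolerance" : "boxes differ too much");
    return 0;
}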


@@ -262,7 +262,7 @@ class Test_Torch_nets : public DNNTestLayer {};
 TEST_P(Test_Torch_nets, OpenFace_accuracy)
 {
-#if defined(INF_ENGINE_RELEASE)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
 #endif
@@ -287,8 +287,8 @@ TEST_P(Test_Torch_nets, OpenFace_accuracy)
     // Reference output values are in range [-0.17212, 0.263492]
-    float l1 = (target == DNN_TARGET_OPENCL_FP16) ? 4e-4 : 1e-5;
-    float lInf = (target == DNN_TARGET_OPENCL_FP16) ? 1.5e-3 : 1e-3;
+    // on Myriad problem layer: l4_Pooling - does not use pads_begin
+    float l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 2e-3 : 1e-5;
+    float lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 5e-3 : 1e-3;
     Mat outRef = readTorchBlob(_tf("net_openface_output.dat"), true);
     normAssert(out, outRef, "", l1, lInf);
 }
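The widened l1/lInf thresholds bound, respectively, the per-element mean absolute difference and the maximum absolute difference that normAssert() tolerates between the network output and the stored reference blob, now relaxed for the FP16-based OPENCL_FP16 and MYRIAD targets. The toy below computes both measures on made-up values (only the range endpoints come from the comment in the test); OpenCV's normAssert() works on cv::Mat, so this is only the arithmetic, not the real helper.

#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

int main()
{
    // Reference and output values are illustrative; the endpoints match the
    // range quoted in the test comment above.
    const std::vector<float> ref = {-0.17212f, 0.02311f, 0.10547f, 0.263492f};
    const std::vector<float> out = {-0.17101f, 0.02476f, 0.10262f, 0.264100f};

    double sumAbs = 0.0, maxAbs = 0.0;
    for (size_t i = 0; i < ref.size(); ++i)
    {
        const double d = std::fabs(double(ref[i]) - double(out[i]));
        sumAbs += d;
        maxAbs = std::max(maxAbs, d);
    }
    const double l1 = sumAbs / ref.size();  // compare against 2e-3 for OPENCL_FP16 / MYRIAD
    const double lInf = maxAbs;             // compare against 5e-3 for OPENCL_FP16 / MYRIAD

    std::printf("l1 = %.6f, lInf = %.6f\n", l1, lInf);
    std::printf("%s\n", (l1 <= 2e-3 && lInf <= 5e-3) ? "within the relaxed Myriad/FP16 tolerances"
                                                     : "outside tolerance");
    return 0;
}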