diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index f8a3dcab9b..9e7c24020f 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -2102,7 +2102,7 @@ struct Net::Impl
 
                 auto ieInpNode = inputNodes[i].dynamicCast<InfEngineNgraphNode>();
                 CV_Assert(oid < ieInpNode->node->get_output_size());
-#if INF_ENGINE_VER_MAJOR_GT(2020020000)
+#if INF_ENGINE_VER_MAJOR_GT(2020030000)
                 inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(ieInpNode->node->get_output_as_single_output_node(oid)));
 #else
                 inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(ieInpNode->node->get_output_as_single_output_node(oid, false)));
diff --git a/modules/dnn/test/test_darknet_importer.cpp b/modules/dnn/test/test_darknet_importer.cpp
index 6d88e3c4d4..e76c16e273 100644
--- a/modules/dnn/test/test_darknet_importer.cpp
+++ b/modules/dnn/test/test_darknet_importer.cpp
@@ -544,6 +544,10 @@ TEST_P(Test_Darknet_layers, reorg)
 
 TEST_P(Test_Darknet_layers, maxpool)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     testDarknetLayer("maxpool");
 }
 
diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index 3b43d33c71..295d93071e 100644
--- a/modules/dnn/test/test_halide_layers.cpp
+++ b/modules/dnn/test/test_halide_layers.cpp
@@ -350,6 +350,11 @@ TEST_P(MaxPooling, Accuracy)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+
     LayerParams lp;
     lp.set("pool", "max");
     lp.set("kernel_w", kernel.width);
diff --git a/modules/dnn/test/test_ie_models.cpp b/modules/dnn/test/test_ie_models.cpp
index d8096daa7b..db5dcfc077 100644
--- a/modules/dnn/test/test_ie_models.cpp
+++ b/modules/dnn/test/test_ie_models.cpp
@@ -134,6 +134,8 @@ static inline void genData(const InferenceEngine::TensorDesc& desc, Mat& m, Blob
 void runIE(Target target, const std::string& xmlPath, const std::string& binPath,
            std::map<std::string, cv::Mat>& inputsMap, std::map<std::string, cv::Mat>& outputsMap)
 {
+    SCOPED_TRACE("runIE");
+
     CNNNetReader reader;
     reader.ReadNetwork(xmlPath);
     reader.ReadWeights(binPath);
@@ -247,6 +249,8 @@ void runCV(Backend backendId, Target targetId, const std::string& xmlPath, const
            const std::map<std::string, cv::Mat>& inputsMap,
            std::map<std::string, cv::Mat>& outputsMap)
 {
+    SCOPED_TRACE("runOCV");
+
     Net net = readNet(xmlPath, binPath);
     for (auto& it : inputsMap)
         net.setInput(it.second, it.first);
@@ -301,8 +305,8 @@ TEST_P(DNNTestOpenVINO, models)
     // Single Myriad device cannot be shared across multiple processes.
     if (targetId == DNN_TARGET_MYRIAD)
         resetMyriadDevice();
-    runIE(targetId, xmlPath, binPath, inputsMap, ieOutputsMap);
-    runCV(backendId, targetId, xmlPath, binPath, inputsMap, cvOutputsMap);
+    EXPECT_NO_THROW(runIE(targetId, xmlPath, binPath, inputsMap, ieOutputsMap)) << "runIE";
+    EXPECT_NO_THROW(runCV(backendId, targetId, xmlPath, binPath, inputsMap, cvOutputsMap)) << "runCV";
 
     double eps = 0;
 #if INF_ENGINE_VER_MAJOR_GE(2020010000)
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index fd69f91a92..d2a8010e9f 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -95,7 +95,14 @@ TEST_P(Test_ONNX_layers, InstanceNorm)
 
 TEST_P(Test_ONNX_layers, MaxPooling)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     testONNXModels("maxpooling", npy, 0, 0, false, false);
+}
+TEST_P(Test_ONNX_layers, MaxPooling_2)
+{
     testONNXModels("two_maxpooling", npy, 0, 0, false, false);
 }
 
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index b71dfbc84a..6738f1b910 100644
--- a/modules/dnn/test/test_tf_importer.cpp
+++ b/modules/dnn/test/test_tf_importer.cpp
@@ -128,13 +128,32 @@ TEST_P(Test_TensorFlow_layers, reduce_mean)
     runTensorFlowNet("global_pool_by_axis");
 }
 
-TEST_P(Test_TensorFlow_layers, conv)
+TEST_P(Test_TensorFlow_layers, conv_single_conv)
 {
     runTensorFlowNet("single_conv");
+}
+TEST_P(Test_TensorFlow_layers, conv_atrous_conv2d_valid)
+{
     runTensorFlowNet("atrous_conv2d_valid");
+}
+TEST_P(Test_TensorFlow_layers, conv_atrous_conv2d_same)
+{
     runTensorFlowNet("atrous_conv2d_same");
+}
+TEST_P(Test_TensorFlow_layers, conv_depthwise_conv2d)
+{
     runTensorFlowNet("depthwise_conv2d");
+}
+TEST_P(Test_TensorFlow_layers, conv_keras_atrous_conv2d_same)
+{
     runTensorFlowNet("keras_atrous_conv2d_same");
+}
+TEST_P(Test_TensorFlow_layers, conv_pool_nchw)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     runTensorFlowNet("conv_pool_nchw");
 }
 
@@ -277,11 +296,32 @@ TEST_P(Test_TensorFlow_layers, slim_batch_norm)
     runTensorFlowNet("slim_batch_norm", false, l1, lInf);
 }
 
-TEST_P(Test_TensorFlow_layers, pooling)
+TEST_P(Test_TensorFlow_layers, pooling_max_pool_even)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     runTensorFlowNet("max_pool_even");
+}
+TEST_P(Test_TensorFlow_layers, pooling_max_pool_odd_valid)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     runTensorFlowNet("max_pool_odd_valid");
+}
+TEST_P(Test_TensorFlow_layers, pooling_max_pool_odd_same)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     runTensorFlowNet("max_pool_odd_same");
+}
+TEST_P(Test_TensorFlow_layers, pooling_reduce_mean)
+{
     runTensorFlowNet("reduce_mean");  // an average pooling over all spatial dimensions.
 }
 
@@ -738,23 +778,67 @@ TEST_P(Test_TensorFlow_nets, EAST_text_detection)
 
 INSTANTIATE_TEST_CASE_P(/**/, Test_TensorFlow_nets, dnnBackendsAndTargets());
 
-TEST_P(Test_TensorFlow_layers, fp16_weights)
+
+TEST_P(Test_TensorFlow_layers, fp16_weights_fp16_single_conv)
 {
-    float l1 = 0.00078;
-    float lInf = 0.012;
+    float l1 = 0.00078, lInf = 0.012;
     runTensorFlowNet("fp16_single_conv", false, l1, lInf);
+}
+TEST_P(Test_TensorFlow_layers, fp16_weights_fp16_max_pool_odd_same)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+    float l1 = 0.00078, lInf = 0.012;
     runTensorFlowNet("fp16_max_pool_odd_same", false, l1, lInf);
+}
+TEST_P(Test_TensorFlow_layers, fp16_weights_fp16_eltwise_add_mul)
+{
+    float l1 = 0.00078, lInf = 0.012;
     runTensorFlowNet("fp16_eltwise_add_mul", false, l1, lInf);
+}
+TEST_P(Test_TensorFlow_layers, fp16_weights_fp16_pad_and_concat)
+{
+    float l1 = 0.00078, lInf = 0.012;
     runTensorFlowNet("fp16_pad_and_concat", false, l1, lInf);
+}
+TEST_P(Test_TensorFlow_layers, fp16_weights_fp16_padding_valid)
+{
+    float l1 = 0.00078, lInf = 0.012;
     runTensorFlowNet("fp16_padding_valid", false, l1, lInf);
+}
+TEST_P(Test_TensorFlow_layers, fp16_weights_fp16_max_pool_even)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+    float l1 = 0.00078, lInf = 0.012;
     // Reference output values are in range [0.0889, 1.651]
     runTensorFlowNet("fp16_max_pool_even", false, (target == DNN_TARGET_MYRIAD) ? 0.003 : l1, lInf);
+}
+TEST_P(Test_TensorFlow_layers, fp16_weights_fp16_deconvolution)
+{
+    float l1 = 0.00078, lInf = 0.012;
     if (target == DNN_TARGET_MYRIAD) {
         l1 = 0.0041;
         lInf = 0.024;
     }
     // Reference output values are in range [0, 10.75]
     runTensorFlowNet("fp16_deconvolution", false, l1, lInf);
+}
+TEST_P(Test_TensorFlow_layers, fp16_weights_fp16_max_pool_odd_valid)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+    float l1 = 0.00078, lInf = 0.012;
+    if (target == DNN_TARGET_MYRIAD) {
+        l1 = 0.0041;
+        lInf = 0.024;
+    }
     // Reference output values are in range [0.418, 2.297]
     runTensorFlowNet("fp16_max_pool_odd_valid", false, l1, lInf);
 }