diff --git a/modules/dnn/test/test_backends.cpp b/modules/dnn/test/test_backends.cpp
index 19e0729abb..d1df4c35aa 100644
--- a/modules/dnn/test/test_backends.cpp
+++ b/modules/dnn/test/test_backends.cpp
@@ -248,7 +248,7 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_v1_TensorFlow_Different_Width_Height)
 {
     if (backend == DNN_BACKEND_HALIDE)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE);
-#if defined(INF_ENGINE_RELEASE)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
         target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
diff --git a/modules/dnn/test/test_caffe_importer.cpp b/modules/dnn/test/test_caffe_importer.cpp
index 18b8d5ad82..7249fb4e9f 100644
--- a/modules/dnn/test/test_caffe_importer.cpp
+++ b/modules/dnn/test/test_caffe_importer.cpp
@@ -112,10 +112,12 @@ TEST(Test_Caffe, read_googlenet)
 
 TEST_P(Test_Caffe_nets, Axpy)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
 
     String proto = _tf("axpy.prototxt");
     Net net = readNetFromCaffe(proto);
@@ -150,8 +152,17 @@ TEST_P(Test_Caffe_nets, Axpy)
             }
         }
     }
-    float l1 = (target == DNN_TARGET_OPENCL_FP16) ? 2e-4 : 1e-5;
-    float lInf = (target == DNN_TARGET_OPENCL_FP16) ? 1e-3 : 1e-4;
+    float l1 = 1e-5, lInf = 1e-4;
+    if (target == DNN_TARGET_OPENCL_FP16)
+    {
+        l1 = 2e-4;
+        lInf = 1e-3;
+    }
+    if (target == DNN_TARGET_MYRIAD)
+    {
+        l1 = 0.001;
+        lInf = 0.001;
+    }
     normAssert(ref, out, "", l1, lInf);
 }
 
@@ -657,7 +668,7 @@ TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
         CV_TEST_TAG_DEBUG_VERYLONG
     );
 
-#if defined(INF_ENGINE_RELEASE)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
         (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
         applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
@@ -668,6 +679,19 @@ TEST_P(Test_Caffe_nets, FasterRCNN_vgg16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
 #endif
 
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE exception: Ngraph operation Reshape with name rpn_cls_score_reshape has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+            CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+    // Check 'backward_compatible_check || in_out_elements_equal' failed at core/src/op/reshape.cpp:390:
+    // While validating node 'v1::Reshape bbox_pred_reshape (bbox_pred[0]:f32{1,84}, Constant_241202[0]:i64{4}) -> (f32{?,?,?,?})' with friendly_name 'bbox_pred_reshape':
+    // Requested output shape {1,6300,4,1} is incompatible with input shape Shape{1, 84}
+    if (target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+
     static Mat ref = (Mat_<float>(3, 7) << 0, 2, 0.949398, 99.2454, 210.141, 601.205, 462.849,
                                            0, 7, 0.997022, 481.841, 92.3218, 722.685, 175.953,
                                            0, 12, 0.993028, 133.221, 189.377, 350.994, 563.166);
diff --git a/modules/dnn/test/test_darknet_importer.cpp b/modules/dnn/test/test_darknet_importer.cpp
index 885bd6eb6b..f069ad190d 100644
--- a/modules/dnn/test/test_darknet_importer.cpp
+++ b/modules/dnn/test/test_darknet_importer.cpp
@@ -121,7 +121,7 @@ public:
     {
         SCOPED_TRACE("batch size 2");
 
-#if defined(INF_ENGINE_RELEASE)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
         if (target == DNN_TARGET_MYRIAD && name == "shortcut")
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
 #endif
@@ -429,22 +429,31 @@ TEST_P(Test_Darknet_nets_async, Accuracy)
 {
     Backend backendId = get<0>(get<1>(GetParam()));
     Target targetId = get<1>(get<1>(GetParam()));
+    std::string prefix = get<0>(GetParam());
+
+    applyTestTag(CV_TEST_TAG_MEMORY_512MB);
+
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (INF_ENGINE_VER_MAJOR_LT(2019020000) && backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
-    applyTestTag(CV_TEST_TAG_MEMORY_512MB);
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
-
-    std::string prefix = get<0>(GetParam());
-
-    if (targetId == DNN_TARGET_MYRIAD && prefix == "yolov4")  // NC_OUT_OF_MEMORY
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
 
     if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         throw SkipTestException("No support for async forward");
 
+#if defined(INF_ENGINE_RELEASE)
+#if INF_ENGINE_VER_MAJOR_GE(2021040000)
+    if (targetId == DNN_TARGET_MYRIAD && prefix == "yolov3")  // NC_OUT_OF_MEMORY
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#else
+    if (targetId == DNN_TARGET_MYRIAD && prefix == "yolov4")  // NC_OUT_OF_MEMORY
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+#endif
+
     const int numInputs = 2;
     std::vector<Mat> inputs(numInputs);
     int blobSize[] = {1, 3, 416, 416};
@@ -472,6 +481,34 @@ TEST_P(Test_Darknet_nets_async, Accuracy)
     netAsync.setPreferableBackend(backendId);
     netAsync.setPreferableTarget(targetId);
 
+    double l1 = 0.0;
+    double lInf = 0.0;
+#if defined(INF_ENGINE_RELEASE)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        if (targetId == DNN_TARGET_MYRIAD && prefix == "yolo-voc")
+        {
+            l1 = 0.02;
+            lInf = 0.15;
+        }
+        if (targetId == DNN_TARGET_OPENCL_FP16 && prefix == "yolo-voc")
+        {
+            l1 = 0.02;
+            lInf = 0.1;
+        }
+        if (targetId == DNN_TARGET_OPENCL_FP16 && prefix == "yolov3")
+        {
+            l1 = 0.001;
+            lInf = 0.007;
+        }
+        if (targetId == DNN_TARGET_OPENCL_FP16 && prefix == "yolov4")
+        {
+            l1 = 0.001;
+            lInf = 0.005;
+        }
+    }
+#endif
+
     // Run asynchronously. To make test more robust, process inputs in the reversed order.
     for (int i = numInputs - 1; i >= 0; --i)
     {
@@ -481,7 +518,7 @@ TEST_P(Test_Darknet_nets_async, Accuracy)
         ASSERT_TRUE(out.valid());
         Mat result;
         EXPECT_TRUE(out.get(result, async_timeout));
-        normAssert(refs[i], result, format("Index: %d", i).c_str(), 0, 0);
+        normAssert(refs[i], result, format("Index: %d", i).c_str(), l1, lInf);
     }
 }
 
@@ -836,10 +873,23 @@ TEST_P(Test_Darknet_layers, avgpool_softmax)
 
 TEST_P(Test_Darknet_layers, region)
 {
-#if defined(INF_ENGINE_RELEASE)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && INF_ENGINE_VER_MAJOR_GE(2020020000))
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && INF_ENGINE_VER_MAJOR_GE(2020020000))
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
+
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy on CPU, OpenCL
+    // Expected: (normInf) <= (lInf), actual: 0.763223 vs 0.0001
+    // |ref| = 1.207319974899292
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+            CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
+
     testDarknetLayer("region");
 }
diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index b24968b8e8..405587eec7 100644
--- a/modules/dnn/test/test_halide_layers.cpp
+++ b/modules/dnn/test/test_halide_layers.cpp
@@ -240,9 +240,11 @@ TEST_P(LRN, Accuracy)
     Backend backendId = get<0>(get<5>(GetParam()));
     Target targetId = get<1>(get<5>(GetParam()));
 
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if ((inSize.width == 5 || inSize.height == 5) && targetId == DNN_TARGET_MYRIAD &&
         nrmType == "ACROSS_CHANNELS")
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
+#endif
 
     LayerParams lp;
     lp.set("norm_region", nrmType);
@@ -407,12 +409,14 @@ TEST_P(FullyConnected, Accuracy)
     bool hasBias = get<3>(GetParam());
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
         (targetId == DNN_TARGET_OPENCL_FP16 || (targetId == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)))
     {
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
     }
+#endif
 
     Mat weights(outChannels, inChannels * inSize.height * inSize.width, CV_32F);
     randu(weights, -1.0f, 1.0f);
@@ -430,7 +434,21 @@ TEST_P(FullyConnected, Accuracy)
 
     int sz[] = {1, inChannels, inSize.height, inSize.width};
     Mat input(4, &sz[0], CV_32F);
-    test(lp, input, backendId, targetId);
+
+    double l1 = 0.0;
+    double lInf = 0.0;
+#if defined(INF_ENGINE_RELEASE)
+    if (targetId == DNN_TARGET_MYRIAD)
+    {
+        l1 = 0.015;
+        lInf = 0.025;
+    }
+    else if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL_FP16)
+    {
+        l1 = 0.01;
+    }
+#endif
+    test(lp, input, backendId, targetId, false, l1, lInf);
 }
 
 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, FullyConnected, Combine(
@@ -812,18 +830,18 @@ TEST_P(Eltwise, Accuracy)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && numConv > 1)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 
-#if defined(INF_ENGINE_RELEASE)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_OPENCL &&
         op == "sum" && numConv == 1 && !weighted)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
 #endif
 
-#if defined(INF_ENGINE_RELEASE)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && numConv > 1)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index e61fd733f9..2d4f78c88c 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -194,13 +194,23 @@ TEST_P(Test_Caffe_layers, DeConvolution)
 
 TEST_P(Test_Caffe_layers, InnerProduct)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE exception: Ngraph operation Reshape with name Reshape_4219609 has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+            CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
+
     testLayerUsingCaffeModels("layer_inner_product", true);
 }
@@ -295,10 +305,12 @@ TEST_P(Test_Caffe_layers, Concat)
                      CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
 
+#if INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
         applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
                      CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
 #endif
 
     testLayerUsingCaffeModels("layer_concat");
diff --git a/modules/dnn/test/test_misc.cpp b/modules/dnn/test/test_misc.cpp
index 9971450478..0de503ac7d 100644
--- a/modules/dnn/test/test_misc.cpp
+++ b/modules/dnn/test/test_misc.cpp
@@ -578,7 +578,8 @@ TEST_P(Async, create_layer_pipeline_set_and_forward_all)
     if (backendId != DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && backendId != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         throw SkipTestException("No support for async forward");
 
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    // Exception: Default implementation fallbacks in asynchronous mode
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && dtype == CV_8U)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
 
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp
index 0fd5d08258..a432f2251e 100644
--- a/modules/dnn/test/test_onnx_importer.cpp
+++ b/modules/dnn/test/test_onnx_importer.cpp
@@ -185,17 +185,11 @@ TEST_P(Test_ONNX_layers, Gather)
 
 TEST_P(Test_ONNX_layers, Convolution3D)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
-#endif
     testONNXModels("conv3d");
 }
 
 TEST_P(Test_ONNX_layers, Convolution3D_bias)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
-#endif
     testONNXModels("conv3d_bias");
 }
 
@@ -222,14 +216,73 @@ TEST_P(Test_ONNX_layers, Deconvolution)
 
 TEST_P(Test_ONNX_layers, Deconvolution3D)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2018050000)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        // [ GENERAL_ERROR ] vpu/graph_transformer/src/frontend/frontend.cpp:439 Failed to compile layer "2":
+        // [ GENERAL_ERROR ] vpu/graph_transformer/src/model/model.cpp:198 duplicateData error: while duplicating 2@weights Const data got different desc and content byte sizes (162 and 486 respectively)
+        if (target == DNN_TARGET_MYRIAD)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    }
 #endif
-    if (backend == DNN_BACKEND_OPENCV || target != DNN_TARGET_CPU)
-        throw SkipTestException("Only DLIE backend on CPU is supported");
+
+    if (backend == DNN_BACKEND_OPENCV)
+        throw SkipTestException("OpenCV backend is not supported");  // FIXIT use tags
+
     testONNXModels("deconv3d");
+}
+
+TEST_P(Test_ONNX_layers, Deconvolution3D_bias)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        // [ GENERAL_ERROR ] vpu/graph_transformer/src/frontend/frontend.cpp:439 Failed to compile layer "2":
+        // [ GENERAL_ERROR ] vpu/graph_transformer/src/model/model.cpp:198 duplicateData error: while duplicating 2@weights Const data got different desc and content byte sizes (162 and 486 respectively)
+        if (target == DNN_TARGET_MYRIAD)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    }
+#endif
+
+    if (backend == DNN_BACKEND_OPENCV)
+        throw SkipTestException("OpenCV backend is not supported");  // FIXIT use tags
+
+    testONNXModels("deconv3d_bias");
+}
+
+TEST_P(Test_ONNX_layers, Deconvolution3D_pad)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        // [ GENERAL_ERROR ] vpu/graph_transformer/src/frontend/frontend.cpp:439 Failed to compile layer "2":
+        // [ GENERAL_ERROR ] vpu/graph_transformer/src/model/model.cpp:198 duplicateData error: while duplicating 2@weights Const data got different desc and content byte sizes (162 and 486 respectively)
+        if (target == DNN_TARGET_MYRIAD)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    }
+#endif
+
+    if (backend == DNN_BACKEND_OPENCV)
+        throw SkipTestException("OpenCV backend is not supported");  // FIXIT use tags
+
+    testONNXModels("deconv3d_pad");
+}
+
+TEST_P(Test_ONNX_layers, Deconvolution3D_adjpad)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        // [ GENERAL_ERROR ] vpu/graph_transformer/src/frontend/frontend.cpp:439 Failed to compile layer "2":
+        // [ GENERAL_ERROR ] vpu/graph_transformer/src/model/model.cpp:198 duplicateData error: while duplicating 2@weights Const data got different desc and content byte sizes (162 and 486 respectively)
+        if (target == DNN_TARGET_MYRIAD)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    }
+#endif
+
+    if (backend == DNN_BACKEND_OPENCV)
+        throw SkipTestException("OpenCV backend is not supported");  // FIXIT use tags
+
+    testONNXModels("deconv3d_adjpad");
 }
@@ -295,12 +348,15 @@ TEST_P(Test_ONNX_layers, Scale)
 
 TEST_P(Test_ONNX_layers, ReduceMean3D)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
-    if (target != DNN_TARGET_CPU)
-        throw SkipTestException("Only CPU is supported");
+#endif
+    if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");  // FIXIT use tags
+
     testONNXModels("reduce_mean3d");
 }
@@ -340,13 +396,12 @@ TEST_P(Test_ONNX_layers, Concatenation)
 
 TEST_P(Test_ONNX_layers, Eltwise3D)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
-#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
+#endif
     testONNXModels("eltwise3d");
 }
@@ -357,43 +412,56 @@ TEST_P(Test_ONNX_layers, AveragePooling)
 
 TEST_P(Test_ONNX_layers, MaxPooling3D)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        // accuracy
+        if (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
+            applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+            );
+        // IE exception: [ GENERAL_ERROR ] AssertionFailed: !expired()
+        if (target == DNN_TARGET_MYRIAD)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    }
 #endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
-    if (target != DNN_TARGET_CPU)
-        throw SkipTestException("Only CPU is supported");
+#endif
+    if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");  // FIXIT use tags
+
     testONNXModels("max_pool3d", npy, 0, 0, false, false);
 }
 
 TEST_P(Test_ONNX_layers, AvePooling3D)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
-#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
-    if (target != DNN_TARGET_CPU)
-        throw SkipTestException("Only CPU is supported");
+#endif
+    if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");  // FIXIT use tags
+
     testONNXModels("ave_pool3d");
 }
 
 TEST_P(Test_ONNX_layers, PoolConv3D)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
-#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
-    if (target != DNN_TARGET_CPU)
-        throw SkipTestException("Only CPU is supported");
+#endif
+    if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");  // FIXIT use tags
+
     testONNXModels("pool_conv_3d");
 }
@@ -875,6 +943,7 @@ TEST_P(Test_ONNX_layers, DynamicAxes)
 
 TEST_P(Test_ONNX_layers, MaxPool1d)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
         if (target == DNN_TARGET_MYRIAD)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
@@ -883,11 +952,20 @@ TEST_P(Test_ONNX_layers, MaxPool1d)
     {
         if (target == DNN_TARGET_MYRIAD)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     }
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+    {
+        // 2021.4: [ GENERAL_ERROR ] AssertionFailed: !expired()
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+    }
+#endif
     testONNXModels("maxpooling_1d");
 }
 
 TEST_P(Test_ONNX_layers, MaxPoolSigmoid1d)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
         if (target == DNN_TARGET_MYRIAD)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
@@ -896,11 +974,13 @@ TEST_P(Test_ONNX_layers, MaxPoolSigmoid1d)
     {
         if (target == DNN_TARGET_MYRIAD)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     }
+#endif
     testONNXModels("maxpooling_sigmoid_1d");
 }
 
 TEST_P(Test_ONNX_layers, MaxPool1d_Twise)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
         if (target == DNN_TARGET_MYRIAD)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
@@ -909,11 +989,13 @@ TEST_P(Test_ONNX_layers, MaxPool1d_Twise)
     {
         if (target == DNN_TARGET_MYRIAD)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     }
+#endif
     testONNXModels("two_maxpooling_1d");
 }
 
 TEST_P(Test_ONNX_layers, AvePool1d)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
         if (target == DNN_TARGET_MYRIAD)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
@@ -922,11 +1004,13 @@ TEST_P(Test_ONNX_layers, AvePool1d)
     {
         if (target == DNN_TARGET_MYRIAD)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     }
+#endif
     testONNXModels("average_pooling_1d");
 }
 
 TEST_P(Test_ONNX_layers, PoolConv1d)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
         if (target == DNN_TARGET_MYRIAD)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
@@ -935,6 +1019,7 @@ TEST_P(Test_ONNX_layers, PoolConv1d)
     {
         if (target == DNN_TARGET_MYRIAD)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     }
+#endif
     testONNXModels("pool_conv_1d");
 }
@@ -1014,11 +1099,18 @@ TEST_P(Test_ONNX_nets, Squeezenet)
 
 TEST_P(Test_ONNX_nets, Googlenet)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy
+    if (target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
 
     const String model = _tf("models/googlenet.onnx", false);
@@ -1264,7 +1356,7 @@ TEST_P(Test_ONNX_nets, DenseNet121)
 
 TEST_P(Test_ONNX_nets, Inception_v1)
 {
-#if defined(INF_ENGINE_RELEASE)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
         target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
@@ -1274,26 +1366,35 @@ TEST_P(Test_ONNX_nets, Inception_v1)
 
 TEST_P(Test_ONNX_nets, Shufflenet)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
         if (target == DNN_TARGET_OPENCL_FP16)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
         if (target == DNN_TARGET_OPENCL)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
         if (target == DNN_TARGET_MYRIAD)
             applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     }
+#endif
     testONNXModels("shufflenet", pb);
 }
 
 TEST_P(Test_ONNX_nets, Resnet34_kinetics)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
-#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
-    if (target != DNN_TARGET_CPU)
-        throw SkipTestException("Only CPU is supported");
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        // IE exception: Function contains several inputs and outputs with one friendly name!
+        if (target == DNN_TARGET_MYRIAD)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    }
+#endif
+    if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");  // FIXIT use tags
 
     String onnxmodel = findDataFile("dnn/resnet-34_kinetics.onnx", false);
     Mat image0 = imread(findDataFile("dnn/dog416.png"));
@@ -1334,6 +1435,11 @@ TEST_P(Test_ONNX_nets, Resnet34_kinetics)
     // output range [-5, 11]
     float l1 = 0.0013;
     float lInf = 0.009;
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+    {
+        l1 = 0.02;
+        lInf = 0.07;
+    }
 
     checkBackend(&input0, &ref0);
     net.setInput(input0);
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index d02b7136d6..073a2f3395 100644
--- a/modules/dnn/test/test_tf_importer.cpp
+++ b/modules/dnn/test/test_tf_importer.cpp
@@ -215,13 +215,12 @@ TEST_P(Test_TensorFlow_layers, conv_pool_nchw)
 
 TEST_P(Test_TensorFlow_layers, Convolution3D)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
-#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
+#endif
     runTensorFlowNet("conv3d");
 }
@@ -230,7 +229,7 @@ TEST_P(Test_TensorFlow_layers, padding)
     runTensorFlowNet("padding_valid");
     runTensorFlowNet("spatial_padding");
     runTensorFlowNet("mirror_pad");
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019020000)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019020000) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (target == DNN_TARGET_MYRIAD)
     {
         if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
@@ -343,6 +342,7 @@ TEST_P(Test_TensorFlow_layers, concat_axis_1)
 
 TEST_P(Test_TensorFlow_layers, concat_3d)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
     {
         if (target == DNN_TARGET_OPENCL_FP16) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
@@ -352,6 +352,7 @@ TEST_P(Test_TensorFlow_layers, concat_3d)
 
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH || backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) &&
         target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
+#endif
     runTensorFlowNet("concat_3d");
 }
@@ -429,13 +430,27 @@ TEST_P(Test_TensorFlow_layers, batch_norm3D)
 
 TEST_P(Test_TensorFlow_layers, slim_batch_norm)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
     // Output values range: [-40.0597, 207.827]
-    double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.041 : default_l1;
-    double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.33 : default_lInf;
+    double l1 = default_l1;
+    double lInf = default_lInf;
+    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
+    {
+        l1 = 0.041;
+        lInf = 0.33;
+    }
+#if defined(INF_ENGINE_RELEASE)
+    else if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU)
+    {
+        lInf = 0.0002;
+    }
+#endif
+
     runTensorFlowNet("slim_batch_norm", false, l1, lInf);
 }
@@ -562,7 +577,7 @@ TEST_P(Test_TensorFlow_layers, max_pool_grad)
 TEST_P(Test_TensorFlow_layers, ave_pool_same)
 {
     // Reference output values are in range [-0.519531, 0.112976]
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
     {
         if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
@@ -576,29 +591,42 @@ TEST_P(Test_TensorFlow_layers, ave_pool_same)
 
 TEST_P(Test_TensorFlow_layers, MaxPooling3D)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        // accuracy
+        if (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
+            applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+            );
+        // IE exception: [ GENERAL_ERROR ] AssertionFailed: !expired()
+        if (target == DNN_TARGET_MYRIAD)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    }
 #endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
-    if (target != DNN_TARGET_CPU)
-        throw SkipTestException("Only CPU is supported");
+#endif
+    if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");  // FIXIT use tags
+
     runTensorFlowNet("max_pool3d");
 }
 
 TEST_P(Test_TensorFlow_layers, AvePooling3D)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000)
-    applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION);
-#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);  // Only CPU on DLIE backend is supported
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);  // Only CPU on DLIE backend is supported
-    if (target != DNN_TARGET_CPU)
-        throw SkipTestException("Only CPU is supported");
+#endif
+    if (backend == DNN_BACKEND_OPENCV && target != DNN_TARGET_CPU)
+        throw SkipTestException("Only CPU is supported");  // FIXIT use tags
+
     runTensorFlowNet("ave_pool3d");
 }
@@ -628,10 +656,12 @@ TEST_P(Test_TensorFlow_layers, matmul)
 
 TEST_P(Test_TensorFlow_layers, reshape)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
     runTensorFlowNet("shift_reshape_no_reorder");
     runTensorFlowNet("reshape_no_reorder");
     runTensorFlowNet("reshape_reduce");
@@ -1188,18 +1218,35 @@ TEST_P(Test_TensorFlow_layers, quantized)
 
 TEST_P(Test_TensorFlow_layers, lstm)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        // Exception: Ngraph operation Reshape with name Reshape has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+        if (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
+            applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+            );
+        // Xlink
+        if (target == DNN_TARGET_MYRIAD)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    }
+#endif
     if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
+
     runTensorFlowNet("lstm", true);
     runTensorFlowNet("lstm", true, 0.0, 0.0, true);
 }
 
 TEST_P(Test_TensorFlow_layers, split)
 {
+
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
@@ -1229,8 +1276,10 @@ TEST_P(Test_TensorFlow_layers, resize_nearest_neighbor_align_corners)
 
 TEST_P(Test_TensorFlow_layers, resize_nearest_neighbor_half_pixel)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
 
     runTensorFlowNet("resize_nearest_neighbor", false, 0.0, 0.0, false, "_half_pixel");
 }
@@ -1369,10 +1418,26 @@ TEST_P(Test_TensorFlow_layers, clip_by_value)
 
 TEST_P(Test_TensorFlow_layers, tf2_prelu)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
+    {
+        // IE exception: Input prelu:StatefulPartitionedCall/StatefulPartitionedCall/sequential/p_re_lu/add hasn't been found in primitiveIDs map
+        if (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)
+            applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+            );
+        // IE exception: Eltwise node with name `StatefulPartitionedCall/StatefulPartitionedCall/sequential/p_re_lu/add` has invalid input/output dims configuration
+        if (target == DNN_TARGET_CPU)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    }
+#endif
+
     runTensorFlowNet("tf2_prelu");
 }
diff --git a/modules/dnn/test/test_torch_importer.cpp b/modules/dnn/test/test_torch_importer.cpp
index 96e8ac85a8..fdd5a4b923 100644
--- a/modules/dnn/test/test_torch_importer.cpp
+++ b/modules/dnn/test/test_torch_importer.cpp
@@ -211,23 +211,32 @@ TEST_P(Test_Torch_layers, net_lp_pooling_square)
 }
 TEST_P(Test_Torch_layers, net_lp_pooling_power)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     runTorchNet("net_lp_pooling_power", "", false, true);
 }
 TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
     double l1 = 0.0, lInf = 0.0;
     if (target == DNN_TARGET_OPENCL_FP16)
     {
         l1 = 0.046;
         lInf = 0.023;
     }
+    else if (target == DNN_TARGET_MYRIAD)
+    {
+        l1 = 0.02;
+        lInf = 0.05;
+    }
     // The OpenCL kernels use the native_ math functions which have
     // implementation defined accuracy, so we use relaxed thresholds. See
     // https://github.com/opencv/opencv/issues/9821 for more details.