Mirror of https://github.com/opencv/opencv.git
Merge pull request #21133 from alalek:dnn_test_ie_update_3.4
Commit: b55d8f46f4
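The branch name alalek:dnn_test_ie_update_3.4 and the hunks below indicate a test-suite update for newer Inference Engine (OpenVINO) releases: most changes add version-gated skip tags rather than new test logic. For orientation, a minimal sketch of the recurring pattern, assuming a GoogleTest fixture that exposes backend and target the way OpenCV's DNN test suites do; the test name here is hypothetical and only illustrates the idiom:

    TEST_P(DNNTestNetwork, ExampleNet)  // hypothetical test, for illustration only
    {
    #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
        // Known issue on IE 2021.4 only: tag the configuration so it can be reported as skipped rather than failed.
        if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
            applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
                         CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
    #endif
        // ... run the network and compare against the reference output ...
    }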
@@ -209,8 +209,16 @@ TEST_P(DNNTestNetwork, MobileNet_SSD_Caffe_Different_Width_Height)
 #if defined(INF_ENGINE_RELEASE)
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) &&
         target == DNN_TARGET_MYRIAD && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X);
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_X, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
 #endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE exception: Ngraph operation Transpose with name conv15_2_mbox_conf_perm has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
+
     Mat sample = imread(findDataFile("dnn/street.png"));
     Mat inp = blobFromImage(sample, 1.0f / 127.5, Size(300, 560), Scalar(127.5, 127.5, 127.5), false);
     float diffScores = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.029 : 0.0;
@@ -280,12 +288,23 @@ TEST_P(DNNTestNetwork, SSD_VGG16)
                  CV_TEST_TAG_DEBUG_VERYLONG);
     if (backend == DNN_BACKEND_HALIDE && target == DNN_TARGET_CPU)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_HALIDE); // TODO HALIDE_CPU
-    double scoreThreshold = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0325 : 0.0;
-    const float lInf = (target == DNN_TARGET_MYRIAD) ? 0.032 : 0.0;

     Mat sample = imread(findDataFile("dnn/street.png"));
     Mat inp = blobFromImage(sample, 1.0f, Size(300, 300), Scalar(), false);
+
+    float scoreDiff = 0.0, iouDiff = 0.0;
+    if (target == DNN_TARGET_OPENCL_FP16)
+    {
+        scoreDiff = 0.04;
+    }
+    else if (target == DNN_TARGET_MYRIAD)
+    {
+        scoreDiff = 0.0325;
+        iouDiff = 0.032;
+    }
+
     processNet("dnn/VGG_ILSVRC2016_SSD_300x300_iter_440000.caffemodel",
-               "dnn/ssd_vgg16.prototxt", inp, "detection_out", "", scoreThreshold, lInf);
+               "dnn/ssd_vgg16.prototxt", inp, "detection_out", "", scoreDiff, iouDiff);
     expectNoFallbacksFromIE(net);
 }

@@ -489,10 +489,12 @@ TEST_P(Test_Caffe_nets, Colorization)
     {
         l1 = 0.5; lInf = 11;
     }
+#if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
     {
-        l1 = 0.26; lInf = 6.5;
+        l1 = 0.3; lInf = 10;
     }
+#endif

     normAssert(out, ref, "", l1, lInf);
     expectNoFallbacksFromIE(net);
@@ -682,6 +684,13 @@ TEST_P(Test_Caffe_nets, FasterRCNN_zf)
 #endif
         CV_TEST_TAG_DEBUG_LONG
     );
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE exception: Ngraph operation Reshape with name rpn_cls_score_reshape has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
          backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
@@ -701,6 +710,11 @@ TEST_P(Test_Caffe_nets, RFCN)
         CV_TEST_TAG_LONG,
         CV_TEST_TAG_DEBUG_VERYLONG
     );
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // Exception: Function contains several inputs and outputs with one friendly name! (HETERO bug?)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
          backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16);
@@ -155,16 +155,19 @@ public:

     static void checkBackend(int backend, int target, Mat* inp = 0, Mat* ref = 0)
     {
+        CV_UNUSED(backend); CV_UNUSED(target); CV_UNUSED(inp); CV_UNUSED(ref);
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021000000)
         if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
             && target == DNN_TARGET_MYRIAD)
         {
             if (inp && ref && inp->dims == 4 && ref->dims == 4 &&
                 inp->size[0] != 1 && inp->size[0] != ref->size[0])
             {
                 std::cout << "Inconsistent batch size of input and output blobs for Myriad plugin" << std::endl;
                 applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
                 throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin");
             }
         }
+#endif
     }

     void expectNoFallbacks(Net& net, bool raiseError = true)
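A hedged usage sketch of the helper above (call sites vary across this suite; later hunks in this diff call a no-argument checkBackend() that presumably forwards the fixture's backend/target): a test loads its input and reference blobs first, then lets checkBackend() veto unsupported batch configurations before any inference runs. The data-file names below are hypothetical.

    Mat inp = blobFromNPY(findDataFile("dnn/example_in.npy"));   // hypothetical input blob
    Mat ref = blobFromNPY(findDataFile("dnn/example_out.npy"));  // hypothetical reference blob
    checkBackend(backend, target, &inp, &ref);  // may applyTestTag(...) and throw SkipTestException
    // ... build the Net, run forward(), normAssert(out, ref, ...) ...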
@@ -245,13 +245,13 @@ public:
             nms_boxes.push_back(box);
             nms_confidences.push_back(conf);
             nms_classIds.push_back(class_id);
-#if 0 // use to update test reference data
-            std::cout << b << ", " << class_id << ", " << conf << "f, "
-                      << box.x << "f, " << box.y << "f, "
-                      << box.x + box.width << "f, " << box.y + box.height << "f,"
-                      << std::endl;
-#endif
-
+            if (cvtest::debugLevel > 0)
+            {
+                std::cout << b << ", " << class_id << ", " << conf << "f, "
+                          << box.x << "f, " << box.y << "f, "
+                          << box.x + box.width << "f, " << box.y + box.height << "f,"
+                          << std::endl;
+            }
         }

     if (cvIsNaN(iouDiff))
@@ -347,10 +347,22 @@ TEST_P(Test_Darknet_nets, YoloVoc)
                         1, 6, 0.667770f, 0.446555f, 0.453578f, 0.499986f, 0.519167f, // a car
                         1, 6, 0.844947f, 0.637058f, 0.460398f, 0.828508f, 0.66427f); // a car

-    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 1e-2 : 8e-5;
-    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.018 : 3e-4;
     double nmsThreshold = (target == DNN_TARGET_MYRIAD) ? 0.397 : 0.4;
+
+    double scoreDiff = 8e-5, iouDiff = 3e-4;
+    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
+    {
+        scoreDiff = 1e-2;
+        iouDiff = 0.018;
+    }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+    {
+        iouDiff = std::numeric_limits<double>::quiet_NaN();
+    }
+#endif

     std::string config_file = "yolo-voc.cfg";
     std::string weights_file = "yolo-voc.weights";

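Note on the quiet_NaN() assignment above: the harness hunk earlier in this diff already contains an `if (cvIsNaN(iouDiff))` branch, which suggests a NaN tolerance is the suite's signal to not enforce the IoU comparison for a known-accuracy configuration. A hedged restatement of the pairing (only the condition is visible in this diff; the branch body is assumed):

    iouDiff = std::numeric_limits<double>::quiet_NaN();  // test side: mark the IoU check as "not enforced"
    if (cvIsNaN(iouDiff))
    {
        // harness side (assumed): relax or report the IoU comparison instead of failing the test
    }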
@@ -363,6 +375,12 @@ TEST_P(Test_Darknet_nets, YoloVoc)
         SCOPED_TRACE("batch size 2");
         testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff, 0.24, nmsThreshold);
     }
+
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
 }

 TEST_P(Test_Darknet_nets, TinyYoloVoc)
@@ -584,6 +602,14 @@ TEST_P(Test_Darknet_nets, YOLOv4)
     std::string config_file = "yolov4.cfg";
     std::string weights_file = "yolov4.weights";

+
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy (batch 1)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+    {
+        iouDiff = std::numeric_limits<double>::quiet_NaN();
+    }
+#endif
 #if defined(INF_ENGINE_RELEASE)
     if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
          backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) && target == DNN_TARGET_MYRIAD &&
@@ -602,6 +628,13 @@ TEST_P(Test_Darknet_nets, YOLOv4)
     {
         SCOPED_TRACE("batch size 2");

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+        // accuracy (batch 1)
+        if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+        {
+            iouDiff = 0.45f;
+        }
+#endif
 #if defined(INF_ENGINE_RELEASE)
         if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         {
@@ -617,6 +650,12 @@ TEST_P(Test_Darknet_nets, YOLOv4)

         testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff);
     }
+
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
 }

 TEST_P(Test_Darknet_nets, YOLOv4_tiny)
@@ -685,6 +724,13 @@ TEST_P(Test_Darknet_nets, YOLOv4x_mish)
 {
     applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB));

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE exception: Ngraph operation Transpose with name permute_168 has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000) // nGraph compilation failure
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
@@ -39,12 +39,13 @@ static void test(Mat& input, Net& net, Backend backendId, Target targetId, bool
         l1 = default_l1;
     if (lInf == 0.0)
         lInf = default_lInf;
-#if 0
-    std::cout << "l1=" << l1 << " lInf=" << lInf << std::endl;
-    std::cout << outputDefault.reshape(1, outputDefault.total()).t() << std::endl;
-    std::cout << outputHalide.reshape(1, outputDefault.total()).t() << std::endl;
-#endif
     normAssert(outputDefault, outputHalide, "", l1, lInf);
+    if (cvtest::debugLevel > 0 || testing::Test::HasFailure())
+    {
+        std::cout << "l1=" << l1 << " lInf=" << lInf << std::endl;
+        std::cout << outputDefault.reshape(1, outputDefault.total()).t() << std::endl;
+        std::cout << outputHalide.reshape(1, outputDefault.total()).t() << std::endl;
+    }
 }

 static void test(LayerParams& params, Mat& input, Backend backendId, Target targetId, bool skipCheck = false, double l1 = 0.0, double lInf = 0.0)
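The guard above combines the suite's verbosity switch (cvtest::debugLevel) with GoogleTest's failure flag, so diagnostics are only printed when they are actually useful. A minimal standalone sketch of that idiom in plain GoogleTest, not tied to this suite (test name and values are placeholders):

    #include <gtest/gtest.h>
    #include <iostream>

    TEST(Example, DumpOnFailure)  // hypothetical test, for illustration only
    {
        double computed = 0.9, expected = 1.0;   // placeholder values
        EXPECT_NEAR(computed, expected, 1e-6);   // non-fatal check
        if (::testing::Test::HasFailure())       // true once any expectation in this test has failed
            std::cout << "computed=" << computed << " expected=" << expected << std::endl;
    }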
@@ -795,6 +796,16 @@ TEST_P(Eltwise, Accuracy)
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));

+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL &&
+        inSize == Vec3i(1, 4, 5) && op == "sum" && numConv == 1 && !weighted)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && targetId == DNN_TARGET_OPENCL &&
+        inSize == Vec3i(2, 8, 6) && op == "sum" && numConv == 1 && !weighted)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2018050000)
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && targetId == DNN_TARGET_MYRIAD &&
         inSize == Vec3i(1, 4, 5))
@@ -373,7 +373,7 @@ TEST_P(Test_Caffe_layers, layer_prelu_fc)
     // Reference output values are in range [-0.0001, 10.3906]
     double l1 = (target == DNN_TARGET_MYRIAD) ? 0.005 : 0.0;
     double lInf = (target == DNN_TARGET_MYRIAD) ? 0.021 : 0.0;
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
     {
         l1 = 0.006f; lInf = 0.05f;
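The EQ-to-GE change above widens an existing guard from "exactly IE 2020.4" to "2020.4 and newer". These macros compare the packed INF_ENGINE_RELEASE build number (2020040000 corresponds to OpenVINO/IE 2020.4, 2021040000 to 2021.4); a hedged illustration of the difference, with the macro definitions living in OpenCV's Inference Engine integration header:

    #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)
        // compiled in only when building against IE 2020.4 exactly
    #endif
    #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
        // compiled in for IE 2020.4 and every later release (2021.x included)
    #endif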
@@ -1416,6 +1416,14 @@ TEST_P(Test_DLDT_two_inputs, as_backend)
     double l1 = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.06 : 1e-6;
     double lInf = (targetId == DNN_TARGET_OPENCL_FP16 || targetId == DNN_TARGET_MYRIAD) ? 0.3 : 1e-5;
     normAssert(out, ref, "", l1, lInf);
+    if (cvtest::debugLevel > 0 || HasFailure())
+    {
+        std::cout << "input1 scale=" << kScale << " input2 scale=" << kScaleInv << std::endl;
+        std::cout << "input1: " << firstInp.size << " " << firstInp.reshape(1, 1) << std::endl;
+        std::cout << "input2: " << secondInp.size << " " << secondInp.reshape(1, 1) << std::endl;
+        std::cout << "ref: " << ref.reshape(1, 1) << std::endl;
+        std::cout << "out: " << out.reshape(1, 1) << std::endl;
+    }
 }

 INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs, Combine(
@@ -686,6 +686,14 @@ TEST_P(Test_ONNX_layers, Split_EltwiseMax)

 TEST_P(Test_ONNX_layers, LSTM_Activations)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE Exception: Ngraph operation Reshape with name Block1237_Output_0_before_reshape has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
+
     testONNXModels("lstm_cntk_tanh", pb, 0, 0, false, false);
 }

@@ -810,6 +818,13 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias)

 TEST_P(Test_ONNX_layers, GatherMultiOutput)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE Exception: Ngraph operation Reshape with name 6 has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
@@ -817,7 +832,7 @@ TEST_P(Test_ONNX_layers, GatherMultiOutput)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
 #endif

-#if defined(INF_ENGINE_RELEASE)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LE(2021030000)
     if (target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE);
 #endif
@@ -827,14 +842,25 @@ TEST_P(Test_ONNX_layers, GatherMultiOutput)

 TEST_P(Test_ONNX_layers, DynamicAxes)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // accuracy
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
 #if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
         if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
     }
+#if INF_ENGINE_VER_MAJOR_LT(2021000000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
     {
         if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
     }
+#endif
 #endif
     testONNXModels("squeeze_and_conv_dynamic_axes");
     testONNXModels("unsqueeze_and_conv_dynamic_axes");
     testONNXModels("gather_dynamic_axes");
@@ -914,6 +940,13 @@ TEST_P(Test_ONNX_layers, PoolConv1d)

 TEST_P(Test_ONNX_layers, ConvResizePool1d)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE Exception: Ngraph operation Reshape with name 15 has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
 #if defined(INF_ENGINE_RELEASE)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
     {
@@ -1116,8 +1149,12 @@ TEST_P(Test_ONNX_nets, TinyYolov2)
 #endif

     // output range: [-11; 8]
-    double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.017 : default_l1;
-    double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.14 : default_lInf;
+    double l1 = default_l1, lInf = default_lInf;
+    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
+    {
+        l1 = 0.02;
+        lInf = 0.2;
+    }
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
     {
@@ -1202,10 +1239,10 @@ TEST_P(Test_ONNX_nets, Emotion_ferplus)
         l1 = 2.4e-4;
         lInf = 6e-4;
     }
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000)
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
     {
-        l1 = 0.012f; lInf = 0.035f;
+        l1 = 0.013f; lInf = 0.035f;
     }
 #endif

@@ -83,6 +83,10 @@ public:
     void runTensorFlowNet(const std::string& prefix, bool hasText = false,
                           double l1 = 0.0, double lInf = 0.0, bool memoryLoad = false, const std::string& groupPrefix = "")
     {
+        if (cvtest::debugLevel > 0)
+        {
+            std::cout << prefix << groupPrefix << std::endl;
+        }
         std::string netPath = path(prefix + groupPrefix + "_net.pb");
         std::string netConfig = (hasText ? path(prefix + groupPrefix + "_net.pbtxt") : "");
         std::string inpPath = path(prefix + "_in.npy");
@@ -118,6 +122,16 @@ public:
         net.setInput(input);
         cv::Mat output = net.forward();
         normAssert(ref, output, "", l1 ? l1 : default_l1, lInf ? lInf : default_lInf);
+
+        if (cvtest::debugLevel > 0 || HasFailure())
+        {
+            std::cout << "input: " << input.size << std::endl;
+            std::cout << input.reshape(1, 1) << std::endl;
+            std::cout << "ref " << ref.size << std::endl;
+            std::cout << ref.reshape(1, 1) << std::endl;
+            std::cout << "output: " << output.size << std::endl;
+            std::cout << output.reshape(1, 1) << std::endl;
+        }
     }
 };

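For reference, the helper above is what the per-model TensorFlow tests below call. Two call forms that appear later in this diff: explicit zero tolerances fall back to default_l1/default_lInf inside the helper, and groupPrefix selects the reduce_max_channel_keep_dims_net.pb graph while the base "_in.npy" input blob is reused (both behaviors follow from the helper's path construction shown above).

    runTensorFlowNet("reduce_max_channel", false, 0.0f, 0.0f);
    runTensorFlowNet("reduce_max_channel", false, 0.0, 0.0, false, "_keep_dims");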
@@ -132,7 +146,7 @@ TEST_P(Test_TensorFlow_layers, reduce_max)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER);
-    runTensorFlowNet("max_pool_by_axis");
+    runTensorFlowNet("max_pool_by_axis", false, 0.0f, 0.0f);
 }

 TEST_P(Test_TensorFlow_layers, reduce_sum)
@@ -144,7 +158,11 @@ TEST_P(Test_TensorFlow_layers, reduce_sum)

 TEST_P(Test_TensorFlow_layers, reduce_max_channel)
 {
-    runTensorFlowNet("reduce_max_channel");
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) // incorrect result
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+    runTensorFlowNet("reduce_max_channel", false, 0.0f, 0.0f);
 }

 TEST_P(Test_TensorFlow_layers, reduce_sum_channel)
@@ -154,6 +172,10 @@ TEST_P(Test_TensorFlow_layers, reduce_sum_channel)

 TEST_P(Test_TensorFlow_layers, reduce_max_channel_keep_dims)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) // incorrect result
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     runTensorFlowNet("reduce_max_channel", false, 0.0, 0.0, false, "_keep_dims");
 }

@@ -220,13 +242,49 @@ TEST_P(Test_TensorFlow_layers, padding)
     runTensorFlowNet("keras_pad_concat");
 }

-TEST_P(Test_TensorFlow_layers, padding_asymmetric)
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_1)
 {
     runTensorFlowNet("conv2d_asymmetric_pads_nchw");
+}
+
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_2)
+{
     runTensorFlowNet("conv2d_asymmetric_pads_nhwc");
+}
+
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_3)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU) // Exception: Unsupported pad value
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) // Exception: Unsupported pad value
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     runTensorFlowNet("max_pool2d_asymmetric_pads_nchw");
+}
+
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_4)
+{
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_CPU) // Exception: Unsupported pad value
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_CPU, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020020000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) // Exception: Unsupported pad value
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     runTensorFlowNet("max_pool2d_asymmetric_pads_nhwc");
+}
+
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_5)
+{
     runTensorFlowNet("conv2d_backprop_input_asymmetric_pads_nchw");
+}
+
+TEST_P(Test_TensorFlow_layers, padding_asymmetric_6)
+{
     runTensorFlowNet("conv2d_backprop_input_asymmetric_pads_nhwc");
 }

@@ -267,6 +325,13 @@ TEST_P(Test_TensorFlow_layers, pad_and_concat)

 TEST_P(Test_TensorFlow_layers, concat_axis_1)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE Exception: Ngraph operation Transpose with name Flatten_1/flatten/Reshape/nhwc has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021030000)
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // exception
@@ -413,19 +478,77 @@ TEST_P(Test_TensorFlow_layers, pooling_reduce_sum)
     runTensorFlowNet("reduce_sum"); // a SUM pooling over all spatial dimensions.
 }

-TEST_P(Test_TensorFlow_layers, pooling_reduce_sum2)
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_0_false)
 {
-    int axises[] = {0, 1, 2, 3};
-    for (int keepdims = 0; keepdims <= 1; ++keepdims)
-    {
-        for (int i = 0; i < sizeof(axises)/sizeof(axises[0]); ++i)
-        {
-            runTensorFlowNet(cv::format("reduce_sum_%d_%s", axises[i], (keepdims ? "True" : "False")));
-        }
-        runTensorFlowNet(cv::format("reduce_sum_1_2_%s", keepdims ? "True" : "False"));
-    }
+    runTensorFlowNet("reduce_sum_0_False");
 }

+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_false)
+{
+    runTensorFlowNet("reduce_sum_1_False");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_2_false)
+{
+    runTensorFlowNet("reduce_sum_2_False");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_3_false)
+{
+    runTensorFlowNet("reduce_sum_3_False");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_2_false)
+{
+#if defined(INF_ENGINE_RELEASE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+    {
+        default_l1 = 0.01f;
+    }
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+    {
+        default_l1 = 0.01f;
+    }
+#endif
+    runTensorFlowNet("reduce_sum_1_2_False");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_0_true)
+{
+    runTensorFlowNet("reduce_sum_0_True");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_true)
+{
+    runTensorFlowNet("reduce_sum_1_True");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_2_true)
+{
+    runTensorFlowNet("reduce_sum_2_True");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_3_true)
+{
+    runTensorFlowNet("reduce_sum_3_True");
+}
+
+TEST_P(Test_TensorFlow_layers, pooling_reduce_sum_1_2_true)
+{
+#if defined(INF_ENGINE_RELEASE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+    {
+        default_l1 = 0.01f;
+    }
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+    {
+        default_l1 = 0.01f;
+    }
+#endif
+    runTensorFlowNet("reduce_sum_1_2_True");
+}
+

 TEST_P(Test_TensorFlow_layers, max_pool_grad)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
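The split above replaces a loop with one test per reduce_sum model, which lets individual cases be skipped or given their own tolerances. For orientation, the removed cv::format calls generated exactly the names the new tests now spell out, for example:

    cv::format("reduce_sum_%d_%s", 0, "False");  // -> "reduce_sum_0_False"
    cv::format("reduce_sum_1_2_%s", "True");     // -> "reduce_sum_1_2_True"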
@@ -642,8 +765,13 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
     net.setInput(inp);
     Mat out = net.forward();

-    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0043 : default_l1;
-    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.037 : default_lInf;
+    double scoreDiff = default_l1, iouDiff = default_lInf;
+    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
+    {
+        scoreDiff = 0.01;
+        iouDiff = 0.1;
+    }
+
     normAssertDetections(ref, out, "", 0.2, scoreDiff, iouDiff);
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE >= 2019010000
     expectNoFallbacksFromIE(net);
@@ -720,16 +848,13 @@ TEST_P(Test_TensorFlow_nets, MobileNet_v1_SSD)
     expectNoFallbacksFromIE(net);
 }

-TEST_P(Test_TensorFlow_nets, Faster_RCNN)
+TEST_P(Test_TensorFlow_nets, Faster_RCNN_inception_v2_coco_2018_01_28)
 {
-    // FIXIT split test
     applyTestTag(
         (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
         CV_TEST_TAG_LONG,
         CV_TEST_TAG_DEBUG_VERYLONG
     );
-    static std::string names[] = {"faster_rcnn_inception_v2_coco_2018_01_28",
-                                  "faster_rcnn_resnet50_coco_2018_01_28"};

 #ifdef INF_ENGINE_RELEASE
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
@@ -740,21 +865,28 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
         backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
 #endif
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
     // segfault: inference-engine/thirdparty/clDNN/src/gpu/detection_output_cpu.cpp:111:
     // Assertion `prior_height > 0' failed.
     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
         applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
-
-    if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
-        applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
 #endif

     checkBackend();

-    double scoresDiff = backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ? 2.9e-5 : 1e-5;
-    for (int i = 0; i < 2; ++i)
+    double scoresDiff = 1e-5;
+    double iouDiff = 1e-4;
+
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
     {
-        std::string proto = findDataFile("dnn/" + names[i] + ".pbtxt");
-        std::string model = findDataFile("dnn/" + names[i] + ".pb", false);
+        scoresDiff = 0.02;
+        iouDiff = 0.1;
+    }
+
+    std::string name = "faster_rcnn_inception_v2_coco_2018_01_28";
+    {
+        std::string proto = findDataFile("dnn/" + name + ".pbtxt");
+        std::string model = findDataFile("dnn/" + name + ".pb", false);

         Net net = readNetFromTensorflow(model, proto);
         net.setPreferableBackend(backend);
@@ -765,8 +897,74 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
         net.setInput(blob);
         Mat out = net.forward();

-        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + names[i] + ".detection_out.npy"));
-        normAssertDetections(ref, out, names[i].c_str(), 0.3, scoresDiff);
+        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + name + ".detection_out.npy"));
+
+        // accuracy (both OpenCV & IE)
+        if (target == DNN_TARGET_OPENCL_FP16)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
+
+        normAssertDetections(ref, out, name.c_str(), 0.3, scoresDiff, iouDiff);
     }
 }

+TEST_P(Test_TensorFlow_nets, Faster_RCNN_resnet50_coco_2018_01_28)
+{
+    applyTestTag(
+        (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB),
+        CV_TEST_TAG_LONG,
+        CV_TEST_TAG_DEBUG_VERYLONG
+    );
+
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    // IE exception: Ngraph operation Transpose with name FirstStageBoxPredictor/ClassPredictor/reshape_1/nhwc has dynamic output shape on 0 port, but CPU plug-in supports only static shape
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        applyTestTag(target == DNN_TARGET_OPENCL ? CV_TEST_TAG_DNN_SKIP_IE_OPENCL : CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16,
+                     CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION
+        );
+#endif
+
+#ifdef INF_ENGINE_RELEASE
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 &&
+        (INF_ENGINE_VER_MAJOR_LT(2019020000) || target != DNN_TARGET_CPU))
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+
+    if (INF_ENGINE_VER_MAJOR_GT(2019030000) &&
+        backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2021040000)
+    // segfault: inference-engine/thirdparty/clDNN/src/gpu/detection_output_cpu.cpp:111:
+    // Assertion `prior_height > 0' failed.
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL_FP16)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH);
+#endif
+
+    checkBackend();
+
+    double scoresDiff = backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ? 2.9e-5 : 1e-5;
+    double iouDiff = 1e-4;
+
+    std::string name = "faster_rcnn_resnet50_coco_2018_01_28";
+    {
+        std::string proto = findDataFile("dnn/" + name + ".pbtxt");
+        std::string model = findDataFile("dnn/" + name + ".pb", false);
+
+        Net net = readNetFromTensorflow(model, proto);
+        net.setPreferableBackend(backend);
+        net.setPreferableTarget(target);
+        Mat img = imread(findDataFile("dnn/dog416.png"));
+        Mat blob = blobFromImage(img, 1.0f, Size(800, 600), Scalar(), true, false);
+
+        net.setInput(blob);
+        Mat out = net.forward();
+
+        Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/" + name + ".detection_out.npy"));
+
+        // accuracy
+        if (target == DNN_TARGET_OPENCL_FP16)
+            applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
+
+        normAssertDetections(ref, out, name.c_str(), 0.3, scoresDiff, iouDiff);
+    }
+}
+
@@ -1152,6 +1350,10 @@ TEST_P(Test_TensorFlow_layers, resize_bilinear_down)

 TEST_P(Test_TensorFlow_layers, resize_concat_optimization)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021040000)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU) // Exception: Function contains several inputs and outputs with one friendly name! (HETERO bug?)
+        applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
+#endif
     runTensorFlowNet("resize_concat_optimization");
 }

@@ -1271,7 +1473,7 @@ TEST_P(Test_TensorFlow_nets, Mask_RCNN)
     Mat outDetections = outs[0];
     Mat outMasks = outs[1];

-    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.019 : 2e-5;
+    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.2 : 2e-5;
     double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.018 : default_lInf;
     normAssertDetections(refDetections, outDetections, "", /*threshold for zero confidence*/1e-5, scoreDiff, iouDiff);

@@ -1305,7 +1507,7 @@ TEST_P(Test_TensorFlow_nets, Mask_RCNN)

     double inter = cv::countNonZero(masks & refMasks);
     double area = cv::countNonZero(masks | refMasks);
-    EXPECT_GE(inter / area, 0.99);
+    EXPECT_GE(inter / area, (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.98 : 0.99);

     if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
         expectNoFallbacks(net);
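The two countNonZero lines above compute an intersection-over-union score for the predicted and reference mask images, and the hunk only relaxes the acceptance threshold for FP16/Myriad targets. A minimal self-contained sketch of that metric (assumed same-size 8-bit binary masks; the helper name is ours, not OpenCV's):

    #include <opencv2/core.hpp>

    // Hypothetical helper illustrating the mask-overlap metric used in the test above.
    static double binaryMaskIoU(const cv::Mat& masks, const cv::Mat& refMasks)
    {
        double inter = cv::countNonZero(masks & refMasks);  // pixels set in both masks
        double area  = cv::countNonZero(masks | refMasks);  // pixels set in either mask
        return area > 0 ? inter / area : 1.0;               // treat two empty masks as a perfect match
    }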
@@ -164,8 +164,12 @@ TEST_P(Test_Torch_layers, run_concat)

 TEST_P(Test_Torch_layers, run_depth_concat)
 {
-    runTorchNet("net_depth_concat", "", false, true, true, 0.0,
-                target == DNN_TARGET_OPENCL_FP16 ? 0.032 : 0.0);
+    double lInf = 0.0;
+    if (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD)
+    {
+        lInf = 0.032;
+    }
+    runTorchNet("net_depth_concat", "", false, true, true, 0.0, lInf);
 }

 TEST_P(Test_Torch_layers, run_deconv)