Mirror of https://github.com/opencv/opencv.git
Enable some tests for Inference Engine 2019R1
Commit: a2bbfa1db5
Parent: dad2247b56

@@ -1160,12 +1160,6 @@ struct Net::Impl
                continue;

            currLayer->unsetAttached();
-
-            Ptr<PoolingLayer> poolingLayer = currLayer.dynamicCast<PoolingLayer>();
-            if( !poolingLayer.empty() )
-            {
-                poolingLayer->computeMaxIdx = true;
-            }
        }

        layersTimings.clear();

@@ -2082,30 +2076,11 @@ struct Net::Impl
                }
            }
        }
-        // the optimization #2. if there is no layer that takes max pooling layer's computed
-        // max indices (and only some semantical segmentation networks might need this;
-        // many others only take the maximum values), then we switch the max pooling
-        // layer to the faster operating mode.
-        Ptr<PoolingLayer> poolingLayer = ld.layerInstance.dynamicCast<PoolingLayer>();
-        if( !poolingLayer.empty() && !ld.consumers.empty() )
-        {
-            size_t i = 0, nconsumers = ld.consumers.size();
-            for( ; i < nconsumers; i++ )
-                if( ld.consumers[i].oid > 0 )
-                    break;
-            // if there is no layer that takes the second output pin of the pooling layer
-            // on input then we don't need to compute the indices
-            if( i >= nconsumers )
-            {
-                poolingLayer->computeMaxIdx = false;
-                printf_(("\tsimplified pooling layer %s\n", poolingLayer->name.c_str()));
-            }
-        }

        if (preferableBackend != DNN_BACKEND_OPENCV)
            continue; // Go to the next layer.

-        // the optimization #3. if there is concat layer that concatenates channels
+        // the optimization #2. if there is concat layer that concatenates channels
        // from the inputs together (i.e. axis == 1) then we make the inputs of
        // the concat layer to write to the concatenation output buffer
        // (and so we eliminate the concatenation layer, because the channels

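The removed fusion pass decided whether max pooling had to produce its index output by scanning the layer's consumers; that decision now lives in the pooling layer itself (see the `computeMaxIdx = type == MAX && outputs.size() == 2` hunk below). A minimal sketch of the retired consumer scan, using simplified stand-ins (`LayerPin` and `PoolingLayerStub` are illustrative, not the real dnn.cpp types):

    // Disable max-index computation when no consumer reads the second output pin.
    #include <vector>

    struct LayerPin { int lid; int oid; };       // producing layer id + output pin index
    struct PoolingLayerStub { bool computeMaxIdx = true; };

    void simplifyPooling(PoolingLayerStub& pool, const std::vector<LayerPin>& consumers)
    {
        for (const LayerPin& pin : consumers)
            if (pin.oid > 0)                     // somebody consumes the indices output
                return;
        pool.computeMaxIdx = false;              // only the maximum values are needed
    }
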
@@ -256,8 +256,11 @@ struct ReLUFunctor

    bool supportBackend(int backendId, int)
    {
-        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE;
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+            return slope >= 0 || !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
+#endif
+        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
    }

    void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const

@@ -741,8 +744,11 @@ struct AbsValFunctor

    bool supportBackend(int backendId, int)
    {
-        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE ||
-               backendId == DNN_BACKEND_INFERENCE_ENGINE;
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+            return !INF_ENGINE_VER_MAJOR_EQ(INF_ENGINE_RELEASE_2019R1);
+#endif
+        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE;
    }

    void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const

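Both functors now gate Inference Engine support on the exact 2019R1 release rather than advertising unconditional support. A sketch of how such a major-version guard behaves, assuming the usual numeric release encoding (2019R1 is 2019010000) and re-implementing the EQ check locally (the real macros are defined next to INF_ENGINE_RELEASE in op_inf_engine.hpp):

    #include <cassert>

    #define MY_INF_ENGINE_RELEASE 2019010000  // pretend we build against 2019R1
    // Compare only the "major" part, i.e. release / 10000.
    #define MY_VER_MAJOR_EQ(ver) (((MY_INF_ENGINE_RELEASE) / 10000) == ((ver) / 10000))

    int main()
    {
        float slope = -0.5f;  // leaky ReLU with a negative slope
        bool supported = (slope >= 0) || !MY_VER_MAJOR_EQ(2019010000);
        assert(!supported);   // rejected on 2019R1, so the OpenCV backend runs it
        return 0;
    }
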
@@ -159,8 +159,8 @@ public:
        InferenceEngine::Builder::Layer ieLayer(name);
        ieLayer.setName(name);
        ieLayer.setType("Flatten");
-        ieLayer.getParameters()["axis"] = _startAxis;
-        ieLayer.getParameters()["end_axis"] = _endAxis;
+        ieLayer.getParameters()["axis"] = (size_t)_startAxis;
+        ieLayer.getParameters()["end_axis"] = _endAxis;  // Do not cast to size_t because it might be negative.
        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));

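The asymmetry in the two casts above is deliberate: `axis` is always non-negative, while `end_axis` may use the count-from-the-end convention. A small demonstration of the wraparound the new comment warns about:

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        int endAxis = -1;                            // conventional "last axis" marker
        std::size_t wrapped = (std::size_t)endAxis;  // wraps to SIZE_MAX on conversion
        std::printf("end_axis=%d wraps to %zu\n", endAxis, wrapped);
        return 0;
    }
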
@@ -12,6 +12,7 @@ Implementation of padding layer, which adds paddings to input blob.
 #include "../precomp.hpp"
 #include "layers_common.hpp"
 #include "../op_halide.hpp"
+#include "../op_inf_engine.hpp"
 #include <vector>

 namespace cv

@@ -68,28 +69,36 @@ public:

        // Compute dstRanges.
        const MatSize& inpShape = inputs[0].size;
-        dstRanges.resize(paddings.size());

-        int offset = 0;
        if (inputDims != -1 && inputs[0].dims != inputDims)
        {
-            dstRanges.insert(dstRanges.begin(), Range::all());
-            offset = 1;
+            paddings.insert(paddings.begin(), std::make_pair(0, 0));
        }

+        dstRanges.resize(paddings.size());
        for (int i = 0; i < paddings.size(); ++i)
        {
-            dstRanges[offset + i].start = paddings[i].first;
-            dstRanges[offset + i].end = paddings[i].first + inpShape[offset + i];
+            dstRanges[i].start = paddings[i].first;
+            dstRanges[i].end = paddings[i].first + inpShape[i];
        }

        // Add the rest of dimensions.
        for (int i = dstRanges.size(); i < inputs[0].dims; ++i)
+        {
            dstRanges.push_back(Range::all());
+            paddings.push_back(std::make_pair(0, 0));
+        }
+        inputDims = -1;  // Next time paddings are filled for all the dimensions.
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
+#ifdef HAVE_INF_ENGINE
+        if (backendId == DNN_BACKEND_INFERENCE_ENGINE)
+            return INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2019R1) &&
+                   (preferableTarget != DNN_TARGET_MYRIAD ||
+                    (dstRanges.size() == 4 && paddings[0].first == 0 && paddings[0].second == 0));
+#endif
        return backendId == DNN_BACKEND_OPENCV ||
               (backendId == DNN_BACKEND_HALIDE && haveHalide() && dstRanges.size() == 4);
    }

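The reworked finalize now extends `paddings` itself to the full input rank (prepending a zero pair for a missing leading axis, appending zero pairs for trailing axes, then resetting `inputDims`), so downstream code such as `initInfEngine` can assume one (before, after) pair per dimension. A standalone sketch of that normalization; `normalizePaddings` is a hypothetical helper, not OpenCV API:

    #include <utility>
    #include <vector>

    void normalizePaddings(std::vector<std::pair<int, int> >& paddings,
                           int inputRank, bool missingLeadingDim)
    {
        if (missingLeadingDim)                    // e.g. batch axis absent at import time
            paddings.insert(paddings.begin(), std::make_pair(0, 0));
        while ((int)paddings.size() < inputRank)  // trailing axes get zero padding
            paddings.push_back(std::make_pair(0, 0));
    }
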
@@ -109,7 +118,7 @@ public:
    {
        std::vector<float> paddingValue_fp32(1, paddingValue);
        std::vector<int16_t> paddingValue_fp16(1);
-        convertFp16(paddingValue_fp32, paddingValue_fp16);
+        cv::convertFp16(paddingValue_fp32, paddingValue_fp16);
        outputs[0].setTo(paddingValue_fp16[0]);
    }
    else

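The only change here qualifies `convertFp16` with its namespace. For reference, a minimal sketch of the fp32-to-fp16 round trip this code path relies on (`toHalfBits` is an illustrative helper; half floats travel in an int16_t container):

    #include <opencv2/core.hpp>
    #include <cstdint>
    #include <vector>

    int16_t toHalfBits(float v)
    {
        std::vector<float> f32(1, v);
        std::vector<int16_t> f16(1);
        cv::convertFp16(f32, f16);  // CV_32F -> fp16 bit pattern stored as CV_16S
        return f16[0];
    }
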
@@ -173,6 +182,32 @@ public:
        return Ptr<BackendNode>();
    }

+    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
+    {
+#ifdef HAVE_INF_ENGINE
+        InferenceEngine::Builder::Layer ieLayer(name);
+        ieLayer.setName(name);
+        ieLayer.setType("Pad");
+
+        std::vector<int> begins(paddings.size(), 0), ends(paddings.size(), 0);
+        for (int i = 0; i < paddings.size(); ++i)
+        {
+            begins[i] = paddings[i].first;
+            ends[i] = paddings[i].second;
+        }
+        ieLayer.getParameters()["pads_begin"] = begins;
+        ieLayer.getParameters()["pads_end"] = ends;
+        ieLayer.getParameters()["pad_mode"] = paddingType;
+        if (paddingType == "constant")
+            ieLayer.getParameters()["pad_value"] = paddingValue;
+
+        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
+        ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
+        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+#endif
+        return Ptr<BackendNode>();
+    }
+
 private:
    std::vector<std::pair<int, int> > paddings;  // Pairs pad before, pad after.
    std::vector<Range> dstRanges;

@@ -140,7 +140,7 @@ public:
 #ifdef HAVE_OPENCL
        poolOp.release();
 #endif
-        computeMaxIdx = type == MAX;
+        computeMaxIdx = type == MAX && outputs.size() == 2;
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE

@@ -289,12 +289,7 @@ TEST_P(DNNTestNetwork, OpenFace)
 #if INF_ENGINE_VER_MAJOR_EQ(2018050000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
        throw SkipTestException("Test is disabled for Myriad targets");
-#elif INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
-        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
-    )
-        throw SkipTestException("Test is disabled for MyriadX target");
-#else
+#elif INF_ENGINE_VER_MAJOR_EQ(2018030000)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
        throw SkipTestException("Test has been fixed in OpenVINO 2018R4");
 #endif

@@ -561,12 +561,6 @@ TEST_P(ReLU, Accuracy)
    float negativeSlope = get<0>(GetParam());
    Backend backendId = get<0>(get<1>(GetParam()));
    Target targetId = get<1>(get<1>(GetParam()));
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE
-        && negativeSlope < 0
-    )
-        throw SkipTestException("Test is disabled");
-#endif

    LayerParams lp;
    lp.set("negative_slope", negativeSlope);

@@ -589,13 +583,6 @@ TEST_P(NoParamActivation, Accuracy)
    LayerParams lp;
    lp.type = get<0>(GetParam());
    lp.name = "testLayer";
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE
-        && lp.type == "AbsVal"
-    )
-        throw SkipTestException("Test is disabled");
-#endif
-
    testInPlaceActivation(lp, backendId, targetId);
 }
 INSTANTIATE_TEST_CASE_P(Layer_Test_Halide, NoParamActivation, Combine(

@@ -379,7 +379,7 @@ TEST_P(Test_ONNX_nets, LResNet100E_IR)
        lInf = 0.035;
    }
    else if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_CPU) {
-        l1 = 4.5e-5;
+        l1 = 4.6e-5;
        lInf = 1.9e-4;
    }
    testONNXModels("LResNet100E_IR", pb, l1, lInf);

@@ -140,10 +140,6 @@ TEST_P(Test_TensorFlow_layers, padding)

 TEST_P(Test_TensorFlow_layers, padding_same)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("Test is disabled for DLIE");
-#endif
 #if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X

@@ -251,10 +247,6 @@ TEST_P(Test_TensorFlow_layers, reshape)

 TEST_P(Test_TensorFlow_layers, flatten)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("Test is disabled for DLIE");
-#endif
 #if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2

@@ -267,11 +259,6 @@ TEST_P(Test_TensorFlow_layers, flatten)

 TEST_P(Test_TensorFlow_layers, unfused_flatten)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("Test is disabled for DLIE");
-#endif
-
    runTensorFlowNet("unfused_flatten");
    runTensorFlowNet("unfused_flatten_unknown_batch");
 }

@@ -320,11 +307,14 @@ class Test_TensorFlow_nets : public DNNTestLayer {};

 TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
 {
-    checkBackend();
-    if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU) ||
-        (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
-        throw SkipTestException("");
+#if defined(INF_ENGINE_RELEASE)
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
+        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X
+    )
+        throw SkipTestException("Test is disabled for MyriadX");
+#endif

+    checkBackend();
    std::string netPath = findDataFile("dnn/ssd_mobilenet_v1_coco.pb", false);
    std::string netConfig = findDataFile("dnn/ssd_mobilenet_v1_coco.pbtxt", false);
    std::string imgPath = findDataFile("dnn/street.png", false);

@@ -333,30 +323,18 @@ TEST_P(Test_TensorFlow_nets, MobileNet_SSD)
    resize(imread(imgPath), inp, Size(300, 300));
    inp = blobFromImage(inp, 1.0f / 127.5, Size(), Scalar(127.5, 127.5, 127.5), true);

-    std::vector<String> outNames(3);
-    outNames[0] = "concat";
-    outNames[1] = "concat_1";
-    outNames[2] = "detection_out";
-
-    std::vector<Mat> refs(outNames.size());
-    for (int i = 0; i < outNames.size(); ++i)
-    {
-        std::string path = findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco." + outNames[i] + ".npy", false);
-        refs[i] = blobFromNPY(path);
-    }
+    Mat ref = blobFromNPY(findDataFile("dnn/tensorflow/ssd_mobilenet_v1_coco.detection_out.npy", false));

    Net net = readNetFromTensorflow(netPath, netConfig);
    net.setPreferableBackend(backend);
    net.setPreferableTarget(target);
-
    net.setInput(inp);
+    Mat out = net.forward();

-    std::vector<Mat> output;
-    net.forward(output, outNames);
-
-    normAssert(refs[0].reshape(1, 1), output[0].reshape(1, 1), "", 1e-5, 1.5e-4);
-    normAssert(refs[1].reshape(1, 1), output[1].reshape(1, 1), "", 1e-5, 3e-4);
-    normAssertDetections(refs[2], output[2], "", 0.2);
+    double scoreDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.0043 : default_l1;
+    double iouDiff = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.037 : default_lInf;
+    normAssertDetections(ref, out, "", 0.2, scoreDiff, iouDiff);
 }

 TEST_P(Test_TensorFlow_nets, Inception_v2_SSD)

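The test now validates one fused `detection_out` blob with `normAssertDetections`, which matches boxes by IoU and scores within a tolerance, instead of bitwise-comparing three intermediate outputs. A sketch of reading such a blob, assuming the standard DetectionOutput layout [1, 1, N, 7] with rows of (imageId, classId, confidence, left, top, right, bottom) in relative coordinates (`printDetections` is illustrative):

    #include <opencv2/core.hpp>
    #include <cstdio>

    void printDetections(const cv::Mat& out, float confThreshold = 0.5f)
    {
        cv::Mat det = out.reshape(1, (int)(out.total() / 7));  // N x 7 view
        for (int i = 0; i < det.rows; ++i)
        {
            float conf = det.at<float>(i, 2);
            if (conf < confThreshold)
                continue;
            std::printf("class=%d conf=%.2f box=(%.2f, %.2f, %.2f, %.2f)\n",
                        (int)det.at<float>(i, 1), conf,
                        det.at<float>(i, 3), det.at<float>(i, 4),
                        det.at<float>(i, 5), det.at<float>(i, 6));
        }
    }
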
@@ -597,10 +575,6 @@ TEST_P(Test_TensorFlow_layers, fp16_weights)

 TEST_P(Test_TensorFlow_layers, fp16_padding_same)
 {
-#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2019010000)
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("Test is disabled for DLIE");
-#endif
 #if defined(INF_ENGINE_RELEASE)
    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD
        && getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X