Merge pull request #15063 from dkurt:dnn_ie_ocv_layers

* Wrap layers unsupported by IE as custom layers

* Replace pointers to layers' blobs with their shapes

* Enable Faster R-CNN with IE backend on CPU
Dmitry Kurtaev 2019-09-03 16:58:57 +01:00 committed by Alexander Alekhin
parent 7e46766c8d
commit ba703157cf
10 changed files with 365 additions and 27 deletions
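
For context on what the diffs below implement: when a network runs with the Inference Engine backend and contains layers IE cannot execute natively, those layers are now wrapped as IE custom layers backed by their OpenCV implementations, instead of splitting the model into several IE subgraphs with CPU bridges. A minimal usage sketch (the model and image file names are hypothetical placeholders):

#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    // Any model with layers the IE backend does not support natively.
    cv::dnn::Net net = cv::dnn::readNet("graph.pb", "graph.pbtxt");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

    cv::Mat img = cv::imread("input.jpg");
    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0, cv::Size(416, 416));
    net.setInput(blob);
    // Unsupported layers run through their OpenCV implementations,
    // registered with IE as custom layers inside a single IE network.
    cv::Mat out = net.forward();
    return 0;
}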

View File

@@ -1555,6 +1555,31 @@ struct Net::Impl
Ptr<Layer> layer = ld.layerInstance;
if (!fused && !layer->supportBackend(preferableBackend))
{
bool customizable = ld.id != 0 && ld.outputBlobs.size() == 1;
// TODO: there is a bug in the Myriad plugin with shape inference for custom layers.
if (preferableTarget == DNN_TARGET_MYRIAD)
{
for (int i = 0; customizable && i < ld.inputBlobs.size(); ++i)
{
customizable = ld.inputBlobs[i]->size[0] == 1;
}
}
// TODO: fix these workarounds
if (preferableTarget == DNN_TARGET_MYRIAD ||
preferableTarget == DNN_TARGET_OPENCL ||
preferableTarget == DNN_TARGET_OPENCL_FP16)
customizable &= ld.type != "Concat";
if (preferableTarget == DNN_TARGET_OPENCL ||
preferableTarget == DNN_TARGET_OPENCL_FP16)
customizable &= ld.type != "Power";
if (preferableTarget == DNN_TARGET_OPENCL)
customizable &= ld.type != "Eltwise";
if (!customizable)
{
addInfEngineNetOutputs(ld);
net = Ptr<InfEngineBackendNet>();
@@ -1562,6 +1587,7 @@ struct Net::Impl
layer->preferableTarget = DNN_TARGET_CPU;
continue;
}
}
ld.skip = true; // Initially skip all Inference Engine supported layers.
// Create a new network if one of the inputs comes from a different Inference Engine graph.
@@ -1599,7 +1625,13 @@ struct Net::Impl
if (!fused)
{
if (layer->supportBackend(preferableBackend))
node = layer->initInfEngine(ld.inputBlobsWrappers);
else
{
node = Ptr<BackendNode>(new InfEngineBackendNode(
ld.layerInstance, ld.inputBlobs, ld.outputBlobs, ld.internals));
}
}
else if (node.empty())
continue;

View File

@@ -6,6 +6,7 @@
// Third party copyrights are property of their respective owners.
#include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "layers_common.hpp"
#ifdef HAVE_OPENCL
@@ -23,6 +24,11 @@ public:
CV_Assert(blobs.size() == 1);
}
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_INFERENCE_ENGINE;
}
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
@@ -58,6 +64,15 @@ public:
outputs_arr.getMatVector(outputs);
blobs[0].copyTo(outputs[0]);
}
#ifdef HAVE_INF_ENGINE
virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
{
InferenceEngine::Builder::ConstLayer ieLayer(name);
ieLayer.setData(wrapToInfEngineBlob(blobs[0]));
return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
}
#endif // HAVE_INF_ENGINE
};
Ptr<Layer> ConstLayer::create(const LayerParams& params)

View File

@@ -14,6 +14,7 @@ class CropAndResizeLayerImpl CV_FINAL : public CropAndResizeLayer
public:
CropAndResizeLayerImpl(const LayerParams& params)
{
setParamsFrom(params);
CV_Assert_N(params.has("width"), params.has("height"));
outWidth = params.get<float>("width");
outHeight = params.get<float>("height");

View File

@@ -927,7 +927,7 @@ public:
ieLayer.setShareLocation(_shareLocation);
ieLayer.setBackgroudLabelId(_backgroundLabelId);
ieLayer.setNMSThreshold(_nmsThreshold);
ieLayer.setTopK(_topK);
ieLayer.setTopK(_topK > 0 ? _topK : _keepTopK);
ieLayer.setKeepTopK(_keepTopK);
ieLayer.setConfidenceThreshold(_confidenceThreshold);
ieLayer.setVariantEncodedInTarget(_varianceEncodedInTarget);

View File

@@ -25,10 +25,186 @@ namespace cv { namespace dnn {
// OpenCV lets users use an empty input name; to prevent unexpected naming,
// we use a predefined one.
static std::string kDefaultInpLayerName = "empty_inp_layer_name";
static std::string kOpenCVLayersType = "OpenCVLayer";
static std::string shapesToStr(const std::vector<Mat>& mats)
{
std::ostringstream shapes;
shapes << mats.size() << " ";
for (const Mat& m : mats)
{
shapes << m.dims << " ";
for (int i = 0; i < m.dims; ++i)
shapes << m.size[i] << " ";
}
return shapes.str();
}
static void strToShapes(const std::string& str, std::vector<std::vector<size_t> >& shapes)
{
std::istringstream ss(str);
int num, dims;
ss >> num;
shapes.resize(num);
for (int i = 0; i < num; ++i)
{
ss >> dims;
shapes[i].resize(dims);
for (int j = 0; j < dims; ++j)
ss >> shapes[i][j];
}
}
class InfEngineCustomLayer : public InferenceEngine::ILayerExecImpl
{
public:
explicit InfEngineCustomLayer(const InferenceEngine::CNNLayer& layer) : cnnLayer(layer)
{
std::istringstream iss(layer.GetParamAsString("impl"));
size_t ptr;
iss >> ptr;
cvLayer = (Layer*)ptr;
std::vector<std::vector<size_t> > shapes;
strToShapes(layer.GetParamAsString("internals"), shapes);
internals.resize(shapes.size());
for (int i = 0; i < shapes.size(); ++i)
internals[i].create(std::vector<int>(shapes[i].begin(), shapes[i].end()), CV_32F);
}
virtual InferenceEngine::StatusCode execute(std::vector<InferenceEngine::Blob::Ptr>& inputs,
std::vector<InferenceEngine::Blob::Ptr>& outputs,
InferenceEngine::ResponseDesc *resp) noexcept
{
std::vector<Mat> inpMats, outMats;
infEngineBlobsToMats(inputs, inpMats);
infEngineBlobsToMats(outputs, outMats);
try
{
cvLayer->forward(inpMats, outMats, internals);
return InferenceEngine::StatusCode::OK;
}
catch (...)
{
return InferenceEngine::StatusCode::GENERAL_ERROR;
}
}
virtual InferenceEngine::StatusCode
getSupportedConfigurations(std::vector<InferenceEngine::LayerConfig>& conf,
InferenceEngine::ResponseDesc* resp) noexcept
{
std::vector<InferenceEngine::DataConfig> inDataConfig;
std::vector<InferenceEngine::DataConfig> outDataConfig;
for (auto& it : cnnLayer.insData)
{
InferenceEngine::DataConfig conf;
conf.desc = it.lock()->getTensorDesc();
inDataConfig.push_back(conf);
}
for (auto& it : cnnLayer.outData)
{
InferenceEngine::DataConfig conf;
conf.desc = it->getTensorDesc();
outDataConfig.push_back(conf);
}
InferenceEngine::LayerConfig layerConfig;
layerConfig.inConfs = inDataConfig;
layerConfig.outConfs = outDataConfig;
conf.push_back(layerConfig);
return InferenceEngine::StatusCode::OK;
}
InferenceEngine::StatusCode init(InferenceEngine::LayerConfig& config,
InferenceEngine::ResponseDesc *resp) noexcept
{
return InferenceEngine::StatusCode::OK;
}
private:
InferenceEngine::CNNLayer cnnLayer;
dnn::Layer* cvLayer;
std::vector<Mat> internals;
};
class InfEngineCustomLayerShapeInfer : public InferenceEngine::IShapeInferImpl
{
public:
InferenceEngine::StatusCode
inferShapes(const std::vector<InferenceEngine::Blob::CPtr>& inBlobs,
const std::map<std::string, std::string>& params,
const std::map<std::string, InferenceEngine::Blob::Ptr>& blobs,
std::vector<InferenceEngine::SizeVector>& outShapes,
InferenceEngine::ResponseDesc* desc) noexcept override
{
strToShapes(params.at("outputs"), outShapes);
return InferenceEngine::StatusCode::OK;
}
};
class InfEngineCustomLayerFactory : public InferenceEngine::ILayerImplFactory {
public:
explicit InfEngineCustomLayerFactory(const InferenceEngine::CNNLayer* layer) : cnnLayer(*layer) {}
InferenceEngine::StatusCode
getImplementations(std::vector<InferenceEngine::ILayerImpl::Ptr>& impls,
InferenceEngine::ResponseDesc* resp) noexcept override {
impls.push_back(std::make_shared<InfEngineCustomLayer>(cnnLayer));
return InferenceEngine::StatusCode::OK;
}
private:
InferenceEngine::CNNLayer cnnLayer;
};
class InfEngineExtension : public InferenceEngine::IExtension
{
public:
virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
virtual void Unload() noexcept {}
virtual void Release() noexcept {}
virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
virtual InferenceEngine::StatusCode getPrimitiveTypes(char**&, unsigned int&,
InferenceEngine::ResponseDesc*) noexcept
{
return InferenceEngine::StatusCode::OK;
}
InferenceEngine::StatusCode getFactoryFor(InferenceEngine::ILayerImplFactory*& factory,
const InferenceEngine::CNNLayer* cnnLayer,
InferenceEngine::ResponseDesc* resp) noexcept
{
if (cnnLayer->type != kOpenCVLayersType)
return InferenceEngine::StatusCode::NOT_IMPLEMENTED;
factory = new InfEngineCustomLayerFactory(cnnLayer);
return InferenceEngine::StatusCode::OK;
}
};
InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
InfEngineBackendNode::InfEngineBackendNode(Ptr<Layer>& cvLayer_, std::vector<Mat*>& inputs,
std::vector<Mat>& outputs,
std::vector<Mat>& internals)
: BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(cvLayer_->name),
cvLayer(cvLayer_)
{
CV_Assert(!cvLayer->name.empty());
layer.setName(cvLayer->name);
layer.setType(kOpenCVLayersType);
layer.getParameters()["impl"] = (size_t)cvLayer.get();
layer.getParameters()["outputs"] = shapesToStr(outputs);
layer.getParameters()["internals"] = shapesToStr(internals);
layer.setInputPorts(std::vector<InferenceEngine::Port>(inputs.size()));
layer.setOutputPorts(std::vector<InferenceEngine::Port>(outputs.size()));
}
static std::vector<Ptr<InfEngineBackendWrapper> >
infEngineWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
{
@@ -111,6 +287,8 @@ void InfEngineBackendNet::init(int targetId)
#endif
netBuilder.addLayer({InferenceEngine::PortInfo(id)}, outLayer);
}
netBuilder.getContext().addShapeInferImpl(kOpenCVLayersType,
std::make_shared<InfEngineCustomLayerShapeInfer>());
cnn = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(netBuilder.build()));
}
@@ -403,6 +581,7 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
try
{
AutoLock lock(getInitializationMutex());
InferenceEngine::Core& ie = getCore();
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
auto& sharedPlugins = getSharedPlugins();
auto pluginIt = sharedPlugins.find(device_name);
@@ -464,7 +643,9 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
enginePtr->AddExtension(extension, 0);
#else
getCore().AddExtension(extension, "CPU");
ie.AddExtension(extension, "CPU");
// Register OpenCV fallback layers as an IE extension.
ie.AddExtension(std::make_shared<InfEngineExtension>(), "CPU");
#endif
CV_LOG_INFO(NULL, "DNN-IE: Loaded extension plugin: " << libName);
found = true;
@@ -485,7 +666,7 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
}}, 0);
#else
if (device_name == "CPU")
getCore().SetConfig({{
ie.SetConfig({{
InferenceEngine::PluginConfigParams::KEY_CPU_THREADS_NUM, format("%d", getNumThreads()),
}}, device_name);
#endif
@@ -495,7 +676,25 @@ void InfEngineBackendNet::initPlugin(InferenceEngine::CNNNetwork& net)
plugin = InferenceEngine::InferencePlugin(enginePtr);
netExec = plugin.LoadNetwork(net, {});
#else
netExec = getCore().LoadNetwork(net, device_name);
bool isHetero = false;
if (device_name != "CPU")
{
isHetero = device_name == "FPGA";
for (auto& layer : net)
{
if (layer->type == kOpenCVLayersType)
{
layer->affinity = "CPU";
isHetero = true;
}
else
layer->affinity = device_name;
}
}
if (isHetero)
netExec = ie.LoadNetwork(net, "HETERO:" + device_name + ",CPU");
else
netExec = ie.LoadNetwork(net, device_name);
#endif
}
catch (const std::exception& ex)
@@ -672,6 +871,14 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob)
return Mat(size, type, (void*)blob->buffer());
}
void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
std::vector<Mat>& mats)
{
mats.resize(blobs.size());
for (int i = 0; i < blobs.size(); ++i)
mats[i] = infEngineBlobToMat(blobs[i]);
}
bool InfEngineBackendLayer::getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs,
std::vector<MatShape> &outputs,
@@ -769,7 +976,8 @@ void resetMyriadDevice()
#if INF_ENGINE_VER_MAJOR_LE(INF_ENGINE_RELEASE_2019R1)
getSharedPlugins().erase("MYRIAD");
#else
getCore().UnregisterPlugin("MYRIAD");
// To unregister both "MYRIAD" and "HETERO:MYRIAD,CPU" plugins
getCore() = InferenceEngine::Core();
#endif
#endif // HAVE_INF_ENGINE
}
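
For reference, the shape-serialization format produced by shapesToStr() and consumed by strToShapes() in this file is a flat whitespace-separated stream: the number of blobs, then for each blob its dimension count followed by its sizes. A self-contained round-trip sketch of the parsing side, with the logic mirrored from strToShapes() above (no OpenCV dependency):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main()
{
    // Encoding of two shapes, 1x3x416x416 and 1x2, as shapesToStr() would emit it.
    std::string encoded = "2 4 1 3 416 416 2 1 2 ";

    std::istringstream ss(encoded);
    int num = 0, dims = 0;
    ss >> num;
    std::vector<std::vector<size_t> > shapes(num);
    for (int i = 0; i < num; ++i)
    {
        ss >> dims;
        shapes[i].resize(dims);
        for (int j = 0; j < dims; ++j)
            ss >> shapes[i][j];
    }

    for (const std::vector<size_t>& s : shapes)  // prints "1 3 416 416" then "1 2"
    {
        for (size_t d : s)
            std::cout << d << ' ';
        std::cout << '\n';
    }
    return 0;
}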

View File

@@ -137,12 +137,17 @@ class InfEngineBackendNode : public BackendNode
public:
InfEngineBackendNode(const InferenceEngine::Builder::Layer& layer);
InfEngineBackendNode(Ptr<Layer>& layer, std::vector<Mat*>& inputs,
std::vector<Mat>& outputs, std::vector<Mat>& internals);
void connect(std::vector<Ptr<BackendWrapper> >& inputs,
std::vector<Ptr<BackendWrapper> >& outputs);
// Inference Engine network object that allows obtaining the outputs of this layer.
InferenceEngine::Builder::Layer layer;
Ptr<InfEngineBackendNet> net;
// CPU fallback in case of an unsupported Inference Engine layer.
Ptr<dnn::Layer> cvLayer;
};
class InfEngineBackendWrapper : public BackendWrapper
@@ -173,6 +178,9 @@ InferenceEngine::DataPtr infEngineDataNode(const Ptr<BackendWrapper>& ptr);
Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
void infEngineBlobsToMats(const std::vector<InferenceEngine::Blob::Ptr>& blobs,
std::vector<Mat>& mats);
// Convert Inference Engine blob with FP32 precision to FP16 precision.
// Allocates memory for a new blob.
InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);

View File

@@ -53,17 +53,6 @@ static std::string _tf(TString filename)
return (getOpenCVExtraDir() + "/dnn/") + filename;
}
static std::vector<String> getOutputsNames(const Net& net)
{
std::vector<String> names;
std::vector<int> outLayers = net.getUnconnectedOutLayers();
std::vector<String> layersNames = net.getLayerNames();
names.resize(outLayers.size());
for (size_t i = 0; i < outLayers.size(); ++i)
names[i] = layersNames[outLayers[i] - 1];
return names;
}
TEST(Test_Darknet, read_tiny_yolo_voc)
{
Net net = readNetFromDarknet(_tf("tiny-yolo-voc.cfg"));
@@ -159,7 +148,7 @@ public:
net.setPreferableTarget(target);
net.setInput(inp);
std::vector<Mat> outs;
net.forward(outs, getOutputsNames(net));
net.forward(outs, net.getUnconnectedOutLayersNames());
for (int b = 0; b < batch_size; ++b)
{
@@ -339,6 +328,62 @@ TEST_P(Test_Darknet_nets, TinyYoloVoc)
}
}
#ifdef HAVE_INF_ENGINE
static const std::chrono::milliseconds async_timeout(500);
typedef testing::TestWithParam<tuple<std::string, Target> > Test_Darknet_nets_async;
TEST_P(Test_Darknet_nets_async, Accuracy)
{
applyTestTag(CV_TEST_TAG_MEMORY_512MB);
std::string prefix = get<0>(GetParam());
int target = get<1>(GetParam());
const int numInputs = 2;
std::vector<Mat> inputs(numInputs);
int blobSize[] = {1, 3, 416, 416};
for (int i = 0; i < numInputs; ++i)
{
inputs[i].create(4, &blobSize[0], CV_32F);
randu(inputs[i], 0, 1);
}
Net netSync = readNet(findDataFile("dnn/" + prefix + ".cfg"),
findDataFile("dnn/" + prefix + ".weights", false));
netSync.setPreferableTarget(target);
// Run synchronously.
std::vector<Mat> refs(numInputs);
for (int i = 0; i < numInputs; ++i)
{
netSync.setInput(inputs[i]);
refs[i] = netSync.forward().clone();
}
Net netAsync = readNet(findDataFile("dnn/" + prefix + ".cfg"),
findDataFile("dnn/" + prefix + ".weights", false));
netAsync.setPreferableTarget(target);
// Run asynchronously. To make the test more robust, process inputs in reverse order.
for (int i = numInputs - 1; i >= 0; --i)
{
netAsync.setInput(inputs[i]);
AsyncArray out = netAsync.forwardAsync();
ASSERT_TRUE(out.valid());
Mat result;
EXPECT_TRUE(out.get(result, async_timeout));
normAssert(refs[i], result, format("Index: %d", i).c_str(), 0, 0);
}
}
INSTANTIATE_TEST_CASE_P(/**/, Test_Darknet_nets_async, Combine(
Values("yolo-voc", "tiny-yolo-voc", "yolov3"),
ValuesIn(getAvailableTargets(DNN_BACKEND_INFERENCE_ENGINE))
));
#endif
TEST_P(Test_Darknet_nets, YOLOv3)
{
applyTestTag(CV_TEST_TAG_LONG, (target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_1GB : CV_TEST_TAG_MEMORY_2GB));
@@ -376,6 +421,16 @@ TEST_P(Test_Darknet_nets, YOLOv3)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL) // Test with 'batch size 2' is disabled for DLIE/OpenCL target
#endif
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
{
if (target == DNN_TARGET_OPENCL)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
if (target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
}
#endif
{
SCOPED_TRACE("batch size 2");
testDarknetModel(config_file, weights_file, ref, scoreDiff, iouDiff);
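
As an aside on the test change above: the removed getOutputsNames() helper duplicated what Net::getUnconnectedOutLayersNames() already provides. A minimal sketch of the replacement in user code (runAllOutputs is a hypothetical wrapper):

#include <opencv2/dnn.hpp>
#include <vector>

// Fetch every network output in one call instead of mapping
// getUnconnectedOutLayers() indices onto getLayerNames() by hand.
void runAllOutputs(cv::dnn::Net& net, const cv::Mat& blob, std::vector<cv::Mat>& outs)
{
    net.setInput(blob);
    net.forward(outs, net.getUnconnectedOutLayersNames());
}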

View File

@@ -554,6 +554,11 @@ TEST_P(ReLU, Accuracy)
Backend backendId = get<0>(get<1>(GetParam()));
Target targetId = get<1>(get<1>(GetParam()));
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD && negativeSlope < 0)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
#endif
LayerParams lp;
lp.set("negative_slope", negativeSlope);
lp.type = "ReLU";

View File

@@ -1112,7 +1112,7 @@ INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_DLDT_two_inputs, Combine(
class UnsupportedLayer : public Layer
{
public:
UnsupportedLayer(const LayerParams &params) {}
UnsupportedLayer(const LayerParams &params) : Layer(params) {}
static Ptr<Layer> create(const LayerParams& params)
{
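
The constructor fix above forwards params to the Layer base class so the layer's name gets set; the new fallback path needs that because InfEngineBackendNode asserts the wrapped layer has a non-empty name, which is also why CropAndResizeLayerImpl gained setParamsFrom(params). A hedged sketch of a user-defined layer (DoubleLayer and its registration are hypothetical) that IE cannot run natively and that would execute via the OpenCV fallback after this patch:

#include <opencv2/dnn.hpp>
#include <opencv2/dnn/layer.details.hpp>

class DoubleLayer CV_FINAL : public cv::dnn::Layer
{
public:
    // Forwarding params sets the layer name, required by the IE fallback.
    DoubleLayer(const cv::dnn::LayerParams& params) : Layer(params) {}

    static cv::Ptr<cv::dnn::Layer> create(const cv::dnn::LayerParams& params)
    {
        return cv::makePtr<DoubleLayer>(params);
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return backendId == cv::dnn::DNN_BACKEND_OPENCV;  // OpenCV implementation only
    }

    virtual void forward(cv::InputArrayOfArrays inputs_arr, cv::OutputArrayOfArrays outputs_arr,
                         cv::OutputArrayOfArrays) CV_OVERRIDE
    {
        std::vector<cv::Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        inputs[0].convertTo(outputs[0], CV_32F, 2.0);  // out = 2 * in
    }
};

// Registered once at startup so model importers can instantiate it:
// CV_DNN_REGISTER_LAYER_CLASS(Double, DoubleLayer);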

View File

@@ -145,8 +145,17 @@ TEST_P(Test_TensorFlow_layers, padding)
{
runTensorFlowNet("padding_valid");
runTensorFlowNet("spatial_padding");
runTensorFlowNet("keras_pad_concat");
runTensorFlowNet("mirror_pad");
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000)
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
{
if (target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
if (target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
}
#endif
runTensorFlowNet("keras_pad_concat");
}
TEST_P(Test_TensorFlow_layers, padding_same)
@@ -472,7 +481,7 @@ TEST_P(Test_TensorFlow_nets, Faster_RCNN)
"faster_rcnn_resnet50_coco_2018_01_28"};
checkBackend();
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE);
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
@@ -573,6 +582,10 @@ TEST_P(Test_TensorFlow_nets, EAST_text_detection)
#if defined(INF_ENGINE_RELEASE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD);
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16 &&
INF_ENGINE_VER_MAJOR_EQ(2019020000))
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_2019R2);
#endif
checkBackend();
@@ -673,7 +686,8 @@ TEST_P(Test_TensorFlow_layers, lstm)
TEST_P(Test_TensorFlow_layers, split)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE)
if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD &&
getInferenceEngineVPUType() == CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_2)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD_2);
runTensorFlowNet("split");
if (backend == DNN_BACKEND_INFERENCE_ENGINE)