From ca5976e3d4c5c868b8c542fd5109732d319155b2 Mon Sep 17 00:00:00 2001
From: Dmitry Kurtaev
Date: Thu, 14 Feb 2019 13:30:30 +0300
Subject: [PATCH] Fix IE backend considering future changes.

---
 modules/dnn/src/dnn.cpp                       | 20 ++---
 modules/dnn/src/layers/batch_norm_layer.cpp   |  7 +-
 modules/dnn/src/layers/blank_layer.cpp        |  4 +-
 modules/dnn/src/layers/convolution_layer.cpp  | 17 ++---
 modules/dnn/src/layers/elementwise_layers.cpp |  6 +-
 .../dnn/src/layers/fully_connected_layer.cpp  |  7 +-
 .../dnn/src/layers/normalize_bbox_layer.cpp   |  2 +-
 modules/dnn/src/layers/prior_box_layer.cpp    |  6 +-
 modules/dnn/src/layers/scale_layer.cpp        | 10 +--
 modules/dnn/src/op_inf_engine.cpp             | 75 ++++++++++++++++++-
 modules/dnn/src/op_inf_engine.hpp             |  6 +-
 modules/dnn/test/test_halide_layers.cpp       |  3 +-
 12 files changed, 114 insertions(+), 49 deletions(-)

diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index fc5548cd8d..5b65a6c50a 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -730,9 +730,9 @@ struct DataLayer : public Layer
         biases->set(biasesVec);
 
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
-        ieLayer.setWeights(weights);
-        ieLayer.setBiases(biases);
+        InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
+        addConstantData("weights", weights, ieLayer);
+        addConstantData("biases", biases, ieLayer);
 #else
         InferenceEngine::LayerParams lp;
         lp.name = name;
@@ -1638,25 +1638,15 @@ struct Net::Impl
                  preferableTarget == DNN_TARGET_FPGA) && !fused)
             {
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
-                bool hasWeights = false;
                 for (const std::string& name : {"weights", "biases"})
                 {
                     auto it = ieNode->layer.getParameters().find(name);
                     if (it != ieNode->layer.getParameters().end())
                     {
-                        InferenceEngine::Blob::CPtr bp = it->second.as<InferenceEngine::Blob::CPtr>();
-                        it->second = (InferenceEngine::Blob::CPtr)convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
-                        hasWeights = true;
+                        InferenceEngine::Blob::Ptr bp = it->second.as<InferenceEngine::Blob::Ptr>();
+                        it->second = convertFp16(std::const_pointer_cast<InferenceEngine::Blob>(bp));
                     }
                 }
-                if (!hasWeights)
-                {
-                    InferenceEngine::Blob::Ptr blob = InferenceEngine::make_shared_blob<int16_t>(
-                        InferenceEngine::Precision::FP16,
-                        InferenceEngine::Layout::C, {1});
-                    blob->allocate();
-                    ieNode->layer.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)blob;
-                }
 #else
                 auto& blobs = ieNode->layer.getConstantData();
                 if (blobs.empty())
diff --git a/modules/dnn/src/layers/batch_norm_layer.cpp b/modules/dnn/src/layers/batch_norm_layer.cpp
index 522d0229ba..4c69c247c4 100644
--- a/modules/dnn/src/layers/batch_norm_layer.cpp
+++ b/modules/dnn/src/layers/batch_norm_layer.cpp
@@ -350,11 +350,10 @@ public:
     {
 #ifdef HAVE_INF_ENGINE
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
-
+        InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
         const size_t numChannels = weights_.total();
-        ieLayer.setWeights(wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C));
-        ieLayer.setBiases(wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C));
+        addConstantData("weights", wrapToInfEngineBlob(weights_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
+        addConstantData("biases", wrapToInfEngineBlob(bias_, {numChannels}, InferenceEngine::Layout::C), ieLayer);
 
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #else
         InferenceEngine::LayerParams lp;
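
A note on the pattern above: a typed builder layer such as InferenceEngine::Builder::ScaleShiftLayer converts implicitly to the generic InferenceEngine::Builder::Layer, which is what lets every layer route its constants through the new addConstantData() helper (added to op_inf_engine.cpp below) instead of the typed setWeights()/setBiases() setters that post-2018R5 releases drop. A minimal sketch of the resulting idiom, with weightsMat/biasMat as illustrative stand-ins for a layer's own cv::Mat blobs:

    // Sketch only: mirrors the batch_norm_layer.cpp hunk above.
    InferenceEngine::Builder::Layer ieLayer = InferenceEngine::Builder::ScaleShiftLayer(name);
    const size_t n = weightsMat.total();
    addConstantData("weights", wrapToInfEngineBlob(weightsMat, {n}, InferenceEngine::Layout::C), ieLayer);
    addConstantData("biases", wrapToInfEngineBlob(biasMat, {n}, InferenceEngine::Layout::C), ieLayer);
    return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
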
diff --git a/modules/dnn/src/layers/blank_layer.cpp b/modules/dnn/src/layers/blank_layer.cpp
index 96336808a0..c3a68a2a42 100644
--- a/modules/dnn/src/layers/blank_layer.cpp
+++ b/modules/dnn/src/layers/blank_layer.cpp
@@ -125,7 +125,9 @@ public:
             ieLayer.getParameters()["axis"] = input->dims.size() - 1;
             ieLayer.getParameters()["out_sizes"] = input->dims[0];
         }
-        ieLayer.setInputPorts(std::vector<InferenceEngine::Port>(1));
+        std::vector<size_t> shape(input->dims);
+        std::reverse(shape.begin(), shape.end());
+        ieLayer.setInputPorts({InferenceEngine::Port(shape)});
         ieLayer.setOutputPorts(std::vector<InferenceEngine::Port>(1));
         return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
 #else
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 31665d7dcc..60611b52b2 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -493,11 +493,11 @@ public:
         ieLayer.setGroup((size_t)group);
         ieLayer.setOutDepth((size_t)outCn);
 
-        ieLayer.setWeights(ieWeights);
-        if (ieBiases)
-            ieLayer.setBiases(ieBiases);
-
         InferenceEngine::Builder::Layer l = ieLayer;
+        addConstantData("weights", ieWeights, l);
+        if (ieBiases)
+            addConstantData("biases", ieBiases, l);
+
         if (!padMode.empty())
             l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
 
@@ -1725,12 +1725,11 @@ public:
         ieLayer.setGroup((size_t)group);
         ieLayer.setOutDepth((size_t)numOutput);
 
-        ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW));
+        InferenceEngine::Builder::Layer l = ieLayer;
+        addConstantData("weights", wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW), l);
         if (hasBias())
-        {
-            ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C));
-        }
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+            addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)numOutput}, InferenceEngine::Layout::C), l);
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
         const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW layout
         const int group = numOutput / outGroupCn;
diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp
index b2e0621d6d..a18cce6fa9 100644
--- a/modules/dnn/src/layers/elementwise_layers.cpp
+++ b/modules/dnn/src/layers/elementwise_layers.cpp
@@ -1134,10 +1134,10 @@ struct ChannelsPReLUFunctor
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
     InferenceEngine::Builder::Layer initInfEngineBuilderAPI()
     {
-        InferenceEngine::Builder::PReLULayer ieLayer("");
+        InferenceEngine::Builder::Layer l = InferenceEngine::Builder::PReLULayer("");
         const size_t numChannels = scale.total();
-        ieLayer.setWeights(wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C));
-        return ieLayer;
+        addConstantData("weights", wrapToInfEngineBlob(scale, {numChannels}, InferenceEngine::Layout::C), l);
+        return l;
     }
 #else
     InferenceEngine::CNNLayerPtr initInfEngine(InferenceEngine::LayerParams& lp)
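
The blank_layer.cpp change belongs to the same preparation: newer IE builders validate port shapes, so the input port now carries the real blob shape instead of a default-constructed Port. In the pre-2019 IE API blob dimensions are stored in reverse (minor-to-major) order, hence the std::reverse; a condensed sketch, assuming input is the layer's InferenceEngine::DataPtr as in the hunk:

    std::vector<size_t> shape(input->dims);    // old API keeps dims reversed, e.g. {W, H, C, N}
    std::reverse(shape.begin(), shape.end());  // -> logical {N, C, H, W} order for the port
    ieLayer.setInputPorts({InferenceEngine::Port(shape)});
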
diff --git a/modules/dnn/src/layers/fully_connected_layer.cpp b/modules/dnn/src/layers/fully_connected_layer.cpp
index 3a71a872fe..dcfa7d1dac 100644
--- a/modules/dnn/src/layers/fully_connected_layer.cpp
+++ b/modules/dnn/src/layers/fully_connected_layer.cpp
@@ -448,11 +448,12 @@ public:
         const int outNum = blobs[0].size[0];
         ieLayer.setOutputNum(outNum);
 
-        ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW));
+        InferenceEngine::Builder::Layer l = ieLayer;
+        addConstantData("weights", wrapToInfEngineBlob(blobs[0], {(size_t)blobs[0].size[0], (size_t)blobs[0].size[1], 1, 1}, InferenceEngine::Layout::OIHW), l);
         if (blobs.size() > 1)
-            ieLayer.setBiases(wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C));
+            addConstantData("biases", wrapToInfEngineBlob(blobs[1], {(size_t)outNum}, InferenceEngine::Layout::C), l);
 
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
         InferenceEngine::LayerParams lp;
         lp.name = name;
diff --git a/modules/dnn/src/layers/normalize_bbox_layer.cpp b/modules/dnn/src/layers/normalize_bbox_layer.cpp
index 8e21f116e4..cf968f823f 100644
--- a/modules/dnn/src/layers/normalize_bbox_layer.cpp
+++ b/modules/dnn/src/layers/normalize_bbox_layer.cpp
@@ -295,7 +295,7 @@ public:
             l.getParameters()["channel_shared"] = blobs[0].total() == 1;
         }
 #if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
-        l.getParameters()["weights"] = (InferenceEngine::Blob::CPtr)weights;
+        l.getParameters()["weights"] = weights;
 #else
         l.addConstantData("weights", weights);
 #endif
diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp
index ac11fe7ada..b2907b7b8b 100644
--- a/modules/dnn/src/layers/prior_box_layer.cpp
+++ b/modules/dnn/src/layers/prior_box_layer.cpp
@@ -524,12 +524,12 @@ public:
         if (_stepX == _stepY)
         {
             l.getParameters()["step"] = _stepX;
-            l.getParameters()["step_h"] = 0.0;
-            l.getParameters()["step_w"] = 0.0;
+            l.getParameters()["step_h"] = 0.0f;
+            l.getParameters()["step_w"] = 0.0f;
         }
         else
         {
-            l.getParameters()["step"] = 0.0;
+            l.getParameters()["step"] = 0.0f;
             l.getParameters()["step_h"] = _stepY;
             l.getParameters()["step_w"] = _stepX;
         }
diff --git a/modules/dnn/src/layers/scale_layer.cpp b/modules/dnn/src/layers/scale_layer.cpp
index a11fd379a2..d911905d36 100644
--- a/modules/dnn/src/layers/scale_layer.cpp
+++ b/modules/dnn/src/layers/scale_layer.cpp
@@ -198,13 +198,13 @@ public:
     {
 #ifdef HAVE_INF_ENGINE
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        InferenceEngine::Builder::ScaleShiftLayer ieLayer(name);
+        InferenceEngine::Builder::Layer l = InferenceEngine::Builder::ScaleShiftLayer(name);
 
         CV_Assert(!blobs.empty());
         const size_t numChannels = blobs[0].total();
         if (hasWeights)
         {
-            ieLayer.setWeights(wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C));
+            addConstantData("weights", wrapToInfEngineBlob(blobs[0], {numChannels}, InferenceEngine::Layout::C), l);
         }
         else
         {
@@ -214,11 +214,11 @@ public:
             std::vector<float> ones(numChannels, 1);
             weights->set(ones);
-            ieLayer.setWeights(weights);
+            addConstantData("weights", weights, l);
         }
 
         if (hasBias)
-            ieLayer.setBiases(wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C));
-        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
+            addConstantData("biases", wrapToInfEngineBlob(blobs.back(), {numChannels}, InferenceEngine::Layout::C), l);
+        return Ptr<BackendNode>(new InfEngineBackendNode(l));
 #else
         InferenceEngine::LayerParams lp;
         lp.name = name;
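
The prior_box_layer.cpp change from 0.0 to 0.0f looks cosmetic but is not: InferenceEngine::Parameter keeps the static type of whatever is assigned to it, and a consumer that reads the value back with as<float>() fails on a stored double. A hypothetical snippet (not part of the patch) to illustrate the failure mode:

    InferenceEngine::Parameter p = 0.0;   // stored as double
    // float f = p.as<float>();           // would fail: stored type is double
    p = 0.0f;                             // stored as float
    float f = p.as<float>();              // OK
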
diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp
index a452064337..ddaab41cdc 100644
--- a/modules/dnn/src/op_inf_engine.cpp
+++ b/modules/dnn/src/op_inf_engine.cpp
@@ -18,6 +18,11 @@ namespace cv { namespace dnn {
 
 #ifdef HAVE_INF_ENGINE
 
+// For networks with an input layer that has an empty name, IE generates a name id[some_number].
+// OpenCV lets users use an empty input name, so to prevent unexpected naming
+// we use a predefined one.
+static std::string kDefaultInpLayerName = "empty_inp_layer_name";
+
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
 
 InfEngineBackendNode::InfEngineBackendNode(const InferenceEngine::Builder::Layer& _layer)
     : BackendNode(DNN_BACKEND_INFERENCE_ENGINE), layer(_layer) {}
@@ -90,7 +95,7 @@ void InfEngineBackendNet::connect(const std::vector<Ptr<BackendWrapper> >& input
     it = layers.find(inpName);
     if (it == layers.end())
     {
-        InferenceEngine::Builder::InputLayer inpLayer(inpName);
+        InferenceEngine::Builder::InputLayer inpLayer(!inpName.empty() ? inpName : kDefaultInpLayerName);
 
         std::vector<size_t> shape(inp->blob->dims());
         std::reverse(shape.begin(), shape.end());
@@ -119,6 +124,14 @@ void InfEngineBackendNet::init(int targetId)
     for (int id : unconnectedLayersIds)
     {
         InferenceEngine::Builder::OutputLayer outLayer("myconv1");
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+        // Inference Engine determines network precision by ports.
+        InferenceEngine::Precision p = (targetId == DNN_TARGET_MYRIAD ||
+                                        targetId == DNN_TARGET_OPENCL_FP16) ?
+                                       InferenceEngine::Precision::FP16 :
+                                       InferenceEngine::Precision::FP32;
+        outLayer.setPort(InferenceEngine::Port({}, p));
+#endif
         netBuilder.addLayer({InferenceEngine::PortInfo(id)}, outLayer);
     }
     cnn = InferenceEngine::CNNNetwork(InferenceEngine::Builder::convertToICNNNetwork(netBuilder.build()));
@@ -167,12 +180,56 @@ void InfEngineBackendNet::init(int targetId)
     initPlugin(cnn);
 }
 
-void InfEngineBackendNet::addLayer(const InferenceEngine::Builder::Layer& layer)
+void InfEngineBackendNet::addLayer(InferenceEngine::Builder::Layer& layer)
 {
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    // Add weights to the network and connect them after the input blobs.
+    std::map<std::string, InferenceEngine::Parameter>& params = layer.getParameters();
+    std::vector<int> blobsIds;
+    std::vector<int> portIds;
+    for (const std::string& name : {"weights", "biases"})
+    {
+        bool asInput = false;
+        int portId = 0;
+        for (int i = 0; i < layer.getInputPorts().size(); ++i)
+        {
+            const auto& port = layer.getInputPorts()[i];
+            auto it = port.getParameters().find("type");
+            if (it != port.getParameters().end() && it->second == name)
+            {
+                portId = i;
+                asInput = true;
+                break;
+            }
+        }
+
+        if (!asInput)
+            continue;
+
+        auto it = params.find(name);
+        if (it != params.end())
+        {
+            InferenceEngine::Blob::Ptr blob = it->second.as<InferenceEngine::Blob::Ptr>();
+            params.erase(it);
+            int blobId = netBuilder.addLayer(InferenceEngine::Builder::ConstLayer(name).setData(blob));
+            blobsIds.push_back(blobId);
+            portIds.push_back(portId);
+        }
+    }
+#endif
+
     int id = netBuilder.addLayer(layer);
     const std::string& layerName = layer.getName();
     CV_Assert(layers.insert({layerName, id}).second);
     unconnectedLayersIds.insert(id);
+
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    // By default, all the weights are connected to the last port ids.
+    for (int i = 0; i < blobsIds.size(); ++i)
+    {
+        netBuilder.connect((size_t)blobsIds[i], {(size_t)id, portIds[i]});
+    }
+#endif
 }
 
 void InfEngineBackendNet::addOutput(const std::string& name)
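
The addLayer() rework above is the core of the "future changes" in the commit title: in IE releases after 2018R5, weights and biases are no longer layer blobs but ordinary graph inputs, tagged on the input ports with a "type" of "weights" or "biases". The loop therefore moves each matching blob out of the layer's parameter map into a ConstLayer and wires it in. Schematically, what gets built for e.g. a convolution (a condensed sketch; convId and weightsPortId stand for the ids the loop discovers):

    // Pull the kernel out of the layer parameters and make it a graph input.
    InferenceEngine::Blob::Ptr blob = params["weights"].as<InferenceEngine::Blob::Ptr>();
    int constId = netBuilder.addLayer(InferenceEngine::Builder::ConstLayer("weights").setData(blob));
    netBuilder.connect((size_t)constId, {(size_t)convId, weightsPortId});
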
@@ -705,7 +762,7 @@ void InfEngineBackendNet::addBlobs(const std::vector<Ptr<BackendWrapper> >& ptrs
     {
         std::string name = wrapper->dataPtr->name;
 #if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
-        name = name.empty() ? "id1" : name;  // TODO: drop the magic input name.
+        name = name.empty() ? kDefaultInpLayerName : name;
 #endif
         allBlobs.insert({name, wrapper->blob});
     }
@@ -776,6 +833,18 @@ InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob)
     return halfs;
 }
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data,
+                     InferenceEngine::Builder::Layer& l)
+{
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R5)
+    l.getParameters()[name] = data;
+#else
+    l.addConstantData(name, data);
+#endif
+}
+#endif
+
 #endif  // HAVE_INF_ENGINE
 
 bool haveInfEngine()
diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index e912725296..ac72c0c69c 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -162,7 +162,7 @@ public:
 
     InfEngineBackendNet(InferenceEngine::CNNNetwork& net);
 
-    void addLayer(const InferenceEngine::Builder::Layer& layer);
+    void addLayer(InferenceEngine::Builder::Layer& layer);
 
     void addOutput(const std::string& name);
 
@@ -255,6 +255,10 @@ Mat infEngineBlobToMat(const InferenceEngine::Blob::Ptr& blob);
 // Allocates memory for a new blob.
 InferenceEngine::Blob::Ptr convertFp16(const InferenceEngine::Blob::Ptr& blob);
 
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2018R5)
+void addConstantData(const std::string& name, InferenceEngine::Blob::Ptr data, InferenceEngine::Builder::Layer& l);
+#endif
+
 // This is a fake class to run networks from Model Optimizer. Objects of that
 // class simulate responses of layers imported by OpenCV and supported by
 // Inference Engine. The main difference is that they do not perform forward pass.
diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index 879dd7bbf0..92af2e94ee 100644
--- a/modules/dnn/test/test_halide_layers.cpp
+++ b/modules/dnn/test/test_halide_layers.cpp
@@ -695,7 +695,8 @@ TEST_P(Eltwise, Accuracy)
     Target targetId = get<1>(get<4>(GetParam()));
 
 #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE > 2018050000
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_OPENCL)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE &&
+        (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16))
         throw SkipTestException("");
 #endif
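
For context, the port precision selected in init() above lines up with what user code requests through the public dnn API; the Eltwise configuration skipped in the test is reached by exactly this kind of setup (model file names are illustrative):

    // Standard OpenCV dnn usage; OPENCL_FP16 is what drives the FP16 output ports.
    cv::dnn::Net net = cv::dnn::readNet("model.xml", "model.bin");
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_OPENCL_FP16);
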