From dc3406eed9f04f773c8e85af4f814086a3b2200c Mon Sep 17 00:00:00 2001
From: Dmitry Kurtaev
Date: Mon, 15 Oct 2018 16:40:28 +0300
Subject: [PATCH] Fix Pooling and Convolution layers from Intel's Inference
 Engine

---
 modules/dnn/src/dnn.cpp                      | 10 ++++++++--
 modules/dnn/src/layers/convolution_layer.cpp | 30 ++++++++++++++++++++++++++++--
 modules/dnn/src/layers/pooling_layer.cpp     | 11 +++++++++++
 3 files changed, 47 insertions(+), 4 deletions(-)

diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 4e4fd9cbbd..0afa8d5a27 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -1511,10 +1511,10 @@ struct Net::Impl
             CV_Assert(!ieNode.empty());
             ieNode->net = net;
 
+            auto weightableLayer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(ieNode->layer);
             if ((preferableTarget == DNN_TARGET_OPENCL_FP16 || preferableTarget == DNN_TARGET_MYRIAD) && !fused)
             {
                 ieNode->layer->precision = InferenceEngine::Precision::FP16;
-                auto weightableLayer = std::dynamic_pointer_cast<InferenceEngine::WeightableLayer>(ieNode->layer);
                 if (weightableLayer)
                 {
                     if (weightableLayer->_weights)
@@ -1532,7 +1532,13 @@
                     }
                 }
             }
-
+            if (weightableLayer)
+            {
+                if (weightableLayer->_weights)
+                    weightableLayer->blobs["weights"] = weightableLayer->_weights;
+                if (weightableLayer->_biases)
+                    weightableLayer->blobs["biases"] = weightableLayer->_biases;
+            }
             ieNode->connect(ld.inputBlobsWrappers, ld.outputBlobsWrappers);
             net->addBlobs(ld.inputBlobsWrappers);
             net->addBlobs(ld.outputBlobsWrappers);
diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp
index 0f0d3d41e1..cfd0ee00c4 100644
--- a/modules/dnn/src/layers/convolution_layer.cpp
+++ b/modules/dnn/src/layers/convolution_layer.cpp
@@ -449,15 +449,28 @@ public:
         lp.precision = InferenceEngine::Precision::FP32;
         std::shared_ptr<InferenceEngine::ConvolutionLayer> ieLayer(new InferenceEngine::ConvolutionLayer(lp));
 
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
+        ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
+        ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
+        ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
+        ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
+        ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width);
+        ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height);
+        ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width);
+        ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height);
+        ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width);
+        ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height);
+#else
         ieLayer->_kernel_x = kernel.width;
         ieLayer->_kernel_y = kernel.height;
         ieLayer->_stride_x = stride.width;
         ieLayer->_stride_y = stride.height;
-        ieLayer->_out_depth = outCn;
         ieLayer->_padding_x = pad.width;
         ieLayer->_padding_y = pad.height;
         ieLayer->_dilation_x = dilation.width;
         ieLayer->_dilation_y = dilation.height;
+#endif
+        ieLayer->_out_depth = outCn;
         ieLayer->_group = group;
 
         ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
@@ -1659,15 +1672,28 @@ public:
         lp.precision = InferenceEngine::Precision::FP32;
         std::shared_ptr<InferenceEngine::DeconvolutionLayer> ieLayer(new InferenceEngine::DeconvolutionLayer(lp));
 
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
+        ieLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
+        ieLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
+        ieLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
+        ieLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
+        ieLayer->_padding.insert(InferenceEngine::X_AXIS, pad.width);
+        ieLayer->_padding.insert(InferenceEngine::Y_AXIS, pad.height);
+        ieLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad.width);
+        ieLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad.height);
+        ieLayer->_dilation.insert(InferenceEngine::X_AXIS, dilation.width);
+        ieLayer->_dilation.insert(InferenceEngine::Y_AXIS, dilation.height);
+#else
         ieLayer->_kernel_x = kernel.width;
         ieLayer->_kernel_y = kernel.height;
         ieLayer->_stride_x = stride.width;
         ieLayer->_stride_y = stride.height;
-        ieLayer->_out_depth = numOutput;
         ieLayer->_padding_x = pad.width;
         ieLayer->_padding_y = pad.height;
         ieLayer->_dilation_x = dilation.width;
         ieLayer->_dilation_y = dilation.height;
+#endif
+        ieLayer->_out_depth = numOutput;
         ieLayer->_group = group;
 
         ieLayer->_weights = wrapToInfEngineBlob(blobs[0], InferenceEngine::Layout::OIHW);
diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp
index 028f4f8b6e..bb8483975f 100644
--- a/modules/dnn/src/layers/pooling_layer.cpp
+++ b/modules/dnn/src/layers/pooling_layer.cpp
@@ -268,6 +268,16 @@ public:
         {
             lp.type = "Pooling";
             InferenceEngine::PoolingLayer* poolLayer = new InferenceEngine::PoolingLayer(lp);
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2018R3)
+            poolLayer->_kernel.insert(InferenceEngine::X_AXIS, kernel.width);
+            poolLayer->_kernel.insert(InferenceEngine::Y_AXIS, kernel.height);
+            poolLayer->_stride.insert(InferenceEngine::X_AXIS, stride.width);
+            poolLayer->_stride.insert(InferenceEngine::Y_AXIS, stride.height);
+            poolLayer->_padding.insert(InferenceEngine::X_AXIS, pad_l);
+            poolLayer->_padding.insert(InferenceEngine::Y_AXIS, pad_t);
+            poolLayer->_pads_end.insert(InferenceEngine::X_AXIS, pad_r);
+            poolLayer->_pads_end.insert(InferenceEngine::Y_AXIS, pad_b);
+#else
             poolLayer->_kernel_x = kernel.width;
             poolLayer->_kernel_y = kernel.height;
             poolLayer->_stride_x = stride.width;
@@ -276,6 +286,7 @@ public:
             poolLayer->_padding_y = pad_t;
             poolLayer->params["pad-r"] = format("%d", pad_r);
             poolLayer->params["pad-b"] = format("%d", pad_b);
+#endif
            poolLayer->_exclude_pad = type == AVE && padMode == "SAME";
            poolLayer->params["rounding-type"] = ceilMode ? "ceil" : "floor";
            poolLayer->_type = type == MAX ? InferenceEngine::PoolingLayer::PoolType::MAX :
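
Reviewer note, not part of the patch: Inference Engine releases after 2018R3 replace the per-axis scalar geometry fields (_kernel_x/_kernel_y, _stride_x/_stride_y, ...) with per-axis property vectors that are filled with one insert(axis, value) call per axis; for Pooling, the right/bottom paddings that the old API passed through the "pad-r"/"pad-b" string params now land in _pads_end. The guarded #if branches above target that newer API while keeping the old field assignments for 2018R3 and earlier. The self-contained sketch below imitates only the container shape to show the call pattern; AxisVector is an illustrative stand-in, not the real InferenceEngine::PropertyVector implementation.

// Illustrative stand-in for the per-axis property container used by newer
// Inference Engine layer descriptors (assumption: the real type,
// InferenceEngine::PropertyVector, differs in implementation details).
#include <cstdio>
#include <map>

enum Axis { X_AXIS, Y_AXIS };

template <typename T>
class AxisVector
{
public:
    // Record a value for one axis, mirroring _kernel.insert(X_AXIS, w).
    void insert(Axis axis, const T& value) { values_[axis] = value; }
    T at(Axis axis) const { return values_.at(axis); }
private:
    std::map<Axis, T> values_;
};

// A toy descriptor reusing the field names from the patch.
struct ConvGeometry
{
    AxisVector<unsigned int> _kernel, _stride, _padding, _pads_end, _dilation;
};

int main()
{
    // Same fill pattern as the #if branches above: one insert per axis
    // replaces the old scalar pairs (_kernel_x/_kernel_y, ...), and
    // _pads_end carries the trailing (right/bottom) paddings explicitly.
    ConvGeometry conv;
    conv._kernel.insert(X_AXIS, 3);   conv._kernel.insert(Y_AXIS, 3);
    conv._stride.insert(X_AXIS, 1);   conv._stride.insert(Y_AXIS, 1);
    conv._padding.insert(X_AXIS, 1);  conv._padding.insert(Y_AXIS, 1);
    conv._pads_end.insert(X_AXIS, 1); conv._pads_end.insert(Y_AXIS, 1);
    conv._dilation.insert(X_AXIS, 1); conv._dilation.insert(Y_AXIS, 1);
    std::printf("kernel: %ux%u\n", conv._kernel.at(X_AXIS), conv._kernel.at(Y_AXIS));
    return 0;
}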