// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2016, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

/*
Implementation of shift layer, which adds up const values to blob.
*/

#include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include <opencv2/dnn/shape_utils.hpp>

namespace cv
{
namespace dnn
{

class ShiftLayerImpl CV_FINAL : public ShiftLayer
{
public:
    ShiftLayerImpl(const LayerParams &params)
    {
        setParamsFrom(params);
        CV_Assert(blobs.size() == 1);
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return backendId == DNN_BACKEND_DEFAULT ||
               (backendId == DNN_BACKEND_INFERENCE_ENGINE && haveInfEngine());
    }

    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
        // A 1 x (H*W) buffer of ones, used to broadcast the bias over spatial positions.
        internals.assign(1, shape(1, total(inputs[0], 2)));
        return true;
    }

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
    }

    virtual void forward(std::vector<Mat*> &inputs, std::vector<Mat> &outputs, std::vector<Mat> &internals) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_Assert(inputs.size() > 0);
        CV_Assert(blobs.size() > 0);

        if (inputs[0]->dims == blobs[0].dims)
        {
            // Shapes match: a plain element-wise addition is enough.
            for (size_t ii = 0; ii < outputs.size(); ii++)
            {
                Mat &inpBlob = *inputs[ii];
                Mat &outBlob = outputs[ii];

                outBlob = inpBlob + blobs[0];
            }
        }
        else
        {
            // Per-channel bias: broadcast it over the spatial dimensions with a
            // rank-1 GEMM, dst = bias (C x 1) * ones (1 x H*W) + dst.
            Mat biasOnesMat = internals[0];
            biasOnesMat.setTo(1);
            for (size_t ii = 0; ii < outputs.size(); ii++)
            {
                Mat &inpBlob = *inputs[ii];
                Mat &outBlob = outputs[ii];

                inpBlob.copyTo(outBlob);

                for (int n = 0; n < inpBlob.size[0]; n++)
                {
                    Mat dstMat(inpBlob.size[1], inpBlob.size[2] * inpBlob.size[3],
                               outBlob.type(), outBlob.ptr(n));
                    gemm(blobs[0], biasOnesMat, 1, dstMat, 1, dstMat); //TODO: gemv
                }
            }
        }
    }

    virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node) CV_OVERRIDE
    {
        switch (node->backendId)
        {
            case DNN_BACKEND_INFERENCE_ENGINE:
            {
#ifdef HAVE_INF_ENGINE
                // Fuse the constant shift into the biases of a preceding convolution.
                auto base = node.dynamicCast<InfEngineBackendNode>();
                auto conv = std::dynamic_pointer_cast<InferenceEngine::ConvolutionLayer>(base->layer);
                if (conv)
                {
                    fuseConvWeights(conv, Mat(), blobs[0]);
                    return base;
                }
#endif  // HAVE_INF_ENGINE
                break;
            }
        }
        return Ptr<BackendNode>();
    }

    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
#ifdef HAVE_INF_ENGINE
        // Inference Engine has no layer just for biases. Create a linear
        // transformation layer with ones weights.
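        // ScaleShift computes y = weights * x + biases per channel, so with the
        // weights filled with ones it reduces to the pure bias addition this
        // layer implements.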
        InferenceEngine::LayerParams lp;
        lp.name = name;
        lp.type = "ScaleShift";
        lp.precision = InferenceEngine::Precision::FP32;
        std::shared_ptr<InferenceEngine::ScaleShiftLayer> ieLayer(new InferenceEngine::ScaleShiftLayer(lp));

        auto weights = InferenceEngine::make_shared_blob<float>(InferenceEngine::Precision::FP32,
                                                                {blobs[0].total()});
        weights->allocate();

        std::vector<float> ones(blobs[0].total(), 1);
        weights->set(ones);
        ieLayer->_weights = weights;

        ieLayer->_biases = wrapToInfEngineBlob(blobs[0]);

        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
#endif  // HAVE_INF_ENGINE
        return Ptr<BackendNode>();
    }

    void getScaleShift(Mat& scale, Mat& shift) const CV_OVERRIDE
    {
        scale = Mat();
        shift = blobs[0];
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
    {
        (void)outputs; // suppress unused variable warning
        long flops = 0;

        // One addition per element of every input blob.
        for (size_t i = 0; i < inputs.size(); i++)
        {
            flops += total(inputs[i]);
        }

        return flops;
    }
};

Ptr<ShiftLayer> ShiftLayer::create(const LayerParams& params)
{
    return Ptr<ShiftLayer>(new ShiftLayerImpl(params));
}

}  // namespace dnn
}  // namespace cv
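/* A minimal usage sketch (an illustration, not part of the library): running a
   Shift layer through the public dnn API with a per-channel bias, so that the
   gemm broadcast path above is taken. The layer type "Shift" matches the layer
   factory registration; the layer name and blob sizes are assumptions chosen
   for the example.

    #include <opencv2/dnn.hpp>
    using namespace cv;
    using namespace cv::dnn;

    LayerParams lp;
    lp.type = "Shift";
    lp.name = "testShift";
    lp.blobs.push_back(Mat(2, 1, CV_32F, Scalar(0.5f)));  // C x 1 bias, C = 2

    Net net;
    net.addLayerToPrev(lp.name, lp.type, lp);

    int inpSize[] = {1, 2, 3, 3};                         // N x C x H x W
    net.setInput(Mat(4, inpSize, CV_32F, Scalar(1.0f)));
    Mat out = net.forward();                              // every element is 1.5f
*/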