// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.

// Copyright (C) 2018, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.

#include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "../op_cuda.hpp"
#include "layers_common.hpp"

#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
#endif

#ifdef HAVE_CUDA
#include "../cuda4dnn/primitives/const.hpp"
using namespace cv::dnn::cuda4dnn;
#endif

namespace cv { namespace dnn {

// A layer with no inputs that produces a single constant output: a copy of blobs[0].
class ConstLayerImpl CV_FINAL : public ConstLayer
{
public:
    ConstLayerImpl(const LayerParams& params)
    {
        setParamsFrom(params);
        CV_Assert(blobs.size() == 1);
    }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_INFERENCE_ENGINE ||
               backendId == DNN_BACKEND_CUDA;
    }

    virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
                                 const int requiredOutputs,
                                 std::vector<MatShape> &outputs,
                                 std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        // The layer takes no inputs; its single output has the shape of the stored blob.
        CV_Assert(inputs.empty());
        outputs.assign(1, shape(blobs[0]));
        return false;
    }

#ifdef HAVE_OPENCL
    bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        std::vector<UMat> outputs;
        outs.getUMatVector(outputs);
        if (outs.depth() == CV_16S)
            convertFp16(blobs[0], outputs[0]);
        else
            blobs[0].copyTo(outputs[0]);
        return true;
    }
#endif

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr))

        std::vector<Mat> outputs;
        outputs_arr.getMatVector(outputs);
        blobs[0].copyTo(outputs[0]);
    }

#ifdef HAVE_INF_ENGINE
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
        InferenceEngine::Builder::ConstLayer ieLayer(name);
        ieLayer.setData(wrapToInfEngineBlob(blobs[0]));
        return Ptr<BackendNode>(new InfEngineBackendNode(ieLayer));
    }
#endif  // HAVE_INF_ENGINE

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);

        CV_Assert(blobs.size() == 1);
        return make_cuda_node<cuda4dnn::ConstOp>(preferableTarget, std::move(context->stream), blobs[0]);
    }
#endif
};

Ptr<Layer> ConstLayer::create(const LayerParams& params)
{
    return Ptr<Layer>(new ConstLayerImpl(params));
}

}}  // namespace cv::dnn
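
/* Illustrative usage sketch (kept in a comment; not part of this translation unit).
   It is a minimal example assuming only the public dnn API (ConstLayer::create() and
   Layer::forward()); the blob shape (2x3, CV_32F) and variable names are arbitrary.
   A Const layer ignores its (empty) input list and copies blobs[0] into its single
   output, so the caller preallocates the output Mat with the same shape as the blob.

   cv::dnn::LayerParams lp;
   lp.type = "Const";
   lp.blobs.push_back(cv::Mat::ones(2, 3, CV_32F));     // the constant tensor to emit

   cv::Ptr<cv::dnn::Layer> layer = cv::dnn::ConstLayer::create(lp);

   std::vector<cv::Mat> inputs;                         // must stay empty
   std::vector<cv::Mat> outputs(1), internals;
   outputs[0].create(2, 3, CV_32F);                     // same shape as blobs[0]
   layer->forward(inputs, outputs, internals);          // outputs[0] is now a copy of lp.blobs[0]
*/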