/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                          License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "../precomp.hpp"
|
|
#include "layers_common.hpp"
|
|
#include "../op_cuda.hpp"
|
|
#include "../op_halide.hpp"
|
|
#include "../op_inf_engine.hpp"
|
|
#include "../ie_ngraph.hpp"
|
|
#include "../op_vkcom.hpp"
|
|
#include "../op_webnn.hpp"
|
|
#include "../op_cann.hpp"
|
|
|
|
#include <opencv2/dnn/shape_utils.hpp>
|
|
#include <iostream>
|
|
#include <limits>
|
|
#include <cfenv>
|
|
|
|
#ifdef HAVE_OPENCL
|
|
#include "opencl_kernels_dnn.hpp"
|
|
#endif
|
|
|
|
#ifdef HAVE_CUDA
|
|
#include "../cuda4dnn/primitives/activation.hpp"
|
|
using namespace cv::dnn::cuda4dnn;
|
|
#endif
|
|
#include <opencv2/core/utils/logger.hpp>
|
|
|
|
namespace cv
{
namespace dnn
{

using std::abs;
using std::exp;
using std::expm1;
using std::tanh;
using std::pow;
using std::ceil;
using std::floor;
using std::log;
using std::log1p;
using std::sqrt;
using std::round;
using std::acos;
using std::acosh;
using std::asin;
using std::asinh;
using std::atan;
using std::atanh;
using std::cos;
using std::cosh;
using std::erf;
using std::sin;
using std::sinh;
using std::tan;

template<typename Func>
class ElementWiseLayer : public Func::Layer
{
public:
    class PBody : public cv::ParallelLoopBody
    {
    public:
        const Func* func_;
        const Mat* src_;
        Mat* dst_;
        int nstripes_;

        PBody(const Func &func, const Mat &src, Mat& dst, int nstripes)
        {
            func_ = &func;
            src_ = &src;
            dst_ = &dst;
            nstripes_ = nstripes;
        }

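        // Each worker handles a contiguous stripe of the inner "plane"
        // (the product of all dimensions past the first two) for every
        // sample in the batch; stripe bounds are clamped to planeSize so
        // the last stripe may be shorter than the others.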
        void operator()(const Range &r) const CV_OVERRIDE
        {
            int nstripes = nstripes_, nsamples = 1, outCn = 1;
            size_t planeSize = 1;

            if (src_->dims > 1)
            {
                nsamples = src_->size[0];
                outCn = src_->size[1];
            }
            else
                outCn = src_->size[0];

            for (int i = 2; i < src_->dims; ++i)
                planeSize *= src_->size[i];

            size_t stripeSize = (planeSize + nstripes - 1)/nstripes;
            size_t stripeStart = r.start*stripeSize;
            size_t stripeEnd = std::min(r.end*stripeSize, planeSize);

            for( int i = 0; i < nsamples; i++ )
            {
                const float* srcptr = src_->ptr<float>(i) + stripeStart;
                float* dstptr = dst_->ptr<float>(i) + stripeStart;
                func_->apply(srcptr, dstptr, (int)(stripeEnd - stripeStart), planeSize, 0, outCn);
            }
        }
    };

    ElementWiseLayer(const Func &f=Func()) { func = f; }

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        return func.supportBackend(backendId, this->preferableTarget);
    }

    virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE
    {
        func.finalize();
    }

    virtual Ptr<BackendNode> tryAttach(const Ptr<BackendNode>& node) CV_OVERRIDE
    {
        switch (node->backendId)
        {
            case DNN_BACKEND_HALIDE:
            {
#ifdef HAVE_HALIDE
                auto base = node.dynamicCast<HalideBackendNode>();
                Halide::Func& input = base->funcs.back();
                Halide::Var x("x"), y("y"), c("c"), n("n");
                Halide::Func top = (this->name.empty() ? Halide::Func() : Halide::Func(this->name));
                func.attachHalide(input(x, y, c, n), top);
                return Ptr<BackendNode>(new HalideBackendNode(base, top));
#endif // HAVE_HALIDE
                break;
            }
        }
        return Ptr<BackendNode>();
    }

    virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_HALIDE
        Halide::Buffer<float> input = halideBuffer(inputs[0]);
        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Func top = (this->name.empty() ? Halide::Func() : Halide::Func(this->name));
        func.attachHalide(input(x, y, c, n), top);
        return Ptr<BackendNode>(new HalideBackendNode(top));
#endif // HAVE_HALIDE
        return Ptr<BackendNode>();
    }

#ifdef HAVE_CANN
    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        return func.initCannOp(inputsWrapper, index, nodes);
    }
#endif // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
        auto node = func.initNgraphAPI(ieInpNode);
        return Ptr<BackendNode>(new InfEngineNgraphNode(node));
    }
#endif // HAVE_DNN_NGRAPH

#ifdef HAVE_WEBNN
    virtual Ptr<BackendNode> initWebnn(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        Ptr<WebnnBackendNode> node = nodes[0].dynamicCast<WebnnBackendNode>();
        auto& webnnInpOperand = node->operand;
        auto& webnnGraphBuilder = node->net->builder;
        auto operand = func.initWebnnAPI(webnnGraphBuilder, webnnInpOperand);
        return Ptr<BackendNode>(new WebnnBackendNode(operand));
    }
#endif

    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> >& inputs) CV_OVERRIDE
    {
#ifdef HAVE_VULKAN
        return Ptr<BackendNode>(new VkComBackendNode(inputs, func.initVkCom()));
#endif // HAVE_VULKAN
        return Ptr<BackendNode>();
    }

    virtual bool tryFuse(Ptr<dnn::Layer>& top) CV_OVERRIDE
    {
        return func.tryFuse(top);
    }

    void getScaleShift(Mat& scale_, Mat& shift_) const CV_OVERRIDE
    {
        func.getScaleShift(scale_, shift_);
    }

    bool getMemoryShapes(const std::vector<MatShape> &inputs,
                         const int requiredOutputs,
                         std::vector<MatShape> &outputs,
                         std::vector<MatShape> &internals) const CV_OVERRIDE
    {
        Layer::getMemoryShapes(inputs, requiredOutputs, outputs, internals);
        return true;
    }

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(this->preferableTarget),
                   func.applyOCL(inputs_arr, outputs_arr, internals_arr))

        if (inputs_arr.depth() == CV_16S)
        {
            Layer::forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        for (size_t i = 0; i < inputs.size(); i++)
        {
            const Mat &src = inputs[i];
            Mat &dst = outputs[i];
            CV_Assert(src.size == dst.size && src.type() == dst.type() &&
                      src.isContinuous() && dst.isContinuous() && src.type() == CV_32F);

            const int nstripes = getNumThreads();
            PBody body(func, src, dst, nstripes);
            parallel_for_(Range(0, nstripes), body, nstripes);
        }
    }

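    // Entry point used when this activation has been fused into a preceding
    // layer (e.g. convolution): the parent layer calls it directly on slices
    // of its own output instead of going through forward().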
    void forwardSlice(const float* src, float* dst, int len, size_t planeSize, int cn0, int cn1) const CV_OVERRIDE
    {
        func.apply(src, dst, len, planeSize, cn0, cn1);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);
        return func.initCUDA(Layer::preferableTarget, context->stream);
    }
#endif

    virtual bool tryQuantize(const std::vector<std::vector<float> > &scales,
                             const std::vector<std::vector<int> > &zeropoints, LayerParams& params) CV_OVERRIDE
    {
        return func.tryQuantize(scales, zeropoints, params);
    }

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
    {
        long flops = 0;
        for (int i = 0; i < outputs.size(); i++)
        {
            flops += total(outputs[i]) * func.getFLOPSPerElement();
        }
        return flops;
    }

    Func func;
};

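// Builds the "-DT=..." build option for the OpenCL activation kernels.
// CV_16S buffers carry packed fp16 data in the DNN module, so the "short"
// type name is translated to OpenCL "half".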
#ifdef HAVE_OPENCL
static String oclGetTMacro(const UMat &m)
{
    String str_name = ocl::typeToStr(m.type());

    if (str_name == "short")
        str_name = "half";

    return format("-DT=%s -Dconvert_T=convert_%s ", str_name.c_str(), str_name.c_str());
}
#endif

struct BaseFunctor
{
    void finalize() {}

    bool tryFuse(Ptr<dnn::Layer>&) { return false; }

    void getScaleShift(Mat&, Mat&) const {}

    bool tryQuantize(const std::vector<std::vector<float>>&, const std::vector<std::vector<int>>&, LayerParams&) { return false; }
};

struct ReLUFunctor : public BaseFunctor
{
    typedef ReLULayer Layer;
    float slope;

    explicit ReLUFunctor(float slope_=1.f) : slope(slope_) {}

    bool supportBackend(int backendId, int)
    {
#ifdef HAVE_DNN_NGRAPH
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return true;
#endif
#ifdef HAVE_WEBNN
        if (backendId == DNN_BACKEND_WEBNN) {
            // TODO: support PRELU
            if (slope != 0)
            {
                CV_LOG_WARNING(NULL, "PRELU is not supported now.");
            }
            return slope == 0;
        }
#endif
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_VKCOM ||
               backendId == DNN_BACKEND_CANN;
    }

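    // Vectorized (leaky) ReLU: the SIMD loop processes 16 floats per
    // iteration with universal intrinsics; v_select keeps x where x >= 0
    // and takes slope*x elsewhere, and a scalar loop handles the tail.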
    void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
    {
        float s = slope;
        for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
        {
            int i = 0;
#if CV_SIMD128
            v_float32x4 s4 = v_setall_f32(s), z = v_setzero_f32();
            for( ; i <= len - 16; i += 16 )
            {
                v_float32x4 x0 = v_load(srcptr + i);
                v_float32x4 x1 = v_load(srcptr + i + 4);
                v_float32x4 x2 = v_load(srcptr + i + 8);
                v_float32x4 x3 = v_load(srcptr + i + 12);
                x0 = v_select(x0 >= z, x0, x0*s4);
                x1 = v_select(x1 >= z, x1, x1*s4);
                x2 = v_select(x2 >= z, x2, x2*s4);
                x3 = v_select(x3 >= z, x3, x3*s4);
                v_store(dstptr + i, x0);
                v_store(dstptr + i + 4, x1);
                v_store(dstptr + i + 8, x2);
                v_store(dstptr + i + 12, x3);
            }
#endif
            for( ; i < len; i++ )
            {
                float x = srcptr[i];
                dstptr[i] = x >= 0.f ? x : s*x;
            }
        }
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ReLUOp>(target, stream, slope);
    }
#endif

#ifdef HAVE_OPENCL
    bool initKernel(ocl::Kernel &ker, const UMat &src) const
    {
        const char *buildoptSlope = (slope == 0) ? "-DRELU_NO_SLOPE" : "";
        String buildopt = oclGetTMacro(src) + buildoptSlope;

        if (!ker.create("ReLUForward", ocl::dnn::activations_oclsrc, buildopt))
            return false;

        if (slope != 0)
            ker.set(3, (float)slope);

        return true;
    }

    bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);

        for (size_t i = 0; i < inputs.size(); i++)
        {
            UMat& src = inputs[i];
            UMat& dst = outputs[i];
            CV_Assert(src.isContinuous() && dst.isContinuous() && !src.offset && !dst.offset);

            ocl::Kernel kernel;
            CV_Assert(initKernel(kernel, src));
            kernel.set(0, (int)src.total());
            kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
            kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));

            size_t gSize = src.total();
            CV_Assert(kernel.run(1, &gSize, NULL, false));
        }

        return true;
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        if (slope)
        {
            top(x, y, c, n) = select(input >= 0.0f, input, slope * input);
        }
        else
        {
            top(x, y, c, n) = max(input, 0.0f);
        }
    }
#endif // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        auto x_desc = x->getTensorDesc();

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);

        if (slope)
        {
            std::string op_name = cv::format("leakyrelu_%d", index);
            auto op = std::make_shared<ge::op::LeakyRelu>(op_name);

            op->set_input_x_by_name(*op_x, "y");
            op->update_input_desc_x(*x_desc);

            op->set_attr_negative_slope(slope);

            op->update_output_desc_y(*output_desc);

            return Ptr<BackendNode>(new CannBackendNode(op));
        }

        std::string op_name = cv::format("relu_%d", index);
        auto op = std::make_shared<ge::op::Relu>(op_name); // FIXIT: Relu6?

        op->set_input_x_by_name(*op_x, "y");
        op->update_input_desc_x(*x_desc);

        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        if (slope) {
            auto param = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &slope);
            return std::make_shared<ngraph::op::PRelu>(node, param);
        }
        return std::make_shared<ngraph::op::Relu>(node);
    }
#endif // HAVE_DNN_NGRAPH

#ifdef HAVE_WEBNN
    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
    {
        return builder.Relu(input);
    }
#endif

#ifdef HAVE_VULKAN
    std::shared_ptr<vkcom::OpBase> initVkCom()
    {
        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpReLU(slope));
        return op;
    }
#endif // HAVE_VULKAN

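    // INT8 quantization: leaky ReLU is realized as a 256-entry lookup table.
    // Each possible int8 input is dequantized with the input scale/zero
    // point, passed through the activation, and requantized with the output
    // scale/zero point.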
    bool tryQuantize(const std::vector<std::vector<float> > &scales,
                     const std::vector<std::vector<int> > &zeropoints, LayerParams& params)
    {
        if (slope != 0.f)
        {
            float inpScale = scales[0][0], outScale = scales[1][0];
            int inpZp = zeropoints[0][0], outZp = zeropoints[1][0];

            Mat lookUpTable(1, 256, CV_8S);
            int8_t* table = lookUpTable.ptr<int8_t>();
            for (int i = -128; i < 128; i++)
            {
                float x = inpScale*(i - inpZp);
                float y = x >= 0.f ? x : slope*x;
                int quantized = outZp + (int)std::round(y/outScale);
                table[i+128] = saturate_cast<int8_t>(quantized);
            }
            params.blobs.clear();
            params.blobs.push_back(lookUpTable);
        }
        params.set("input_scale", scales[0][0]);
        params.set("input_zeropoint", zeropoints[0][0]);
        params.set("slope", slope);
        return true;
    }

    int64 getFLOPSPerElement() const { return 1; }
};

struct ReLU6Functor : public BaseFunctor
{
    typedef ReLU6Layer Layer;
    float minValue, maxValue;

    ReLU6Functor(float minValue_ = 0.0f, float maxValue_ = 6.0f)
        : minValue(minValue_), maxValue(maxValue_)
    {
        CV_Assert(minValue <= maxValue);
    }

    bool supportBackend(int backendId, int)
    {
#ifdef HAVE_INF_ENGINE
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return true;
#endif
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_WEBNN ||
               backendId == DNN_BACKEND_CANN;
    }

    void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
    {
        for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
        {
            int i = 0;
#if CV_SIMD128
            v_float32x4 minV = v_setall_f32(minValue), maxV = v_setall_f32(maxValue);
            for( ; i <= len - 16; i += 16 )
            {
                v_float32x4 x0 = v_load(srcptr + i);
                v_float32x4 x1 = v_load(srcptr + i + 4);
                v_float32x4 x2 = v_load(srcptr + i + 8);
                v_float32x4 x3 = v_load(srcptr + i + 12);
                x0 = v_min(v_max(minV, x0), maxV);
                x1 = v_min(v_max(minV, x1), maxV);
                x2 = v_min(v_max(minV, x2), maxV);
                x3 = v_min(v_max(minV, x3), maxV);
                v_store(dstptr + i, x0);
                v_store(dstptr + i + 4, x1);
                v_store(dstptr + i + 8, x2);
                v_store(dstptr + i + 12, x3);
            }
#endif
            for( ; i < len; i++ )
            {
                float x = srcptr[i];
                if (x >= minValue)
                    dstptr[i] = x <= maxValue ? x : maxValue;
                else
                    dstptr[i] = minValue;
            }
        }
    }

#ifdef HAVE_OPENCL
    bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);
        String buildopt = oclGetTMacro(inputs[0]);

        for (size_t i = 0; i < inputs.size(); i++)
        {
            UMat& src = inputs[i];
            UMat& dst = outputs[i];

            ocl::Kernel kernel("ReLU6Forward", ocl::dnn::activations_oclsrc, buildopt);
            kernel.set(0, (int)src.total());
            kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
            kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
            kernel.set(3, (float)minValue);
            kernel.set(4, (float)maxValue);

            size_t gSize = src.total();
            CV_Assert(kernel.run(1, &gSize, NULL, false));
        }

        return true;
    }
#endif

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ClippedReLUOp>(target, stream, minValue, maxValue);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = clamp(input, minValue, maxValue);
    }
#endif // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();

        std::string op_name = cv::format("clip_%d", index);
        auto op = std::make_shared<ge::op::ClipByValue>(op_name);

        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*op_x, "y");
        auto x_desc = x->getTensorDesc();
        op->update_input_desc_x(*x_desc);

        Mat min_value_mat(1, 1, CV_32F, Scalar(minValue));
        std::vector<int> shape_{1};
        auto op_const_minv = std::make_shared<CannConstOp>(min_value_mat.data, min_value_mat.type(), shape_, cv::format("%s_min_value", op_name.c_str()));
        op->set_input_clip_value_min(*(op_const_minv->getOp()));
        op->update_input_desc_clip_value_min(*(op_const_minv->getTensorDesc()));

        Mat max_value_mat(1, 1, CV_32F, Scalar(maxValue));
        auto op_const_maxv = std::make_shared<CannConstOp>(max_value_mat.data, max_value_mat.type(), shape_, cv::format("%s_max_value", op_name.c_str()));
        op->set_input_clip_value_max(*(op_const_maxv->getOp()));
        op->update_input_desc_clip_value_max(*(op_const_maxv->getTensorDesc()));

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        return std::make_shared<ngraph::op::Clamp>(node, minValue, maxValue);
    }
#endif // HAVE_DNN_NGRAPH

#ifdef HAVE_WEBNN
    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
    {
        ml::ClampOptions clampOptions;
        clampOptions.minValue = minValue;
        clampOptions.maxValue = maxValue;
        return builder.Clamp(input, &clampOptions);
    }
#endif

#ifdef HAVE_VULKAN
    std::shared_ptr<vkcom::OpBase> initVkCom()
    {
        // TODO: add vkcom implementation
        return std::shared_ptr<vkcom::OpBase>();
    }
#endif // HAVE_VULKAN

    bool tryQuantize(const std::vector<std::vector<float> > &scales,
                     const std::vector<std::vector<int> > &zeropoints, LayerParams& params)
    {
        params.set("input_scale", scales[0][0]);
        params.set("input_zeropoint", zeropoints[0][0]);
        return true;
    }

    int64 getFLOPSPerElement() const { return 2; }
};

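// CRTP base for simple elementwise functors: a derived class only provides
// a scalar calculate(), which is inlined into the generic CPU loop, reused
// by the OpenCL kernel dispatch (via ocl_kernel_name), and sampled to build
// the int8 lookup table in tryQuantize().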
template <class T>
struct BaseDefaultFunctor : public BaseFunctor
{
    void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
    {
        for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
        {
            for( int i = 0; i < len; i++ )
            {
                float x = srcptr[i];
                dstptr[i] = static_cast<const T*>(this)->calculate(x);
            }
        }
    }

#ifdef HAVE_OPENCL
    bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);
        String buildopt = oclGetTMacro(inputs[0]);

        for (size_t i = 0; i < inputs.size(); i++)
        {
            UMat& src = inputs[i];
            UMat& dst = outputs[i];

            ocl::Kernel kernel(ocl_kernel_name, ocl::dnn::activations_oclsrc, buildopt);
            kernel.set(0, static_cast<int>(src.total()));
            kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
            kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
            static_cast<const T*>(this)->setKernelParams(kernel);

            size_t gSize = src.total();
            CV_Assert(kernel.run(1, &gSize, nullptr, false));
        }

        return true;
    }
#endif

    inline void setKernelParams(ocl::Kernel& kernel) const {}

    bool tryQuantize(const std::vector<std::vector<float> > &scales,
                     const std::vector<std::vector<int> > &zeropoints, LayerParams& params)
    {
        float inpScale = scales[0][0], outScale = scales[1][0];
        int inpZp = zeropoints[0][0], outZp = zeropoints[1][0];

        Mat lookUpTable(1, 256, CV_8S);
        int8_t* table = lookUpTable.ptr<int8_t>();
        for (int i = -128; i < 128; i++)
        {
            float x = inpScale * static_cast<float>(i - inpZp);
            float y = static_cast<T const*>(this)->calculate(x);
            int quantized = outZp + static_cast<int>(std::round(y/outScale));
            table[i+128] = saturate_cast<int8_t>(quantized);
        }
        params.blobs.clear();
        params.blobs.push_back(lookUpTable);
        params.set("input_scale", scales[0][0]);
        params.set("input_zeropoint", zeropoints[0][0]);
        return true;
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        CV_Error(Error::StsNotImplemented, "");
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        CV_Error(Error::StsNotImplemented, "");
    }
#endif // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        CV_Error(Error::StsNotImplemented, "");
    }
#endif // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        CV_Error(Error::StsNotImplemented, "");
    }
#endif // HAVE_DNN_NGRAPH

#ifdef HAVE_WEBNN
    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
    {
        CV_Error(Error::StsNotImplemented, "");
    }
#endif

#ifdef HAVE_VULKAN
    std::shared_ptr<vkcom::OpBase> initVkCom()
    {
        // TODO: add vkcom implementation
        return std::shared_ptr<vkcom::OpBase>();
    }
#endif // HAVE_VULKAN

private:
    static const char* const ocl_kernel_name;
};

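// Exact GELU: x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2))), where Phi is
// the standard normal CDF.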
struct GeluFunctor : public BaseDefaultFunctor<GeluFunctor>
{
    typedef GeluLayer Layer;

    explicit GeluFunctor() {}

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV;
    }

    inline float calculate(float x) const
    {
        return 0.5f * x * (1.0f + erf(x * M_SQRT1_2));
    }

    int64 getFLOPSPerElement() const { return 100; }
};

template<>
const char* const BaseDefaultFunctor<GeluFunctor>::ocl_kernel_name = "GeluForward";

namespace GeluApproximationConstants
{
static constexpr float sqrt_2_pi = 0.7978845834732056f;
static constexpr float coef_sqrt_2_pi = 0.044714998453855515f * sqrt_2_pi;
}

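// Tanh approximation of GELU, as used by many transformer implementations:
// GELU(x) ~= 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))),
// rewritten below as tanh(x * (sqrt_2_pi + coef_sqrt_2_pi * x * x)).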
struct GeluApproximationFunctor : public BaseDefaultFunctor<GeluApproximationFunctor>
{
    typedef GeluApproximationLayer Layer;

    explicit GeluApproximationFunctor() {}

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV;
    }

    inline float calculate(float x) const
    {
        return 0.5f * x * (1.f + tanh(x * (GeluApproximationConstants::sqrt_2_pi +
                                           GeluApproximationConstants::coef_sqrt_2_pi * x * x)));
    }

    int64 getFLOPSPerElement() const { return 100; }
};

template<>
const char* const BaseDefaultFunctor<GeluApproximationFunctor>::ocl_kernel_name = "GeluApproximationForward";

struct TanHFunctor : public BaseDefaultFunctor<TanHFunctor>
{
    typedef TanHLayer Layer;

    bool supportBackend(int backendId, int)
    {
#ifdef HAVE_INF_ENGINE
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return true;
#endif
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_CANN;
    }

    inline float calculate(float x) const
    {
        return tanh(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::TanHOp>(target, stream);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = tanh(input);
    }
#endif // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();

        std::string op_name = cv::format("tanh_%d", index);
        auto op = std::make_shared<ge::op::Tanh>(op_name);

        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*op_x, "y");
        auto x_desc = x->getTensorDesc();
        op->update_input_desc_x(*x_desc);

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        return std::make_shared<ngraph::op::Tanh>(node);
    }
#endif // HAVE_DNN_NGRAPH

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const TanHFunctor::BaseDefaultFunctor<TanHFunctor>::ocl_kernel_name = "TanHForward";

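// Swish(x) = x * sigmoid(x) = x / (1 + exp(-x)), also known as SiLU.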
struct SwishFunctor : public BaseDefaultFunctor<SwishFunctor>
{
    typedef SwishLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
               backendId == DNN_BACKEND_CANN;
    }

    inline float calculate(float x) const
    {
        return x / (1.f + exp(-x));
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::SwishOp>(target, stream);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = input / (1.0f + exp(-input));
    }
#endif // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();

        std::string op_name = cv::format("swish_%d", index);
        auto op = std::make_shared<ge::op::Swish>(op_name);

        op->set_attr_scale(1.0f);

        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*op_x, "y");
        auto x_desc = x->getTensorDesc();
        op->update_input_desc_x(*x_desc);

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        auto sigmoid = std::make_shared<ngraph::op::Sigmoid>(node);
        return std::make_shared<ngraph::op::v1::Multiply>(node, sigmoid);
    }
#endif // HAVE_DNN_NGRAPH

    int64 getFLOPSPerElement() const { return 3; }
};

template<>
const char* const SwishFunctor::BaseDefaultFunctor<SwishFunctor>::ocl_kernel_name = "SwishForward";

struct MishFunctor : public BaseDefaultFunctor<MishFunctor>
{
    typedef MishLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH ||
               backendId == DNN_BACKEND_CANN;
    }

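    // Mish(x) = x * tanh(softplus(x)) = x * tanh(ln(1 + e^x)).
    // With y = 1 + e^x, tanh(ln y) = (y^2 - 1) / (y^2 + 1); substituting
    // n = e^x * (e^x + 2) = y^2 - 1 yields the n / (n + 2) form below.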
    inline float calculate(float x) const
    {
        // Use fast approximation introduced in https://github.com/opencv/opencv/pull/17200
        if (x >= 8.f)
        {
            return x;
        }

        float eX = exp(x);
        float n = (eX + 2.f) * eX;
        return (x * n) / (n + 2.f);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::MishOp>(target, stream);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = input * tanh(log(1.0f + exp(input)));
    }
#endif // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();

        std::string op_name = cv::format("mish_%d", index);
        auto op = std::make_shared<ge::op::Mish>(op_name);

        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*op_x, "y");
        auto x_desc = x->getTensorDesc();
        op->update_input_desc_x(*x_desc);

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        float one = 1.0f;
        auto constant = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &one);
        auto exp_node = std::make_shared<ngraph::op::v0::Exp>(node);
        auto sum = std::make_shared<ngraph::op::v1::Add>(constant, exp_node, ngraph::op::AutoBroadcastType::NUMPY);
        auto log_node = std::make_shared<ngraph::op::v0::Log>(sum);
        auto tanh_node = std::make_shared<ngraph::op::Tanh>(log_node);
        return std::make_shared<ngraph::op::v1::Multiply>(node, tanh_node);
    }
#endif // HAVE_DNN_NGRAPH

    int64 getFLOPSPerElement() const { return 3; }
};

template<>
const char* const MishFunctor::BaseDefaultFunctor<MishFunctor>::ocl_kernel_name = "MishForward";

struct SigmoidFunctor : public BaseDefaultFunctor<SigmoidFunctor>
{
    typedef SigmoidLayer Layer;

    bool supportBackend(int backendId, int)
    {
#ifdef HAVE_INF_ENGINE
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return true;
#endif
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_CANN;
    }

    inline float calculate(float x) const
    {
        return 1.f / (1.f + exp(-x));
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::SigmoidOp>(target, stream);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = 1.0f / (1.0f + exp(-input));
    }
#endif // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();

        std::string op_name = cv::format("sigmoid_%d", index);
        auto op = std::make_shared<ge::op::Sigmoid>(op_name);

        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*op_x, "y");
        auto x_desc = x->getTensorDesc();
        op->update_input_desc_x(*x_desc);

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        return std::make_shared<ngraph::op::Sigmoid>(node);
    }
#endif // HAVE_DNN_NGRAPH

    int64 getFLOPSPerElement() const { return 3; }
};

template<>
const char* const SigmoidFunctor::BaseDefaultFunctor<SigmoidFunctor>::ocl_kernel_name = "SigmoidForward";

struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
{
    typedef ELULayer Layer;
    float alpha;

    explicit ELUFunctor(float alpha_ = 1.f) : alpha(alpha_) {}

    bool supportBackend(int backendId, int)
    {
#ifdef HAVE_INF_ENGINE
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return true;
#endif
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_CANN;
    }

    inline float calculate(float x) const
    {
        return x >= 0.f ? x : alpha * (exp(x) - 1.f);
    }

    inline void setKernelParams(ocl::Kernel& kernel) const
    {
        kernel.set(3, alpha);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ELUOp>(target, stream, alpha);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = select(input >= 0.0f, input, alpha * (exp(input) - 1));
    }
#endif // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();

        std::string op_name = cv::format("elu_%d", index);
        auto op = std::make_shared<ge::op::Elu>(op_name);

        op->set_attr_alpha(alpha);

        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*op_x, "y");
        auto x_desc = x->getTensorDesc();
        op->update_input_desc_x(*x_desc);

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        return std::make_shared<ngraph::op::Elu>(node, alpha);
    }
#endif // HAVE_DNN_NGRAPH

    int64 getFLOPSPerElement() const { return 2; }
};

template<>
const char* const ELUFunctor::BaseDefaultFunctor<ELUFunctor>::ocl_kernel_name = "ELUForward";

struct AbsValFunctor : public BaseDefaultFunctor<AbsValFunctor>
{
    typedef AbsLayer Layer;

    bool supportBackend(int backendId, int)
    {
#ifdef HAVE_INF_ENGINE
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return true;
#endif
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_CANN;
    }

    inline float calculate(float x) const
    {
        return abs(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::AbsValOp>(target, stream);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = abs(input);
    }
#endif // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();

        std::string op_name = cv::format("abs_%d", index);
        auto op = std::make_shared<ge::op::Abs>(op_name);

        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*op_x, "y");
        auto x_desc = x->getTensorDesc();
        op->update_input_desc_x(*x_desc);

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        float coeff = -0.999999f;
        // float coeff = preferableTarget == DNN_TARGET_MYRIAD ? -0.999f : -0.999999f;
        auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &coeff);
        return std::make_shared<ngraph::op::PRelu>(node, slope);
    }
#endif // HAVE_DNN_NGRAPH

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const AbsValFunctor::BaseDefaultFunctor<AbsValFunctor>::ocl_kernel_name = "AbsValForward";

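// BNLL(x) = log(1 + exp(x)) (binomial normal log likelihood, from Caffe).
// The branch below keeps exp() from overflowing for large positive x.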
struct BNLLFunctor : public BaseDefaultFunctor<BNLLFunctor>
{
    typedef BNLLLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_CANN;
    }

    inline float calculate(float x) const
    {
        // https://github.com/BVLC/caffe/blame/1.0/src/caffe/layers/bnll_layer.cpp#L17
        return x > 0 ? x + log(1.f + exp(-x)) : log(1.f + exp(x));
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::BNLLOp>(target, stream);
    }
#endif

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();

        std::string op_name = cv::format("bnll_%d", index);
        auto op = std::make_shared<ge::op::BNLL>(op_name);

        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*op_x, "y");
        auto x_desc = x->getTensorDesc();
        op->update_input_desc_x(*x_desc);

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif // HAVE_CANN

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        // https://github.com/BVLC/caffe/blame/1.0/src/caffe/layers/bnll_layer.cpp#L17
        top(x, y, c, n) = max(input, 0) + log(1.0f + exp(-abs(input)));
    }
#endif // HAVE_HALIDE

    int64 getFLOPSPerElement() const { return 5; }
};

template<>
const char* const BNLLFunctor::BaseDefaultFunctor<BNLLFunctor>::ocl_kernel_name = "BNLLForward";

struct CeilFunctor : public BaseDefaultFunctor<CeilFunctor>
{
    typedef CeilLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
    }

    inline float calculate(float x) const
    {
        return ceil(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::CeilOp>(target, stream);
    }
#endif

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();

        std::string op_name = cv::format("ceil_%d", index);
        auto op = std::make_shared<ge::op::Ceil>(op_name);

        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*op_x, "y");
        auto x_desc = x->getTensorDesc();
        op->update_input_desc_x(*x_desc);

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif // HAVE_CANN

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = ceil(input);
    }
#endif // HAVE_HALIDE

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<CeilFunctor>::ocl_kernel_name = "CeilForward";

struct FloorFunctor : public BaseDefaultFunctor<FloorFunctor>
{
    typedef FloorLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               backendId == DNN_BACKEND_CANN;
    }

    inline float calculate(float x) const
    {
        return floor(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::FloorOp>(target, stream);
    }
#endif

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();

        std::string op_name = cv::format("floor_%d", index);
        auto op = std::make_shared<ge::op::Floor>(op_name);

        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        op->set_input_x_by_name(*op_x, "y");
        auto x_desc = x->getTensorDesc();
        op->update_input_desc_x(*x_desc);

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif // HAVE_CANN

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = floor(input);
    }
#endif // HAVE_HALIDE

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<FloorFunctor>::ocl_kernel_name = "FloorForward";

struct LogFunctor : public BaseDefaultFunctor<LogFunctor>
{
    typedef LogLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
    }

    inline float calculate(float x) const
    {
        return log(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::LogOp>(target, stream);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = log(input);
    }
#endif // HAVE_HALIDE

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<LogFunctor>::ocl_kernel_name = "LogForward";

struct RoundFunctor : public BaseDefaultFunctor<RoundFunctor>
{
    typedef RoundLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
    }

    inline float calculate(float x) const
    {
        // Rounds to even numbers in halfway cases, so 2.5 -> 2, -2.5 -> -2
        int old_rounding_direction = std::fegetround();
        std::fesetround(FE_TONEAREST);
        float y = std::nearbyint(x);
        std::fesetround(old_rounding_direction);
        return y;
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::RoundOp>(target, stream);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = round(input);
    }
#endif // HAVE_HALIDE

    int64 getFLOPSPerElement() const { return 2; }
};

template<>
const char* const BaseDefaultFunctor<RoundFunctor>::ocl_kernel_name = "RoundForward";

struct SqrtFunctor : public BaseDefaultFunctor<SqrtFunctor>
{
    typedef SqrtLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
    }

    inline float calculate(float x) const
    {
        return sqrt(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::SqrtOp>(target, stream);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = sqrt(input);
    }
#endif // HAVE_HALIDE

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        return std::make_shared<ngraph::op::v0::Sqrt>(node);
    }
#endif // HAVE_DNN_NGRAPH

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<SqrtFunctor>::ocl_kernel_name = "SqrtForward";

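// Logical NOT on {0, 1}-valued inputs: floor(1 - x) maps 0 -> 1 and 1 -> 0.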
struct NotFunctor : public BaseDefaultFunctor<NotFunctor>
{
    typedef NotLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA || backendId == DNN_BACKEND_HALIDE;
    }

    inline float calculate(float x) const
    {
        return floor(1.f - x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::NotOp>(target, stream);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = floor(1.0f - input);
    }
#endif // HAVE_HALIDE

    int64 getFLOPSPerElement() const { return 2; }
};

template<>
const char* const BaseDefaultFunctor<NotFunctor>::ocl_kernel_name = "NotForward";

struct AcosFunctor : public BaseDefaultFunctor<AcosFunctor>
{
    typedef AcosLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return acos(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::AcosOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<AcosFunctor>::ocl_kernel_name = "AcosForward";

struct AcoshFunctor : public BaseDefaultFunctor<AcoshFunctor>
{
    typedef AcoshLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return acosh(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::AcoshOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<AcoshFunctor>::ocl_kernel_name = "AcoshForward";

struct AsinFunctor : public BaseDefaultFunctor<AsinFunctor>
{
    typedef AsinLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return asin(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::AsinOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<AsinFunctor>::ocl_kernel_name = "AsinForward";

struct AsinhFunctor : public BaseDefaultFunctor<AsinhFunctor>
{
    typedef AsinhLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return asinh(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::AsinhOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<AsinhFunctor>::ocl_kernel_name = "AsinhForward";

struct AtanFunctor : public BaseDefaultFunctor<AtanFunctor>
{
    typedef AtanLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return atan(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::AtanOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<AtanFunctor>::ocl_kernel_name = "AtanForward";

struct AtanhFunctor : public BaseDefaultFunctor<AtanhFunctor>
{
    typedef AtanhLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return atanh(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::AtanhOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<AtanhFunctor>::ocl_kernel_name = "AtanhForward";

struct CosFunctor : public BaseDefaultFunctor<CosFunctor>
{
    typedef CosLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return cos(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::CosOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<CosFunctor>::ocl_kernel_name = "CosForward";

struct CoshFunctor : public BaseDefaultFunctor<CoshFunctor>
{
    typedef CoshLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return cosh(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::CoshOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<CoshFunctor>::ocl_kernel_name = "CoshForward";

struct ErfFunctor : public BaseDefaultFunctor<ErfFunctor>
{
    typedef ErfLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return erf(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ErfOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<ErfFunctor>::ocl_kernel_name = "ErfForward";

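// HardSwish(x) = x * clamp(x / 6 + 0.5, 0, 1), a piecewise-linear
// approximation of Swish.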
struct HardSwishFunctor : public BaseDefaultFunctor<HardSwishFunctor>
{
    typedef HardSwishLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return x * max(0.f, min(1.f, x / 6.f + 0.5f));
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::HardSwishOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<HardSwishFunctor>::ocl_kernel_name = "HardSwishForward";


struct SinFunctor : public BaseDefaultFunctor<SinFunctor>
{
    typedef SinLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return sin(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::SinOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<SinFunctor>::ocl_kernel_name = "SinForward";

struct SinhFunctor : public BaseDefaultFunctor<SinhFunctor>
{
    typedef SinhLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return sinh(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::SinhOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<SinhFunctor>::ocl_kernel_name = "SinhForward";
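
// Softplus(x) = ln(1 + e^x), computed via log1p for accuracy when exp(x) is small.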
struct SoftplusFunctor : public BaseDefaultFunctor<SoftplusFunctor>
{
    typedef SoftplusLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return log1p(exp(x));
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::SoftplusOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<SoftplusFunctor>::ocl_kernel_name = "SoftplusForward";

struct SoftsignFunctor : public BaseDefaultFunctor<SoftsignFunctor>
{
    typedef SoftsignLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return x / (1.f + abs(x));
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::SoftsignOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<SoftsignFunctor>::ocl_kernel_name = "SoftsignForward";

struct TanFunctor : public BaseDefaultFunctor<TanFunctor>
{
    typedef TanLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return tan(x);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::TanOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<TanFunctor>::ocl_kernel_name = "TanForward";
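
// CELU(x) = max(0, x) + min(0, alpha * (exp(x / alpha) - 1)) (ONNX Celu);
// expm1 evaluates exp(x / alpha) - 1 accurately near zero.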
struct CeluFunctor : public BaseDefaultFunctor<CeluFunctor>
{
    typedef CeluLayer Layer;

    float alpha;

    explicit CeluFunctor(float alpha_ = 1.f) : alpha(alpha_) {}

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return max(0.f, x) + min(0.f, alpha * expm1(x / alpha));
    }

    inline void setKernelParams(ocl::Kernel& kernel) const
    {
        kernel.set(3, alpha);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::CeluOp>(target, stream, alpha);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<CeluFunctor>::ocl_kernel_name = "CeluForward";
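
// HardSigmoid(x) = max(0, min(1, alpha * x + beta)), a piecewise-linear
// approximation of the sigmoid; ONNX defaults are alpha = 0.2, beta = 0.5.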
struct HardSigmoidFunctor : public BaseDefaultFunctor<HardSigmoidFunctor>
{
    typedef HardSigmoidLayer Layer;

    float alpha;
    float beta;

    explicit HardSigmoidFunctor(float alpha_ = 0.2f, float beta_ = 0.5f) : alpha(alpha_), beta(beta_) {}

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return max(0.f, min(1.f, alpha * x + beta));
    }

    inline void setKernelParams(ocl::Kernel& kernel) const
    {
        kernel.set(3, alpha);
        kernel.set(4, beta);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::HardSigmoidOp>(target, stream, alpha, beta);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<HardSigmoidFunctor>::ocl_kernel_name = "HardSigmoidForward";
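
// SELU(x) = gamma * (x if x > 0 else alpha * (exp(x) - 1)); the default alpha and
// gamma are the self-normalizing constants from Klambauer et al.,
// "Self-Normalizing Neural Networks" (2017).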
struct SeluFunctor : public BaseDefaultFunctor<SeluFunctor>
{
    typedef SeluLayer Layer;

    float alpha;
    float gamma;

    explicit SeluFunctor(float alpha_ = 1.67326319217681884765625f,
                         float gamma_ = 1.05070102214813232421875f) : alpha(alpha_), gamma(gamma_) {}

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return gamma * (x > 0.f ? x : alpha * expm1(x));
    }

    inline void setKernelParams(ocl::Kernel& kernel) const
    {
        kernel.set(3, alpha);
        kernel.set(4, gamma);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::SeluOp>(target, stream, alpha, gamma);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<SeluFunctor>::ocl_kernel_name = "SeluForward";
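
// ThresholdedRelu(x) = x if x > alpha else 0 (ONNX ThresholdedRelu, default alpha = 1).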
struct ThresholdedReluFunctor : public BaseDefaultFunctor<ThresholdedReluFunctor>
{
    typedef ThresholdedReluLayer Layer;

    float alpha;

    explicit ThresholdedReluFunctor(float alpha_ = 1.f) : alpha(alpha_) {}

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return x > alpha ? x : 0.f;
    }

    inline void setKernelParams(ocl::Kernel& kernel) const
    {
        kernel.set(3, alpha);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ThresholdedReluOp>(target, stream, alpha);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const BaseDefaultFunctor<ThresholdedReluFunctor>::ocl_kernel_name = "ThresholdedReluForward";
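
// PowerLayer computes y = (scale * x + shift)^power; the origin* members keep the
// model's parameters so finalize() can reset anything changed by tryFuse().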
struct PowerFunctor : public BaseFunctor
{
    typedef PowerLayer Layer;

    float power, scale, shift;
    float originPower, originScale, originShift;

    explicit PowerFunctor(float power_ = 1.f, float scale_ = 1.f, float shift_ = 0.f)
        : power(power_), scale(scale_), shift(shift_),
          originPower(power_), originScale(scale_), originShift(shift_) {}

    bool supportBackend(int backendId, int targetId)
    {
#ifdef HAVE_INF_ENGINE
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return true;
#endif
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE;
    }

    void finalize()
    {
        power = originPower;
        scale = originScale;
        shift = originShift;
    }

    void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
    {
        float a = scale, b = shift, p = power;
        if( p == 1.f )
        {
            for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
            {
                for( int i = 0; i < len; i++ )
                {
                    float x = srcptr[i];
                    dstptr[i] = a*x + b;
                }
            }
        }
        else
        {
            for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
            {
                for( int i = 0; i < len; i++ )
                {
                    float x = srcptr[i];
                    dstptr[i] = pow(a*x + b, p);
                }
            }
        }
    }

#ifdef HAVE_OPENCL
    bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);
        String buildopt = oclGetTMacro(inputs[0]);

        for (size_t i = 0; i < inputs.size(); i++)
        {
            UMat& src = inputs[i];
            UMat& dst = outputs[i];

            ocl::Kernel kernel("PowForward", ocl::dnn::activations_oclsrc, buildopt);
            kernel.set(0, (int)src.total());
            kernel.set(1, ocl::KernelArg::PtrReadOnly(src));
            kernel.set(2, ocl::KernelArg::PtrWriteOnly(dst));
            kernel.set(3, (float)power);
            kernel.set(4, (float)scale);
            kernel.set(5, (float)shift);

            size_t gSize = src.total();
            CV_Assert(kernel.run(1, &gSize, NULL, false));
        }

        return true;
    }
#endif

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::PowerOp>(target, stream, power, scale, shift);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Expr topExpr = (scale == 1.0f ? input : input * scale);
        if (shift)
        {
            topExpr += shift;
        }
        if (power != 1.0f)
        {
            topExpr = pow(topExpr, power);
        }
        top(x, y, c, n) = topExpr;
    }
#endif  // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        CV_Error(Error::StsNotImplemented, "");
    }
#endif  // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        auto scale_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
                                                                 ngraph::Shape{1}, &scale);
        auto shift_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
                                                                 ngraph::Shape{1}, &shift);

        auto mul = std::make_shared<ngraph::op::v1::Multiply>(scale_node, node, ngraph::op::AutoBroadcastType::NUMPY);
        auto scale_shift = std::make_shared<ngraph::op::v1::Add>(mul, shift_node, ngraph::op::AutoBroadcastType::NUMPY);

        if (power == 1)
            return scale_shift;

        auto power_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
                                                                 ngraph::Shape{1}, &power);
        return std::make_shared<ngraph::op::v1::Power>(scale_shift, power_node, ngraph::op::AutoBroadcastType::NUMPY);
    }
#endif  // HAVE_DNN_NGRAPH

#ifdef HAVE_WEBNN
    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
    {
        CV_Error(Error::StsNotImplemented, "");
        ml::Operand operand;
        return operand;
    }
#endif

#ifdef HAVE_VULKAN
    std::shared_ptr<vkcom::OpBase> initVkCom()
    {
        // TODO: add vkcom implementation
        return std::shared_ptr<vkcom::OpBase>();
    }
#endif  // HAVE_VULKAN
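
    // Fold a following per-tensor scale/shift layer (w, b) into this layer's
    // parameters; for power == 1 the folding is exact:
    // y = w * (scale * x + shift) + b = (scale * w) * x + (w * shift + b).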
    bool tryFuse(Ptr<dnn::Layer>& top)
    {
        if (power != 1.0f && shift != 0.0f)
            return false;

        Mat w, b;
        top->getScaleShift(w, b);
        if ((w.empty() && b.empty()) || w.total() > 1 || b.total() > 1)
            return false;

        float nextScale = w.empty() ? 1.0f : w.at<float>(0);
        float nextShift = b.empty() ? 0.0f : b.at<float>(0);
        scale = std::pow(scale, power) * nextScale;
        shift = nextScale * shift + nextShift;
        return true;
    }

    void getScaleShift(Mat& _scale, Mat& _shift) const
    {
        if (power == 1.0f)
        {
            _scale = Mat(1, 1, CV_32F, Scalar(scale));
            _shift = Mat(1, 1, CV_32F, Scalar(shift));
        }
    }

    int64 getFLOPSPerElement() const { return power == 1 ? 2 : 10; }
};

struct ExpFunctor : public BaseDefaultFunctor<ExpFunctor>
{
    typedef ExpLayer Layer;
    float base, scale, shift;
    float normScale, normShift;

    ExpFunctor(float base_ = -1.f, float scale_ = 1.f, float shift_ = 0.f)
        : base(base_), scale(scale_), shift(shift_)
    {
        // For base > 0:
        // y = base^(scale * input + shift)
        // ln(y) = ln(base)*(scale * input + shift)
        // y = exp((ln(base)*scale) * input + (ln(base)*shift))
        // y = exp(normalized_scale * input + normalized_shift)
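        // e.g. base = 10, scale = 2, shift = 0 gives normScale = 2 * ln(10) ~= 4.605,
        // so y = 10^(2 * x) is evaluated as exp(4.605 * x).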
        CV_Check(base, base == -1.f || base > 0.f, "Unsupported 'base' value");
        const float ln_base = (base == -1.f) ? 1.f : log(base);
        normScale = scale * ln_base;
        normShift = shift * ln_base;
    }

    bool supportBackend(int backendId, int targetId)
    {
        return backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
    }

    inline float calculate(float x) const
    {
        return exp(normScale * x + normShift);
    }

    inline void setKernelParams(ocl::Kernel& kernel) const
    {
        kernel.set(3, normScale);
        kernel.set(4, normShift);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ExpOp>(target, stream, normScale, normShift);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        top(x, y, c, n) = exp(normScale * input + normShift);
    }
#endif  // HAVE_HALIDE

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        auto scale_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
                                                                 ngraph::Shape{1}, &normScale);
        auto shift_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
                                                                 ngraph::Shape{1}, &normShift);
        auto mul = std::make_shared<ngraph::op::v1::Multiply>(scale_node, node, ngraph::op::AutoBroadcastType::NUMPY);
        auto scale_shift = std::make_shared<ngraph::op::v1::Add>(mul, shift_node, ngraph::op::AutoBroadcastType::NUMPY);
        return std::make_shared<ngraph::op::v0::Exp>(scale_shift);
    }
#endif  // HAVE_DNN_NGRAPH

    int64 getFLOPSPerElement() const { return 3; }
};

template<>
const char* const ExpFunctor::BaseDefaultFunctor<ExpFunctor>::ocl_kernel_name = "ExpForward";

struct ChannelsPReLUFunctor : public BaseFunctor
{
    typedef ChannelsPReLULayer Layer;
    Mat scale;
#ifdef HAVE_OPENCL
    UMat scale_umat;
#endif

    explicit ChannelsPReLUFunctor(const Mat& scale_=Mat()) : scale(scale_)
    {
    }

    bool supportBackend(int backendId, int)
    {
#ifdef HAVE_INF_ENGINE
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
            return true;
#endif
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE;
    }

    void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
    {
        CV_Assert(scale.isContinuous() && scale.type() == CV_32F);

        const float* scaleptr = scale.ptr<float>();
        CV_Assert( 0 <= cn0 && cn0 < cn1 && cn1 <= (int)scale.total() );

        for( int cn = cn0; cn < cn1; cn++, srcptr += planeSize, dstptr += planeSize )
        {
            float s = scaleptr[cn];
            int i = 0;
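            // Vectorized path: 16 floats per iteration via v_select(x >= 0, x, s * x);
            // the scalar loop below finishes the remaining tail.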
#if CV_SIMD128
            v_float32x4 s4 = v_setall_f32(s), z = v_setzero_f32();
            for( ; i <= len - 16; i += 16 )
            {
                v_float32x4 x0 = v_load(srcptr + i);
                v_float32x4 x1 = v_load(srcptr + i + 4);
                v_float32x4 x2 = v_load(srcptr + i + 8);
                v_float32x4 x3 = v_load(srcptr + i + 12);
                x0 = v_select(x0 >= z, x0, x0*s4);
                x1 = v_select(x1 >= z, x1, x1*s4);
                x2 = v_select(x2 >= z, x2, x2*s4);
                x3 = v_select(x3 >= z, x3, x3*s4);
                v_store(dstptr + i, x0);
                v_store(dstptr + i + 4, x1);
                v_store(dstptr + i + 8, x2);
                v_store(dstptr + i + 12, x3);
            }
#endif
            for( ; i < len; i++ )
            {
                float x = srcptr[i];
                dstptr[i] = x >= 0.f ? x : s*x;
            }
        }
    }

#ifdef HAVE_OPENCL
    bool applyOCL(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        if (scale_umat.empty())
            scale.copyTo(scale_umat);

        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);
        String buildopt = oclGetTMacro(inputs[0]);

        for (size_t i = 0; i < inputs.size(); i++)
        {
            UMat& src = inputs[i];
            UMat& dst = outputs[i];

            ocl::Kernel kernel("PReLUForward", ocl::dnn::activations_oclsrc, buildopt);
            kernel.set(0, (int)src.total());
            kernel.set(1, (int)src.size[1]);
            kernel.set(2, (int)total(shape(src), 2));
            kernel.set(3, ocl::KernelArg::PtrReadOnly(src));
            kernel.set(4, ocl::KernelArg::PtrWriteOnly(dst));
            kernel.set(5, ocl::KernelArg::PtrReadOnly(scale_umat));

            size_t gSize = src.total();
            CV_Assert(kernel.run(1, &gSize, NULL, false));
        }

        return true;
    }
#endif

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ChannelwiseReLUOp>(target, stream, scale);
    }
#endif

#ifdef HAVE_HALIDE
    void attachHalide(const Halide::Expr& input, Halide::Func& top)
    {
        Halide::Var x("x"), y("y"), c("c"), n("n");
        auto weights = wrapToHalideBuffer(scale, {(int)scale.total()});
        top(x, y, c, n) = select(input >= 0.0f, input, weights(c) * input);
    }
#endif  // HAVE_HALIDE

#ifdef HAVE_CANN
    Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
    {
        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
        auto x_desc = x->getTensorDesc();

        auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);

        std::string op_name = cv::format("prelu_%d", index);
        auto op = std::make_shared<ge::op::PRelu>(op_name);

        op->set_input_x_by_name(*op_x, "y");
        op->update_input_desc_x(*x_desc);

        std::vector<int> shape_{scale.size[0]}; // CANN expects a 1d slope tensor of shape [n]; in OpenCV it is a 2d Mat of shape [n, 1]
        auto op_const_slope = std::make_shared<CannConstOp>(scale.data, scale.type(), shape_, cv::format("%s_weight", op_name.c_str()));
        op->set_input_weight(*(op_const_slope->getOp()));
        op->update_input_desc_weight(*(op_const_slope->getTensorDesc()));

        op->update_output_desc_y(*output_desc);

        return Ptr<BackendNode>(new CannBackendNode(op));
    }
#endif  // HAVE_CANN

#ifdef HAVE_DNN_NGRAPH
    std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
    {
        const size_t numChannels = scale.total();
        auto slope = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{numChannels}, scale.data);
        return std::make_shared<ngraph::op::PRelu>(node, slope);
    }
#endif  // HAVE_DNN_NGRAPH

#ifdef HAVE_WEBNN
    ml::Operand initWebnnAPI(const ml::GraphBuilder& builder, const ml::Operand& input)
    {
        CV_Error(Error::StsNotImplemented, "");
        ml::Operand operand;
        return operand;
    }
#endif

#ifdef HAVE_VULKAN
    std::shared_ptr<vkcom::OpBase> initVkCom()
    {
        // TODO: add vkcom implementation
        return std::shared_ptr<vkcom::OpBase>();
    }
#endif  // HAVE_VULKAN

    int64 getFLOPSPerElement() const { return 1; }
};

struct SignFunctor : public BaseDefaultFunctor<SignFunctor>
{
    typedef SignLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return x > 0.f ? 1.f : (x < 0.f ? -1.f : 0.f);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::SignOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const SignFunctor::BaseDefaultFunctor<SignFunctor>::ocl_kernel_name = "SignForward";
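
// Shrink(x) = x - bias if x > lambd, x + bias if x < -lambd, else 0 (ONNX Shrink).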
struct ShrinkFunctor : public BaseDefaultFunctor<ShrinkFunctor>
{
    typedef ShrinkLayer Layer;
    float bias;
    float lambd;

    explicit ShrinkFunctor(float bias_ = 0.0f, float lambd_ = 0.5f) : bias(bias_), lambd(lambd_) {}

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return x > lambd ? x - bias : (x < -lambd ? x + bias : 0.f);
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ShrinkOp>(target, stream, bias, lambd);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const ShrinkFunctor::BaseDefaultFunctor<ShrinkFunctor>::ocl_kernel_name = "ShrinkForward";

struct ReciprocalFunctor : public BaseDefaultFunctor<ReciprocalFunctor>
{
    typedef ReciprocalLayer Layer;

    bool supportBackend(int backendId, int)
    {
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA;
    }

    inline float calculate(float x) const
    {
        return 1.f / x;
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
    {
        return make_cuda_node<cuda4dnn::ReciprocalOp>(target, stream);
    }
#endif

    int64 getFLOPSPerElement() const { return 1; }
};

template<>
const char* const ReciprocalFunctor::BaseDefaultFunctor<ReciprocalFunctor>::ocl_kernel_name = "ReciprocalForward";

#define ACTIVATION_CREATOR_FOR(_Layer, _Functor, ...) \
Ptr<_Layer> _Layer::create() { \
    return Ptr<_Layer>( new ElementWiseLayer<_Functor>(_Functor()) ); }

Ptr<ReLULayer> ReLULayer::create(const LayerParams& params)
{
    float negativeSlope = params.get<float>("negative_slope", 0.f);
    Ptr<ReLULayer> l(new ElementWiseLayer<ReLUFunctor>(ReLUFunctor(negativeSlope)));
    l->setParamsFrom(params);
    l->negativeSlope = negativeSlope;

    return l;
}

Ptr<ReLU6Layer> ReLU6Layer::create(const LayerParams& params)
{
    float minValue = params.get<float>("min_value", 0.0f);
    float maxValue = params.get<float>("max_value", 6.0f);
    Ptr<ReLU6Layer> l(new ElementWiseLayer<ReLU6Functor>(ReLU6Functor(minValue, maxValue)));
    l->setParamsFrom(params);
    l->minValue = minValue;
    l->maxValue = maxValue;

    return l;
}
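
// GELU(x) = x * Phi(x) = 0.5 * x * (1 + erf(x / sqrt(2))); GeluApproximation is
// the usual tanh-based approximation of the same curve.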
Ptr<GeluLayer> GeluLayer::create(const LayerParams& params)
{
    Ptr<GeluLayer> l(new ElementWiseLayer<GeluFunctor>(GeluFunctor()));
    l->setParamsFrom(params);

    return l;
}

Ptr<GeluApproximationLayer> GeluApproximationLayer::create(const LayerParams& params)
{
    Ptr<GeluApproximationLayer> l(new ElementWiseLayer<GeluApproximationFunctor>(GeluApproximationFunctor()));
    l->setParamsFrom(params);

    return l;
}

Ptr<TanHLayer> TanHLayer::create(const LayerParams& params)
{
    Ptr<TanHLayer> l(new ElementWiseLayer<TanHFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<SwishLayer> SwishLayer::create(const LayerParams& params)
{
    Ptr<SwishLayer> l(new ElementWiseLayer<SwishFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<MishLayer> MishLayer::create(const LayerParams& params)
{
    Ptr<MishLayer> l(new ElementWiseLayer<MishFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<SigmoidLayer> SigmoidLayer::create(const LayerParams& params)
{
    Ptr<SigmoidLayer> l(new ElementWiseLayer<SigmoidFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<ELULayer> ELULayer::create(const LayerParams& params)
{
    float alpha = params.get<float>("alpha", 1.0f);
    Ptr<ELULayer> l(new ElementWiseLayer<ELUFunctor>(ELUFunctor(alpha)));
    l->setParamsFrom(params);
    l->alpha = alpha;

    return l;
}

Ptr<AbsLayer> AbsLayer::create(const LayerParams& params)
{
    Ptr<AbsLayer> l(new ElementWiseLayer<AbsValFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<BNLLLayer> BNLLLayer::create(const LayerParams& params)
{
    Ptr<BNLLLayer> l(new ElementWiseLayer<BNLLFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<CeilLayer> CeilLayer::create(const LayerParams& params)
{
    Ptr<CeilLayer> l(new ElementWiseLayer<CeilFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<FloorLayer> FloorLayer::create(const LayerParams& params)
{
    Ptr<FloorLayer> l(new ElementWiseLayer<FloorFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<LogLayer> LogLayer::create(const LayerParams& params)
{
    Ptr<LogLayer> l(new ElementWiseLayer<LogFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<RoundLayer> RoundLayer::create(const LayerParams& params)
{
    Ptr<RoundLayer> l(new ElementWiseLayer<RoundFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<SqrtLayer> SqrtLayer::create(const LayerParams& params)
{
    Ptr<SqrtLayer> l(new ElementWiseLayer<SqrtFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<NotLayer> NotLayer::create(const LayerParams& params)
{
    Ptr<NotLayer> l(new ElementWiseLayer<NotFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<AcosLayer> AcosLayer::create(const LayerParams& params)
{
    Ptr<AcosLayer> l(new ElementWiseLayer<AcosFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<AcoshLayer> AcoshLayer::create(const LayerParams& params)
{
    Ptr<AcoshLayer> l(new ElementWiseLayer<AcoshFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<AsinLayer> AsinLayer::create(const LayerParams& params)
{
    Ptr<AsinLayer> l(new ElementWiseLayer<AsinFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<AsinhLayer> AsinhLayer::create(const LayerParams& params)
{
    Ptr<AsinhLayer> l(new ElementWiseLayer<AsinhFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<AtanLayer> AtanLayer::create(const LayerParams& params)
{
    Ptr<AtanLayer> l(new ElementWiseLayer<AtanFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<AtanhLayer> AtanhLayer::create(const LayerParams& params)
{
    Ptr<AtanhLayer> l(new ElementWiseLayer<AtanhFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<CosLayer> CosLayer::create(const LayerParams& params)
{
    Ptr<CosLayer> l(new ElementWiseLayer<CosFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<CoshLayer> CoshLayer::create(const LayerParams& params)
{
    Ptr<CoshLayer> l(new ElementWiseLayer<CoshFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<ErfLayer> ErfLayer::create(const LayerParams& params)
{
    Ptr<ErfLayer> l(new ElementWiseLayer<ErfFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<HardSwishLayer> HardSwishLayer::create(const LayerParams& params)
{
    Ptr<HardSwishLayer> l(new ElementWiseLayer<HardSwishFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<SinLayer> SinLayer::create(const LayerParams& params)
{
    Ptr<SinLayer> l(new ElementWiseLayer<SinFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<SinhLayer> SinhLayer::create(const LayerParams& params)
{
    Ptr<SinhLayer> l(new ElementWiseLayer<SinhFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<SoftplusLayer> SoftplusLayer::create(const LayerParams& params)
{
    Ptr<SoftplusLayer> l(new ElementWiseLayer<SoftplusFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<SoftsignLayer> SoftsignLayer::create(const LayerParams& params)
{
    Ptr<SoftsignLayer> l(new ElementWiseLayer<SoftsignFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<TanLayer> TanLayer::create(const LayerParams& params)
{
    Ptr<TanLayer> l(new ElementWiseLayer<TanFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<CeluLayer> CeluLayer::create(const LayerParams& params)
{
    float alpha = params.get<float>("alpha", 1.f);
    Ptr<CeluLayer> l(new ElementWiseLayer<CeluFunctor>(CeluFunctor(alpha)));
    l->setParamsFrom(params);
    l->alpha = alpha;

    return l;
}

Ptr<HardSigmoidLayer> HardSigmoidLayer::create(const LayerParams& params)
{
    float alpha = params.get<float>("alpha", 0.2f);
    float beta = params.get<float>("beta", 0.5f);
    Ptr<HardSigmoidLayer> l(new ElementWiseLayer<HardSigmoidFunctor>(HardSigmoidFunctor(alpha, beta)));
    l->setParamsFrom(params);
    l->alpha = alpha;
    l->beta = beta;

    return l;
}

Ptr<SeluLayer> SeluLayer::create(const LayerParams& params)
{
    float alpha = params.get<float>("alpha", 1.67326319217681884765625f);
    float gamma = params.get<float>("gamma", 1.05070102214813232421875f);
    Ptr<SeluLayer> l(new ElementWiseLayer<SeluFunctor>(SeluFunctor(alpha, gamma)));
    l->setParamsFrom(params);
    l->alpha = alpha;
    l->gamma = gamma;

    return l;
}

Ptr<ThresholdedReluLayer> ThresholdedReluLayer::create(const LayerParams& params)
{
    float alpha = params.get<float>("alpha", 1.f);
    Ptr<ThresholdedReluLayer> l(new ElementWiseLayer<ThresholdedReluFunctor>(ThresholdedReluFunctor(alpha)));
    l->setParamsFrom(params);
    l->alpha = alpha;

    return l;
}

Ptr<PowerLayer> PowerLayer::create(const LayerParams& params)
{
    float power = params.get<float>("power", 1.0f);
    float scale = params.get<float>("scale", 1.0f);
    float shift = params.get<float>("shift", 0.0f);
    Ptr<PowerLayer> l(new ElementWiseLayer<PowerFunctor>(PowerFunctor(power, scale, shift)));
    l->setParamsFrom(params);
    l->power = power;
    l->scale = scale;
    l->shift = shift;

    return l;
}

Ptr<ExpLayer> ExpLayer::create(const LayerParams& params)
{
    float base = params.get<float>("base", -1.0f);
    float scale = params.get<float>("scale", 1.0f);
    float shift = params.get<float>("shift", 0.0f);
    Ptr<ExpLayer> l(new ElementWiseLayer<ExpFunctor>(ExpFunctor(base, scale, shift)));
    l->setParamsFrom(params);
    l->base = base;
    l->scale = scale;
    l->shift = shift;

    return l;
}
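
// A PReLU with a single shared slope is equivalent to leaky ReLU, so that case
// is delegated to ReLULayer with the blob value as negative_slope.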
Ptr<Layer> ChannelsPReLULayer::create(const LayerParams& params)
{
    CV_Assert(params.blobs.size() == 1);
    if (params.blobs[0].total() == 1)
    {
        LayerParams reluParams = params;
        reluParams.set("negative_slope", *params.blobs[0].ptr<float>());
        return ReLULayer::create(reluParams);
    }
    Ptr<ChannelsPReLULayer> l(new ElementWiseLayer<ChannelsPReLUFunctor>(ChannelsPReLUFunctor(params.blobs[0])));
    l->setParamsFrom(params);

    return l;
}

Ptr<SignLayer> SignLayer::create(const LayerParams& params)
{
    Ptr<SignLayer> l(new ElementWiseLayer<SignFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<ReciprocalLayer> ReciprocalLayer::create(const LayerParams& params)
{
    Ptr<ReciprocalLayer> l(new ElementWiseLayer<ReciprocalFunctor>());
    l->setParamsFrom(params);

    return l;
}

Ptr<ShrinkLayer> ShrinkLayer::create(const LayerParams& params)
{
    float bias = params.get<float>("bias", 0.f);
    float lambd = params.get<float>("lambd", 0.5f);
    Ptr<ShrinkLayer> l(new ElementWiseLayer<ShrinkFunctor>(ShrinkFunctor(bias, lambd)));
    l->setParamsFrom(params);
    l->bias = bias;
    l->lambd = lambd;

    return l;
}

} // namespace dnn
} // namespace cv