mirror of https://github.com/opencv/opencv.git

add alpha parameter to ELU

commit 4995aecd62 (parent d58b5ef74b)
@@ -545,6 +545,8 @@ CV__DNN_INLINE_NS_BEGIN
 class CV_EXPORTS ELULayer : public ActivationLayer
 {
 public:
+    float alpha;
+
     static Ptr<ELULayer> create(const LayerParams &params);
 };
 
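This change threads a user-configurable alpha through every ELU code path: the CUDA kernels and functors, the OpenCL kernel, the Halide and nGraph backends, and the element-wise layer implementation. With alpha, ELU computes f(x) = x for x >= 0 and alpha * (exp(x) - 1) otherwise, so alpha scales the negative saturation plateau; alpha = 1 reproduces the old hard-coded behaviour. A minimal standalone sketch of that math (illustrative, not OpenCV code):

    #include <cmath>
    #include <cstdio>

    // Reference ELU: identity for non-negative inputs, scaled
    // exponential saturation (toward -alpha) for negative ones.
    static float elu_ref(float x, float alpha)
    {
        return x >= 0.f ? x : alpha * (std::exp(x) - 1.f);
    }

    int main()
    {
        std::printf("%f\n", elu_ref(-2.f, 1.0f));  // ~ -0.864665
        std::printf("%f\n", elu_ref(-2.f, 0.5f));  // ~ -0.432332
    }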
@@ -119,8 +119,8 @@ void sigmoid(const Stream& stream, Span<T> output, View<T> input) {
 }
 
 template <class T>
-void elu(const Stream& stream, Span<T> output, View<T> input) {
-    generic_op<T, ELUFunctor<T>>(stream, output, input);
+void elu(const Stream& stream, Span<T> output, View<T> input, T alpha) {
+    generic_op<T, ELUFunctor<T>>(stream, output, input, {alpha});
 }
 
 template <class T>
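The new `{alpha}` argument brace-initializes the functor's nested Params type (defined in the functor hunk further down), which generic_op then hands to each ELUFunctor instance. A simplified host-side analog of that pattern, with hypothetical names (MyFunctor, transform) standing in for the csl machinery:

    #include <iostream>

    // Hypothetical stand-ins for the csl functor/Params pattern;
    // not OpenCV types.
    template <class T>
    struct MyFunctor {
        struct Params {
            Params() : alpha(1) { }
            Params(T alpha_) : alpha(alpha_) { }
            T alpha;
        };

        MyFunctor(const Params& params) : alpha(params.alpha) { }

        T operator()(T x) const { return x >= T(0) ? x : alpha * x; }

        T alpha;
    };

    // Analog of generic_op: build one functor from Params, apply it
    // element-wise (the CUDA version does this once per thread).
    template <class T, class Functor>
    void transform(T* data, int n, const typename Functor::Params& params = {})
    {
        Functor f(params);
        for (int i = 0; i < n; i++)
            data[i] = f(data[i]);
    }

    int main()
    {
        float v[] = { -2.f, 3.f };
        transform<float, MyFunctor<float> >(v, 2, {0.5f}); // {alpha} -> Params
        std::cout << v[0] << " " << v[1] << "\n";          // prints: -1 3
    }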
@@ -187,7 +187,7 @@ template void tanh<__half>(const Stream&, Span<__half>, View<__half>);
 template void swish<__half>(const Stream&, Span<__half>, View<__half>);
 template void mish<__half>(const Stream&, Span<__half>, View<__half>);
 template void sigmoid<__half>(const Stream&, Span<__half>, View<__half>);
-template void elu<__half>(const Stream&, Span<__half>, View<__half>);
+template void elu<__half>(const Stream&, Span<__half>, View<__half>, __half);
 template void abs<__half>(const Stream& stream, Span<__half> output, View<__half> input);
 template void bnll<__half>(const Stream&, Span<__half>, View<__half>);
 template void ceil<__half>(const Stream&, Span<__half>, View<__half>);
@@ -207,7 +207,7 @@ template void tanh<float>(const Stream&, Span<float>, View<float>);
 template void swish<float>(const Stream&, Span<float>, View<float>);
 template void mish<float>(const Stream&, Span<float>, View<float>);
 template void sigmoid<float>(const Stream&, Span<float>, View<float>);
-template void elu<float>(const Stream&, Span<float>, View<float>);
+template void elu<float>(const Stream&, Span<float>, View<float>, float);
 template void abs<float>(const Stream& stream, Span<float> output, View<float> input);
 template void bnll<float>(const Stream&, Span<float>, View<float>);
 template void ceil<float>(const Stream&, Span<float>, View<float>);
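Both instantiation lists change because the kernel templates are defined in a .cu translation unit and exported via explicit instantiation; any change to a template's signature has to be mirrored here for __half and float, or the symbol is missing at link time. A minimal illustration of the idiom (generic C++, not the actual CUDA code):

    // lib.cpp -- template defined here, invisible to other TUs unless
    // explicitly instantiated with every signature callers will use.
    template <class T>
    T scale(T x, T a) { return a * x; }

    template float scale<float>(float, float);     // emits scale<float>
    template double scale<double>(double, double); // emits scale<double>

    // main.cpp -- sees only a declaration:
    //   template <class T> T scale(T x, T a);
    // scale(2.f, 3.f) links thanks to the explicit instantiation above;
    // an int call, scale(2, 3), would fail at link time.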
@@ -169,16 +169,20 @@ struct SigmoidFunctor {
 template <class T>
 struct ELUFunctor {
     struct Params {
-        CUDA4DNN_HOST_DEVICE Params() { }
+        CUDA4DNN_HOST_DEVICE Params() : alpha(1) { }
+        CUDA4DNN_HOST_DEVICE Params(T alpha_) : alpha(alpha_) { }
+        T alpha;
     };
 
-    CUDA4DNN_DEVICE ELUFunctor() { }
-    CUDA4DNN_DEVICE ELUFunctor(const Params& params) { }
+    CUDA4DNN_DEVICE ELUFunctor() : ELUFunctor(Params{}) { }
+    CUDA4DNN_DEVICE ELUFunctor(const Params& params) : alpha{params.alpha} { }
 
     CUDA4DNN_DEVICE T operator()(T value) {
         using csl::device::expm1;
-        return value >= T(0) ? value : expm1(value);
+        return value >= T(0) ? value : alpha * expm1(value);
     }
+
+    T alpha;
 };
 
 template <class T>
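Note that the CUDA functor computes alpha * expm1(value) while the CPU, OpenCL, and Halide paths use alpha * (exp(x) - 1): expm1 avoids the cancellation that makes exp(x) - 1 lose every significant digit for x near zero. A small host-side demonstration with the standard-library equivalents:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        float x = 1e-10f;
        // expf(1e-10f) rounds to exactly 1.0f, so the subtraction
        // cancels to 0; expm1f keeps the leading term of the series.
        std::printf("exp(x)-1 = %g\n", std::exp(x) - 1.f);  // 0
        std::printf("expm1(x) = %g\n", std::expm1(x));      // ~1e-10
    }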
@@ -34,7 +34,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
 void sigmoid(const csl::Stream& stream, csl::Span<T> output, csl::View<T> input);
 
 template <class T>
-void elu(const csl::Stream& stream, csl::Span<T> output, csl::View<T> input);
+void elu(const csl::Stream& stream, csl::Span<T> output, csl::View<T> input, T alpha);
 
 template <class T>
 void abs(const csl::Stream& stream, csl::Span<T> output, csl::View<T> input);
@@ -156,15 +156,16 @@ namespace cv { namespace dnn { namespace cuda4dnn {
 template <class T>
 class ELUOp final : public BaseOp<ELUOp, T> {
 public:
-    ELUOp(csl::Stream stream_) : stream(std::move(stream_)) { }
+    ELUOp(csl::Stream stream_, T alpha_) : stream(std::move(stream_)), alpha(alpha_) { }
 
     void calculate(csl::TensorSpan<T> output, csl::TensorView<T> input) const
     {
-        kernels::elu<T>(stream, output, input);
+        kernels::elu<T>(stream, output, input, alpha);
     }
 
 private:
     csl::Stream stream;
+    T alpha;
 };
 
 template <class T>
@@ -987,6 +987,9 @@ const char* const SigmoidFunctor::BaseDefaultFunctor<SigmoidFunctor>::ocl_kernel
 struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
 {
     typedef ELULayer Layer;
+    float alpha;
+
+    explicit ELUFunctor(float alpha_ = 1.f) : alpha(alpha_) {}
 
     bool supportBackend(int backendId, int)
     {
@@ -998,13 +1001,18 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
 
     inline float calculate(float x) const
     {
-        return x >= 0.f ? x : exp(x) - 1.f;
+        return x >= 0.f ? x : alpha * (exp(x) - 1.f);
+    }
+
+    inline void setKernelParams(ocl::Kernel& kernel) const
+    {
+        kernel.set(3, alpha);
     }
 
 #ifdef HAVE_CUDA
     Ptr<BackendNode> initCUDA(int target, csl::Stream stream)
     {
-        return make_cuda_node<cuda4dnn::ELUOp>(target, stream);
+        return make_cuda_node<cuda4dnn::ELUOp>(target, stream, alpha);
     }
 #endif
 
@@ -1012,7 +1020,7 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
     void attachHalide(const Halide::Expr& input, Halide::Func& top)
     {
         Halide::Var x("x"), y("y"), c("c"), n("n");
-        top(x, y, c, n) = select(input >= 0.0f, input, exp(input) - 1);
+        top(x, y, c, n) = select(input >= 0.0f, input, alpha * (exp(input) - 1));
     }
 #endif // HAVE_HALIDE
 
@@ -1026,7 +1034,7 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
 #ifdef HAVE_DNN_NGRAPH
     std::shared_ptr<ngraph::Node> initNgraphAPI(const std::shared_ptr<ngraph::Node>& node)
     {
-        return std::make_shared<ngraph::op::Elu>(node, 1.0);
+        return std::make_shared<ngraph::op::Elu>(node, alpha);
     }
 #endif // HAVE_DNN_NGRAPH
 
@@ -1856,8 +1864,10 @@ Ptr<SigmoidLayer> SigmoidLayer::create(const LayerParams& params)
 
 Ptr<ELULayer> ELULayer::create(const LayerParams& params)
 {
-    Ptr<ELULayer> l(new ElementWiseLayer<ELUFunctor>(ELUFunctor()));
+    float alpha = params.get<float>("alpha", 1.0f);
+    Ptr<ELULayer> l(new ElementWiseLayer<ELUFunctor>(ELUFunctor(alpha)));
     l->setParamsFrom(params);
+    l->alpha = alpha;
 
     return l;
 }
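Reading alpha from LayerParams with a default of 1.0f keeps existing models working unchanged while letting importers and users override it. Assuming the standard cv::dnn API, constructing an ELU layer with a custom alpha would look roughly like this:

    #include <opencv2/dnn.hpp>
    #include <iostream>

    int main()
    {
        cv::dnn::LayerParams params;
        params.type = "ELU";
        params.set("alpha", 0.5f);   // picked up by ELULayer::create

        cv::Ptr<cv::dnn::ELULayer> elu = cv::dnn::ELULayer::create(params);
        std::cout << elu->alpha << std::endl;   // 0.5
    }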
@@ -131,13 +131,14 @@ __kernel void PowForward(const int n, __global const T* in, __global T* out,
     out[index] = pow(shift + scale * in[index], power);
 }
 
-__kernel void ELUForward(const int n, __global const T* in, __global T* out)
+__kernel void ELUForward(const int n, __global const T* in, __global T* out,
+                         const KERNEL_ARG_DTYPE alpha)
 {
     int index = get_global_id(0);
     if (index < n)
     {
         T src = in[index];
-        out[index] = (src >= 0.f) ? src : exp(src) - 1;
+        out[index] = (src >= 0.f) ? src : alpha * (exp(src) - 1);
     }
 }
 
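The literal 3 in kernel.set(3, alpha) from the setKernelParams hunk above corresponds to alpha's position in this new signature; the shared dispatch code is presumed to bind the first three arguments itself and delegate any extras to setKernelParams. The mapping, annotated:

    // Kernel argument indices after this change (annotation only):
    //
    //   __kernel void ELUForward(const int n,                   // arg 0
    //                            __global const T* in,          // arg 1
    //                            __global T* out,               // arg 2
    //                            const KERNEL_ARG_DTYPE alpha)  // arg 3
    //
    // kernel.set(3, alpha) therefore targets the new trailing parameter.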