Mirror of https://github.com/opencv/opencv.git
Merge pull request #23401 from fengyuentau:fix_cann_layer_support
dnn: Support more operators in CANN backend #23401

This PR adds support for the following layers:

- [x] Sub
- [x] PRelu
- [x] DeConv
- [x] Also warn users if the backend is switched back to default when some layers are not supported.
- [ ] [Dropped] LSTM: some hacks (adding layers) were introduced that make it even harder to build the graph for the CANN backend.

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There is an accuracy test, performance test and test data in the opencv_extra repository, if applicable. The patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
This commit is contained in:
parent
b3a2444bcf
commit
3c1fcd5deb
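
A minimal usage sketch (not part of this patch; the model path is hypothetical) showing how the CANN backend is selected and where the fallback behavior changed below kicks in: if any layer is unsupported, the whole net now warns once and goes back to the default backend on the CPU target.

```cpp
#include <opencv2/dnn.hpp>

int main()
{
    cv::dnn::Net net = cv::dnn::readNet("model.onnx"); // hypothetical model

    // Request the CANN backend on the Ascend NPU target. If any layer is not
    // supported, the net falls back to the default backend and warns once.
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_CANN);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_NPU);

    cv::Mat blob = cv::dnn::blobFromImage(cv::Mat::zeros(224, 224, CV_8UC3));
    net.setInput(blob);
    cv::Mat out = net.forward();
    return 0;
}
```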
@@ -347,10 +347,12 @@ CV__DNN_INLINE_NS_BEGIN
     /**
      * @brief Returns a CANN backend node
      *
-     * @param inputsWrapper input tensors of this CANN operator
+     * @param inputs input tensors of CANN operator
+     * @param outputs output tensors of CANN operator
      * @param nodes nodes of input tensors
      */
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes);

     /**
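
The signature change above threads the output wrappers through to every layer's CANN hook. A hedged sketch of what a layer-side override looks like after this change (the surrounding layer class is illustrative, and the snippet only compiles inside OpenCV's dnn source tree):

```cpp
// Sketch of an override under the new three-argument signature;
// the layer it lives in is hypothetical, not part of this patch.
#ifdef HAVE_CANN
    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                      const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        auto x = inputs[0].dynamicCast<CannBackendWrapper>();   // input blob wrapper
        auto y = outputs[0].dynamicCast<CannBackendWrapper>();  // now available as well
        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp(); // producer op
        // ... build a ge:: operator from the x/y shapes, as the layers below do ...
        return Ptr<BackendNode>();
    }
#endif // HAVE_CANN
```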
@@ -84,7 +84,8 @@ Ptr<BackendNode> Layer::initTimVX(void* timVxInfo,
     return Ptr<BackendNode>();
 }

-Ptr<BackendNode> Layer::initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+Ptr<BackendNode> Layer::initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                 const std::vector<Ptr<BackendWrapper> > &outputs,
                                  const std::vector<Ptr<BackendNode> >& nodes)
 {
     CV_Error(Error::StsNotImplemented, "CANN pipeline of " + type + " layers is not defined.");
@@ -392,13 +392,14 @@ public:
 #endif // HAVE_HALIDE

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         CV_Assert(nodes.size() == 1);
         CV_Assert(blobs.size() == 4); // must have scale, offset, mean and variance

-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto channel = x->host->size[1];

         // create operator
@@ -121,10 +121,11 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x_desc = x->getTensorDesc();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
@@ -367,16 +367,17 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        CV_Assert(inputsWrapper.size() == nodes.size());
+        CV_Assert(inputs.size() == nodes.size());

         // create operator
         auto op = std::make_shared<ge::op::ConcatD>(name);

         // set attributes
-        int N = inputsWrapper.size();
+        int N = inputs.size();
         op->set_attr_concat_dim(axis);
         op->set_attr_N(N);

@@ -384,7 +385,7 @@ public:
         op->create_dynamic_input_x(N);
         for (int i = 0; i < N; i++)
         {
-            auto x_i = inputsWrapper[i].dynamicCast<CannBackendWrapper>();
+            auto x_i = inputs[i].dynamicCast<CannBackendWrapper>();
             auto x_i_desc = x_i->getTensorDesc();
             auto op_x_i = nodes[i].dynamicCast<CannBackendNode>()->getOp();
             op->set_dynamic_input_x(i, *op_x_i, x_i->name.c_str());
@@ -84,7 +84,8 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         auto mat_shape = shape(blobs[0]);
@@ -782,16 +782,17 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         CV_Assert(!blobs.empty());
-        CV_Assert(inputsWrapper.size() == 1);
+        CV_Assert(inputs.size() == 1);
         CV_Assert(nodes.size() == 1);

         bool has_bias = hasBias() || fusedBias;

-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         const auto shape_x = x->host->size; // [b, c, h, w]
         const int filter_out_channel = blobs[0].size[1];
         const int groups = shape_x[1] / filter_out_channel;
@@ -1611,7 +1612,8 @@ public:
 #endif // HAVE_INF_ENGINE
         {
             return backendId == DNN_BACKEND_CUDA ||
-                   (kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE));
+                   (kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE)) ||
+                   (kernel_size.size() == 2 && backendId == DNN_BACKEND_CANN);
         }
     }

@@ -2272,6 +2274,79 @@ public:
         return Ptr<BackendNode>();
     }

+#ifdef HAVE_CANN
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
+                                      const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
+    {
+        CV_Assert(!blobs.empty());
+        CV_Assert(inputs.size() == 1);
+        CV_Assert(nodes.size() == 1);
+
+        bool has_bias = hasBias() || fusedBias;
+
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
+        auto y = outputs[0].dynamicCast<CannBackendWrapper>();
+        const auto shape_x = x->host->size; // [N, C, H, W]
+        const auto shape_y = y->host->size; // [N, C, H, W]
+        const int filter_out_channel = blobs[0].size[0];
+        const int groups = shape_x[1] / filter_out_channel;
+
+        // create operator
+        auto op = std::make_shared<ge::op::Conv2DTransposeD>(name);
+
+        // set attributes
+        op->set_attr_input_size(
+            ge::Operator::OpListInt({(int64_t)shape_y[0],
+                                     (int64_t)shape_y[1],
+                                     (int64_t)shape_y[2],
+                                     (int64_t)shape_y[3],})
+        );
+        op->set_attr_strides(
+            ge::Operator::OpListInt({1, 1, (int64_t)strides[0], (int64_t)strides[1]})
+        );
+        op->set_attr_pads(ge::Operator::OpListInt(
+            {(int64_t)pads_begin[1], (int64_t)pads_end[1], (int64_t)pads_begin[0], (int64_t)pads_end[0]}
+        ));
+        op->set_attr_dilations(ge::Operator::OpListInt(
+            {1, 1, (int64_t)dilations[0], (int64_t)dilations[1]}
+        ));
+        op->set_attr_groups(groups);
+        op->set_attr_data_format("NCHW");
+        op->set_attr_output_padding(
+            ge::Operator::OpListInt({0, 0, (int64_t)adjust_pads[0], (int64_t)adjust_pads[1]}) // adjust_pads: [height, width]
+        );
+
+        // set inputs
+        // set inputs : x
+        auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
+        op->set_input_x_by_name(*op_x, x->name.c_str());
+        auto desc_x = x->getTensorDesc();
+        op->update_input_desc_x(*desc_x);
+        // set inputs : weight
+        const Mat& mat_w = blobs[0];
+        auto op_const_w = std::make_shared<CannConstOp>(mat_w.data, mat_w.type(), shape(mat_w), cv::format("%s_w", name.c_str()));
+        op->set_input_filter(*(op_const_w->getOp()));
+        op->update_input_desc_filter(*(op_const_w->getTensorDesc()));
+        // set inputs : bias
+        if (has_bias)
+        {
+            int out_channel = blobs[0].size[0];
+            const Mat& mat_b = blobs[1];
+
+            std::vector<int> shape_b{out_channel};
+            auto op_const_b = std::make_shared<CannConstOp>(mat_b.data, mat_b.type(), shape_b, cv::format("%s_b", name.c_str()));
+            op->set_input_bias(*(op_const_b->getOp()));
+            op->update_input_desc_bias(*(op_const_b->getTensorDesc()));
+        }
+
+        // set outputs
+        auto desc_output = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
+        op->update_output_desc_y(*desc_output);
+
+        return Ptr<BackendNode>(new CannBackendNode(op));
+    }
+#endif // HAVE_CANN
+
 #ifdef HAVE_DNN_NGRAPH
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
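
One step in the new DeConv path deserves a note: unlike plain convolution, a transposed convolution's output size is not uniquely determined by the input size when stride > 1, which is why the node above is handed shape_y explicitly (set_attr_input_size) together with output_padding. A standalone check of the standard transposed-convolution arithmetic, with illustrative values (not from the PR's tests):

```cpp
#include <cassert>

int main()
{
    const int H_in = 16, stride = 2, kernel = 3, dilation = 1;
    const int pad_begin = 1, pad_end = 1, output_padding = 1;

    // H_out = (H_in - 1) * stride - pads + dilated kernel extent + output_padding
    const int H_out = (H_in - 1) * stride - pad_begin - pad_end
                      + dilation * (kernel - 1) + 1 + output_padding;
    assert(H_out == 32); // (15 * 2) - 2 + 2 + 1 + 1 = 32; without output_padding
                         // the result would be 31, hence the explicit shape.
    return 0;
}
```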
@@ -188,10 +188,11 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        return func.initCannOp(Layer::name, inputsWrapper, nodes);
+        return func.initCannOp(Layer::name, inputs, nodes);
     }
 #endif // HAVE_CANN

@@ -461,10 +462,10 @@ struct ReLUFunctor : public BaseFunctor

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto x_desc = x->getTensorDesc();
@@ -655,10 +656,10 @@ struct ReLU6Functor : public BaseFunctor

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         auto op = std::make_shared<ge::op::ClipByValue>(name);
@@ -808,7 +809,7 @@ struct BaseDefaultFunctor : public BaseFunctor

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
         CV_Error(Error::StsNotImplemented, "");
@@ -930,10 +931,10 @@ struct TanHFunctor : public BaseDefaultFunctor<TanHFunctor>

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         auto op = std::make_shared<ge::op::Tanh>(name);
@@ -997,10 +998,10 @@ struct SwishFunctor : public BaseDefaultFunctor<SwishFunctor>

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         auto op = std::make_shared<ge::op::Swish>(name);
@@ -1075,10 +1076,10 @@ struct MishFunctor : public BaseDefaultFunctor<MishFunctor>

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         auto op = std::make_shared<ge::op::Mish>(name);
@@ -1151,10 +1152,10 @@ struct SigmoidFunctor : public BaseDefaultFunctor<SigmoidFunctor>

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         auto op = std::make_shared<ge::op::Sigmoid>(name);
@@ -1229,10 +1230,10 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         auto op = std::make_shared<ge::op::Elu>(name);
@@ -1301,10 +1302,10 @@ struct AbsValFunctor : public BaseDefaultFunctor<AbsValFunctor>

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         auto op = std::make_shared<ge::op::Abs>(name);
@@ -1363,10 +1364,10 @@ struct BNLLFunctor : public BaseDefaultFunctor<BNLLFunctor>

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         auto op = std::make_shared<ge::op::BNLL>(name);
@@ -1420,10 +1421,10 @@ struct CeilFunctor : public BaseDefaultFunctor<CeilFunctor>

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         auto op = std::make_shared<ge::op::BNLL>(name);
@@ -1479,10 +1480,10 @@ struct FloorFunctor : public BaseDefaultFunctor<FloorFunctor>

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         auto op = std::make_shared<ge::op::Floor>(name);
@@ -2334,7 +2335,7 @@ struct PowerFunctor : public BaseFunctor

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
         CV_Error(Error::StsNotImplemented, "");
@@ -2498,7 +2499,8 @@ struct ChannelsPReLUFunctor : public BaseFunctor
 #endif
         return backendId == DNN_BACKEND_OPENCV ||
                backendId == DNN_BACKEND_CUDA ||
-               backendId == DNN_BACKEND_HALIDE;
+               backendId == DNN_BACKEND_HALIDE ||
+               backendId == DNN_BACKEND_CANN;
     }

     void apply(const float* srcptr, float* dstptr, int len, size_t planeSize, int cn0, int cn1) const
@@ -2590,10 +2592,10 @@ struct ChannelsPReLUFunctor : public BaseFunctor

 #ifdef HAVE_CANN
     Ptr<BackendNode> initCannOp(const std::string& name,
-                                const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+                                const std::vector<Ptr<BackendWrapper> > &inputs,
                                 const std::vector<Ptr<BackendNode> >& nodes)
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto x_desc = x->getTensorDesc();
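
For reference, the semantics being mapped in the ChannelsPReLU hunks: one learned slope per channel, applied only to negative values. A standalone reference implementation (not PR code) against which the CANN PRelu operator can be checked:

```cpp
#include <cstdio>

// y[c, i] = x[c, i] if positive, otherwise slope[c] * x[c, i]
static void prelu_ref(const float* x, float* y, int channels, int plane,
                      const float* slope)
{
    for (int c = 0; c < channels; ++c)
        for (int i = 0; i < plane; ++i)
        {
            const float v = x[c * plane + i];
            y[c * plane + i] = v > 0.f ? v : slope[c] * v;
        }
}

int main()
{
    const float x[4] = {1.f, -1.f, 2.f, -2.f}; // 2 channels x 2 values
    const float slope[2] = {0.1f, 0.5f};
    float y[4];
    prelu_ref(x, y, 2, 2, slope);
    std::printf("%g %g %g %g\n", y[0], y[1], y[2], y[3]); // 1 -0.1 2 -1
    return 0;
}
```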
@@ -849,17 +849,18 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        CV_Assert(inputsWrapper.size() == 2);
+        CV_Assert(inputs.size() == 2);
         CV_Assert(nodes.size() == 2);

         auto op_x1 = nodes[0].dynamicCast<CannBackendNode>()->getOp();
-        auto x1 = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x1 = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x1_desc = x1->getTensorDesc();
         auto op_x2 = nodes[1].dynamicCast<CannBackendNode>()->getOp();
-        auto x2 = inputsWrapper[1].dynamicCast<CannBackendWrapper>();
+        auto x2 = inputs[1].dynamicCast<CannBackendWrapper>();
         auto x2_desc = x2->getTensorDesc();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);

@@ -176,10 +176,11 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x_desc = x->getTensorDesc();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
@@ -662,10 +662,11 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x1 = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x1 = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x1_desc = x1->getTensorDesc();
         auto op_x1 = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
@@ -689,7 +690,7 @@ public:
         else
         {
             // A and B are variable inputs; non-const bias is not considered
-            CV_Assert(inputsWrapper.size() == 2);
+            CV_Assert(inputs.size() == 2);
             CV_Assert(nodes.size() == 2);

             // set attributes
@@ -698,7 +699,7 @@ public:

             // set inputs : x2 (weight)
             auto op_x2 = nodes[1].dynamicCast<CannBackendNode>()->getOp();
-            auto x2_desc = inputsWrapper[1].dynamicCast<CannBackendWrapper>()->getTensorDesc();
+            auto x2_desc = inputs[1].dynamicCast<CannBackendWrapper>()->getTensorDesc();
             op->set_input_x2_by_name(*op_x2, "y");
             op->update_input_desc_x2(*x2_desc);
         }
@@ -445,10 +445,11 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         // create operator
         auto op = std::make_shared<ge::op::LRN>(name);
@@ -102,7 +102,7 @@ public:
     {
 #ifdef HAVE_CANN
         if (backendId == DNN_BACKEND_CANN)
-            return op == OPERATION::ADD || op == OPERATION::PROD || op == OPERATION::DIV ||
+            return op == OPERATION::ADD || op == OPERATION::PROD || op == OPERATION::SUB ||
                    op == OPERATION::DIV || op == OPERATION::MAX || op == OPERATION::MIN;
 #endif
         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
@@ -709,22 +709,23 @@ public:
 #endif

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        CV_Assert(inputsWrapper.size() == 2);
+        CV_Assert(inputs.size() == 2);
         CV_Assert(nodes.size() == 2);

         auto op_x1 = nodes[0].dynamicCast<CannBackendNode>()->getOp();
-        auto x1 = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x1 = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x1_desc = x1->getTensorDesc();
         auto op_x2 = nodes[1].dynamicCast<CannBackendNode>()->getOp();
-        auto x2 = inputsWrapper[1].dynamicCast<CannBackendWrapper>();
+        auto x2 = inputs[1].dynamicCast<CannBackendWrapper>();
         auto x2_desc = x2->getTensorDesc();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);

         std::shared_ptr<ge::Operator> eltwise_operator = nullptr;
-        // add, mul, div, max, min
+        // add, mul, sub, div, max, min
         switch (op)
         {
 #define BUILD_CANN_ELTWISE_OP(op_type, class_name, op_name) \
@@ -740,6 +741,7 @@ public:
             } break;
             BUILD_CANN_ELTWISE_OP(OPERATION::ADD, Add, name);
             BUILD_CANN_ELTWISE_OP(OPERATION::PROD, Mul, name);
+            BUILD_CANN_ELTWISE_OP(OPERATION::SUB, Sub, name);
             BUILD_CANN_ELTWISE_OP(OPERATION::DIV, Xdivy, name);
             BUILD_CANN_ELTWISE_OP(OPERATION::MAX, Maximum, name);
             BUILD_CANN_ELTWISE_OP(OPERATION::MIN, Minimum, name);
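
The body of BUILD_CANN_ELTWISE_OP is defined above this hunk and not shown. A plausible reading (an assumption inferred from the `} break;` context line and the x1/x2 wiring earlier in this method, not the PR's exact code) is that each case instantiates the named ge operator and connects both inputs:

```cpp
// Assumed macro shape, for orientation only; the real definition lives in the
// code above this hunk and may differ in detail.
#define BUILD_CANN_ELTWISE_OP(op_type, class_name, op_name)                   \
    case op_type: {                                                           \
        auto eltwise_op = std::make_shared<ge::op::class_name>(op_name);      \
        eltwise_op->set_input_x1_by_name(*op_x1, x1->name.c_str());           \
        eltwise_op->set_input_x2_by_name(*op_x2, x2->name.c_str());           \
        eltwise_op->update_input_desc_x1(*x1_desc);                           \
        eltwise_op->update_input_desc_x2(*x2_desc);                           \
        eltwise_op->update_output_desc_y(*output_desc);                       \
        eltwise_operator = eltwise_op;                                        \
    } break;
```

Under that reading, the one added line `BUILD_CANN_ELTWISE_OP(OPERATION::SUB, Sub, name);` is all that is needed to route Sub to ge::op::Sub.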
@@ -222,10 +222,11 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         // create operator
         auto op = std::make_shared<ge::op::PadV3>(name);
@@ -441,10 +441,11 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         // create operator
         auto op = std::make_shared<ge::op::Permute>(name);
@@ -548,10 +548,11 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto x_desc = x->getTensorDesc();
         auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
@@ -327,10 +327,11 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         // create operator
         auto op = std::make_shared<ge::op::Reshape>(name);
@@ -312,10 +312,11 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();
         auto x_desc = x->getTensorDesc();
         auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
         auto output_y_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
@@ -334,7 +335,8 @@ public:
         op->update_input_desc_x(*x_desc);
         // set inputs : size
         std::vector<int> shape_of_size_mat{2};
-        Mat size_mat(2, 1, CV_32S, Scalar(outHeight, outWidth));
+        std::vector<int> size_vec{outHeight, outWidth};
+        Mat size_mat(shape_of_size_mat, CV_32S, size_vec.data());
         auto op_const_size = std::make_shared<CannConstOp>(size_mat.data, size_mat.type(), shape_of_size_mat, cv::format("%s_size", name.c_str()));
         op->set_input_size(*(op_const_size->getOp()));
         op->update_input_desc_size(*(op_const_size->getTensorDesc()));
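
The replaced line above fixes a genuine bug rather than style: a cv::Mat constructed from a Scalar fills every element with the scalar's per-channel values, so for a single-channel CV_32S Mat both entries became outHeight and the output width was lost. A standalone repro of this reading of the change (illustrative values):

```cpp
#include <opencv2/core.hpp>
#include <cstdio>
#include <vector>

int main()
{
    // Old code: Scalar(480, 640) fills a 1-channel Mat from channel 0 only.
    cv::Mat wrong(2, 1, CV_32S, cv::Scalar(480, 640));
    std::printf("%d %d\n", wrong.at<int>(0), wrong.at<int>(1)); // 480 480

    // New code: copy both values in explicitly.
    std::vector<int> size_vec{480, 640};
    cv::Mat right(2, 1, CV_32S, size_vec.data());
    std::printf("%d %d\n", right.at<int>(0), right.at<int>(1)); // 480 640
    return 0;
}
```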
@@ -634,11 +634,12 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
         bool isSplit = sliceRanges.size() > 1;
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         if (isSplit)
         {
@@ -365,10 +365,11 @@ public:
     }

 #ifdef HAVE_CANN
-    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
+    virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputs,
+                                      const std::vector<Ptr<BackendWrapper> > &outputs,
                                       const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
+        auto x = inputs[0].dynamicCast<CannBackendWrapper>();

         // create operator
         auto op = std::make_shared<ge::op::SoftmaxV2>(name);
@@ -117,7 +117,7 @@ void NetImplCann::initBackend(const std::vector<LayerPin>& blobsToKeep_)
         if (ld.id != 0 && !layer->supportBackend(preferableBackend))
         {
             newWasSupported = false;
-            CV_LOG_INFO(NULL, "DNN/CANN: layer (name=" << ld.name << ", type=" << ld.type << ") is not supported by CANN backend. Going back to CPU backend");
+            CV_LOG_ONCE_WARNING(NULL, "DNN/CANN: layer (name=" << ld.name << ", type=" << ld.type << ") is not supported by CANN backend. Going back to default backend on CPU target");
         }
     }
 }
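
A note on visibility (standard OpenCV logging API, not part of this diff): CV_LOG_ONCE_WARNING prints the message once per process at warning severity, so the fallback now surfaces without raising verbosity. The threshold can still be adjusted programmatically:

```cpp
#include <opencv2/core/utils/logger.hpp>

// Raise verbosity to also see the per-layer "converting layer ..." INFO logs,
// or drop it to ERROR to silence the one-time fallback warning.
void setDnnCannLogVerbosity(bool verbose)
{
    using namespace cv::utils::logging;
    setLogLevel(verbose ? LOG_LEVEL_INFO : LOG_LEVEL_ERROR);
}
```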
@@ -202,7 +202,7 @@ void NetImplCann::initBackend(const std::vector<LayerPin>& blobsToKeep_)
     }

     CV_LOG_INFO(NULL, "DNN/CANN: converting layer " << ld.name << "@" << ld.type << "@" << ld.id << " to CANN operator");
-    auto backendNode = layer->initCann(ld.inputBlobsWrappers, layerInputNodes); // it's ok if ld.name is empty
+    auto backendNode = layer->initCann(ld.inputBlobsWrappers, ld.outputBlobsWrappers, layerInputNodes); // it's ok if ld.name is empty

     // collect outputs
     bool isOutputNode = ld.consumers.size() == 0 ? true : false;