Merge pull request #23319 from fengyuentau:fix_zoo_issue_136

Related issue: https://github.com/opencv/opencv_zoo/issues/136

Features added:

- Support operators with multiple outputs: ONNX Split.
- Support Slice without steps.

Bugs fixed:

- Wrong input bindings in ClipByValue (ReLU6): the maximum clip value was set through the minimum-value input.
- Wrong calculation of pads in the convolution layer (the calculation is wrong in general, but for now it is fixed only for the CANN backend; a sketch of the corrected arithmetic follows below).
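
For context, a minimal standalone sketch of the corrected SAME pad arithmetic (illustrative only: the sizes are made-up, and only the total-pad formula mirrors the one added in the patch):

```cpp
#include <cstdio>

int main()
{
    // hypothetical sizes for one spatial axis; the patch applies the
    // recalculation only when stride <= kernel
    const int input = 6, kernel = 3, stride = 2;

    // total padding so that output == ceil(input / stride)
    int total = kernel - 1 - (input - 1 + stride) % stride;  // = 1 here

    // split between both sides; an odd remainder goes to the end
    // (SAME_UPPER), instead of being halved away as before the fix
    int pads_begin = total / 2;           // = 0
    int pads_end   = total - pads_begin;  // = 1
    std::printf("begin=%d end=%d\n", pads_begin, pads_end);
    return 0;
}
```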

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV.
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There are accuracy tests, performance tests and test data in the opencv_extra repository, if applicable.
      The patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
Author: Yuantao Feng, 2023-03-14 02:46:33 +08:00 (committed by GitHub)
Parent: e03e2e7f94
Commit: b94e13c8ae
23 changed files with 317 additions and 202 deletions


@ -347,11 +347,11 @@ CV__DNN_INLINE_NS_BEGIN
/**
* @brief Returns a CANN backend node
*
* @param inputsWrapper layer inputs
* @param index layer id for op name
* @param nodes inputs of this node
* @param inputsWrapper input tensors of this CANN operator
* @param nodes nodes of input tensors
*/
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes);
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes);
/**
* @brief Automatic Halide scheduling based on layer hyper-parameters.


@ -84,7 +84,8 @@ Ptr<BackendNode> Layer::initTimVX(void* timVxInfo,
return Ptr<BackendNode>();
}
Ptr<BackendNode> Layer::initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> Layer::initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
CV_Error(Error::StsNotImplemented, "CANN pipeline of " + type + " layers is not defined.");
return Ptr<BackendNode>();


@ -392,7 +392,8 @@ public:
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(nodes.size() == 1);
CV_Assert(blobs.size() == 4); // must have scale, offset, mean and variance
@ -401,8 +402,7 @@ public:
auto channel = x->host->size[1];
// create operator
std::string op_name = cv::format("bn_%d", index);
auto op = std::make_shared<ge::op::BatchNorm>(op_name);
auto op = std::make_shared<ge::op::BatchNorm>(name);
// set attributes
op->set_attr_epsilon(epsilon);
@ -412,24 +412,24 @@ public:
// set inputs
// set inputs : x
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
// set inputs : scale (blobs[2])
std::vector<int> shape_{channel};
auto op_const_scale = std::make_shared<CannConstOp>(blobs[2].data, blobs[2].type(), shape_, cv::format("%s_scale", op_name.c_str()));
auto op_const_scale = std::make_shared<CannConstOp>(blobs[2].data, blobs[2].type(), shape_, cv::format("%s_scale", name.c_str()));
op->set_input_scale(*(op_const_scale->getOp()));
op->update_input_desc_scale(*(op_const_scale->getTensorDesc()));
// set inputs : offset (blobs[3])
auto op_const_offset = std::make_shared<CannConstOp>(blobs[3].data, blobs[3].type(), shape_, cv::format("%s_offset", op_name.c_str()));
auto op_const_offset = std::make_shared<CannConstOp>(blobs[3].data, blobs[3].type(), shape_, cv::format("%s_offset", name.c_str()));
op->set_input_offset(*(op_const_offset->getOp()));
op->update_input_desc_offset(*(op_const_offset->getTensorDesc()));
// set inputs : mean (blobs[0])
auto op_const_mean = std::make_shared<CannConstOp>(blobs[0].data, blobs[0].type(), shape_, cv::format("%s_mean", op_name.c_str()));
auto op_const_mean = std::make_shared<CannConstOp>(blobs[0].data, blobs[0].type(), shape_, cv::format("%s_mean", name.c_str()));
op->set_input_mean(*(op_const_mean->getOp()));
op->update_input_desc_mean(*(op_const_mean->getTensorDesc()));
// set inputs : variance (blobs[1])
auto op_const_var = std::make_shared<CannConstOp>(blobs[1].data, blobs[1].type(), shape_, cv::format("%s_var", op_name.c_str()));
auto op_const_var = std::make_shared<CannConstOp>(blobs[1].data, blobs[1].type(), shape_, cv::format("%s_var", name.c_str()));
op->set_input_variance(*(op_const_var->getOp()));
op->update_input_desc_variance(*(op_const_var->getTensorDesc()));


@ -121,7 +121,8 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
auto x_desc = x->getTensorDesc();
@ -129,11 +130,10 @@ public:
auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
// create operator
std::string op_name = cv::format("identity_%d", index);
auto op = std::make_shared<ge::op::Identity>(op_name);
auto op = std::make_shared<ge::op::Identity>(name);
// set inputs
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
op->update_input_desc_x(*x_desc);
// set output


@ -367,13 +367,13 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(inputsWrapper.size() == nodes.size());
// create operator
std::string op_name = cv::format("concat_%d", index);
auto op = std::make_shared<ge::op::ConcatD>(op_name);
auto op = std::make_shared<ge::op::ConcatD>(name);
// set attributes
int N = inputsWrapper.size();
@ -387,7 +387,7 @@ public:
auto x_i = inputsWrapper[i].dynamicCast<CannBackendWrapper>();
auto x_i_desc = x_i->getTensorDesc();
auto op_x_i = nodes[i].dynamicCast<CannBackendNode>()->getOp();
op->set_dynamic_input_x(i, *op_x_i, "y");
op->set_dynamic_input_x(i, *op_x_i, x_i->name.c_str());
op->update_dynamic_input_desc_x(i, *x_i_desc);
}


@ -84,7 +84,8 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto mat_shape = shape(blobs[0]);
std::vector<int64_t> mat_shape_{mat_shape.begin(), mat_shape.end()};
@ -110,8 +111,7 @@ public:
ge_tensor->SetTensorDesc(*desc);
ge_tensor->SetData(blobs[0].data, ge_shape.GetShapeSize() * size_of_type);
std::string op_name = cv::format("const_%d", index);
auto op = std::make_shared<ge::op::Const>(op_name);
auto op = std::make_shared<ge::op::Const>(name);
op->set_attr_value(*ge_tensor);
return Ptr<BackendNode>(new CannBackendNode(op));


@ -782,7 +782,8 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(!blobs.empty());
CV_Assert(inputsWrapper.size() == 1);
@ -791,18 +792,35 @@ public:
bool has_bias = hasBias() || fusedBias;
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
const int x_in_channel = x->host->size[1];
const auto shape_x = x->host->size; // [b, c, h, w]
const int filter_out_channel = blobs[0].size[1];
const int groups = x_in_channel / filter_out_channel;
const int groups = shape_x[1] / filter_out_channel;
// create operator
std::string op_name = cv::format("conv2d_%d", index);
auto op = std::make_shared<ge::op::Conv2D>(op_name);
auto op = std::make_shared<ge::op::Conv2D>(name);
// set attributes
op->set_attr_strides(ge::Operator::OpListInt(
{1, 1, (int64_t)strides[0], (int64_t)strides[1]}
));
// recalculate pads in case of "SAME" padMode with odd pads
// since in 'getConvPoolPaddings' pads are divided equally
// leading to the loss of one pad
if (padMode == "SAME")
{
for (int i = 0; i < pads_begin.size(); i++) {
if (strides[i] <= kernel_size[i])
{
int pads_at_i = kernel_size[i] - 1 - (shape_x[i+2] - 1 + strides[i]) % strides[i];
pads_begin[i] = pads_at_i / 2;
// if odd, add extra padding to the end for SAME_UPPER
// or to the beginning for SAME_LOWER. Since here we cannot
// identify SAME_UPPER and SAME_LOWER, extra padding is always
// added to the end.
pads_end[i] = pads_at_i - pads_begin[i];
}
}
}
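// e.g. (hypothetical sizes) input 5, kernel 3, stride 2 on one axis:
// pads_at_i = 3 - 1 - (5 - 1 + 2) % 2 = 2, split as pads_begin = 1 and
// pads_end = 1, keeping the output at ceil(5 / 2) = 3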
op->set_attr_pads(ge::Operator::OpListInt(
{(int64_t)pads_begin[1], (int64_t)pads_end[1], (int64_t)pads_begin[0], (int64_t)pads_end[0]}
));
@ -815,12 +833,12 @@ public:
// set inputs
// set inputs : x
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
// set inputs : weight
const Mat& w_mat = blobs[0];
auto op_const_weight = std::make_shared<CannConstOp>(w_mat.data, w_mat.type(), shape(w_mat), cv::format("%s_w", op_name.c_str()));
auto op_const_weight = std::make_shared<CannConstOp>(w_mat.data, w_mat.type(), shape(w_mat), cv::format("%s_w", name.c_str()));
op->set_input_filter(*(op_const_weight->getOp()));
op->update_input_desc_filter(*(op_const_weight->getTensorDesc()));
// set inputs : bias
@ -830,7 +848,7 @@ public:
Mat b_mat({out_channel}, CV_32F, &biasvec[0]);
std::vector<int> bias_shape{out_channel};
auto op_const_bias = std::make_shared<CannConstOp>(b_mat.data, b_mat.type(), bias_shape, cv::format("%s_b", op_name.c_str()));
auto op_const_bias = std::make_shared<CannConstOp>(b_mat.data, b_mat.type(), bias_shape, cv::format("%s_b", name.c_str()));
op->set_input_bias(*(op_const_bias->getOp()));
op->update_input_desc_bias(*(op_const_bias->getTensorDesc()));
}


@ -188,9 +188,10 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
return func.initCannOp(inputsWrapper, index, nodes);
return func.initCannOp(Layer::name, inputsWrapper, nodes);
}
#endif // HAVE_CANN
@ -459,7 +460,9 @@ struct ReLUFunctor : public BaseFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
@ -469,10 +472,9 @@ struct ReLUFunctor : public BaseFunctor
if (slope)
{
std::string op_name = cv::format("leakyrelu_%d", index);
auto op = std::make_shared<ge::op::LeakyRelu>(op_name);
auto op = std::make_shared<ge::op::LeakyRelu>(name);
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
op->update_input_desc_x(*x_desc);
op->set_attr_negative_slope(slope);
@ -482,10 +484,9 @@ struct ReLUFunctor : public BaseFunctor
return Ptr<BackendNode>(new CannBackendNode(op));
}
std::string op_name = cv::format("relu_%d", index);
auto op = std::make_shared<ge::op::Relu>(op_name); // FIXIT: Relu6?
auto op = std::make_shared<ge::op::Relu>(name);
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
op->update_input_desc_x(*x_desc);
op->update_output_desc_y(*output_desc);
@ -653,28 +654,29 @@ struct ReLU6Functor : public BaseFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
std::string op_name = cv::format("clip_%d", index);
auto op = std::make_shared<ge::op::ClipByValue>(op_name);
auto op = std::make_shared<ge::op::ClipByValue>(name);
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
Mat min_value_mat(1, 1, CV_32F, Scalar(minValue));
std::vector<int> shape_{1};
auto op_const_minv = std::make_shared<CannConstOp>(min_value_mat.data, min_value_mat.type(), shape_, cv::format("%s_min_value", op_name.c_str()));
auto op_const_minv = std::make_shared<CannConstOp>(min_value_mat.data, min_value_mat.type(), shape_, cv::format("%s_min_value", name.c_str()));
op->set_input_clip_value_min(*(op_const_minv->getOp()));
op->update_input_desc_clip_value_min(*(op_const_minv->getTensorDesc()));
Mat max_value_mat(1, 1, CV_32F, Scalar(maxValue));
auto op_const_maxv = std::make_shared<CannConstOp>(max_value_mat.data, max_value_mat.type(), shape_, cv::format("%s_max_value", op_name.c_str()));
op->set_input_clip_value_min(*(op_const_maxv->getOp()));
op->update_input_desc_clip_value_min(*(op_const_maxv->getTensorDesc()));
auto op_const_maxv = std::make_shared<CannConstOp>(max_value_mat.data, max_value_mat.type(), shape_, cv::format("%s_max_value", name.c_str()));
op->set_input_clip_value_max(*(op_const_maxv->getOp()));
op->update_input_desc_clip_value_max(*(op_const_maxv->getTensorDesc()));
auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
op->update_output_desc_y(*output_desc);
@ -805,7 +807,9 @@ struct BaseDefaultFunctor : public BaseFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
CV_Error(Error::StsNotImplemented, "");
}
@ -925,15 +929,16 @@ struct TanHFunctor : public BaseDefaultFunctor<TanHFunctor>
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
std::string op_name = cv::format("tanh_%d", index);
auto op = std::make_shared<ge::op::Tanh>(op_name);
auto op = std::make_shared<ge::op::Tanh>(name);
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
@ -991,17 +996,18 @@ struct SwishFunctor : public BaseDefaultFunctor<SwishFunctor>
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
std::string op_name = cv::format("swish_%d", index);
auto op = std::make_shared<ge::op::Swish>(op_name);
auto op = std::make_shared<ge::op::Swish>(name);
op->set_attr_scale(1.0f);
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
@ -1068,15 +1074,16 @@ struct MishFunctor : public BaseDefaultFunctor<MishFunctor>
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
std::string op_name = cv::format("mish_%d", index);
auto op = std::make_shared<ge::op::Mish>(op_name);
auto op = std::make_shared<ge::op::Mish>(name);
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
@ -1143,15 +1150,16 @@ struct SigmoidFunctor : public BaseDefaultFunctor<SigmoidFunctor>
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
std::string op_name = cv::format("sigmoid_%d", index);
auto op = std::make_shared<ge::op::Sigmoid>(op_name);
auto op = std::make_shared<ge::op::Sigmoid>(name);
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
@ -1220,17 +1228,18 @@ struct ELUFunctor : public BaseDefaultFunctor<ELUFunctor>
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
std::string op_name = cv::format("elu_%d", index);
auto op = std::make_shared<ge::op::Elu>(op_name);
auto op = std::make_shared<ge::op::Elu>(name);
op->set_attr_alpha(alpha);
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
@ -1291,15 +1300,16 @@ struct AbsValFunctor : public BaseDefaultFunctor<AbsValFunctor>
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
std::string op_name = cv::format("abs_%d", index);
auto op = std::make_shared<ge::op::Abs>(op_name);
auto op = std::make_shared<ge::op::Abs>(name);
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
@ -1352,15 +1362,16 @@ struct BNLLFunctor : public BaseDefaultFunctor<BNLLFunctor>
#endif
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
std::string op_name = cv::format("bnll_%d", index);
auto op = std::make_shared<ge::op::BNLL>(op_name);
auto op = std::make_shared<ge::op::BNLL>(name);
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
@ -1408,15 +1419,16 @@ struct CeilFunctor : public BaseDefaultFunctor<CeilFunctor>
#endif
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
std::string op_name = cv::format("bnll_%d", index);
auto op = std::make_shared<ge::op::BNLL>(op_name);
auto op = std::make_shared<ge::op::BNLL>(name);
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
@ -1466,15 +1478,16 @@ struct FloorFunctor : public BaseDefaultFunctor<FloorFunctor>
#endif
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
std::string op_name = cv::format("floor_%d", index);
auto op = std::make_shared<ge::op::Floor>(op_name);
auto op = std::make_shared<ge::op::Floor>(name);
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
@ -2320,7 +2333,9 @@ struct PowerFunctor : public BaseFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
CV_Error(Error::StsNotImplemented, "");
}
@ -2574,7 +2589,9 @@ struct ChannelsPReLUFunctor : public BaseFunctor
#endif // HAVE_HALIDE
#ifdef HAVE_CANN
Ptr<BackendNode> initCannOp(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes)
Ptr<BackendNode> initCannOp(const std::string& name,
const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes)
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
@ -2582,14 +2599,13 @@ struct ChannelsPReLUFunctor : public BaseFunctor
auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
std::string op_name = cv::format("prelu_%d", index);
auto op = std::make_shared<ge::op::PRelu>(op_name);
auto op = std::make_shared<ge::op::PRelu>(name);
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
op->update_input_desc_x(*x_desc);
std::vector<int> shape_{scale.size[0]}; // scale should be a 1-d tensor of shape [n], but it is a 2-d Mat of shape [n, 1] in OpenCV
auto op_const_slope = std::make_shared<CannConstOp>(scale.data, scale.type(), shape_, cv::format("%s_weight", op_name.c_str()));
auto op_const_slope = std::make_shared<CannConstOp>(scale.data, scale.type(), shape_, cv::format("%s_weight", name.c_str()));
op->set_input_weight(*(op_const_slope->getOp()));
op->update_input_desc_weight(*(op_const_slope->getTensorDesc()));


@ -849,7 +849,8 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(inputsWrapper.size() == 2);
CV_Assert(nodes.size() == 2);
@ -866,22 +867,22 @@ public:
// add, mul, div, max, min
switch (op)
{
#define BUILD_CANN_ELTWISE_OP(op_type, class_name, op_name) \
case op_type: { \
auto eltwise_op = \
std::make_shared<ge::op::class_name>(op_name); \
eltwise_op->set_input_x1_by_name(*op_x1, "y"); \
eltwise_op->set_input_x2_by_name(*op_x2, "y"); \
eltwise_op->update_input_desc_x1(*x1_desc); \
eltwise_op->update_input_desc_x2(*x2_desc); \
eltwise_op->update_output_desc_y(*output_desc); \
eltwise_operator = eltwise_op; \
#define BUILD_CANN_ELTWISE_OP(op_type, class_name, op_name) \
case op_type: { \
auto eltwise_op = \
std::make_shared<ge::op::class_name>(op_name); \
eltwise_op->set_input_x1_by_name(*op_x1, x1->name.c_str()); \
eltwise_op->set_input_x2_by_name(*op_x2, x2->name.c_str()); \
eltwise_op->update_input_desc_x1(*x1_desc); \
eltwise_op->update_input_desc_x2(*x2_desc); \
eltwise_op->update_output_desc_y(*output_desc); \
eltwise_operator = eltwise_op; \
} break;
BUILD_CANN_ELTWISE_OP(SUM, Add, cv::format("add_%d", index));
BUILD_CANN_ELTWISE_OP(PROD, Mul, cv::format("mul_%d", index));
BUILD_CANN_ELTWISE_OP(DIV, Xdivy, cv::format("div_%d", index));
BUILD_CANN_ELTWISE_OP(MAX, Maximum, cv::format("max_%d", index));
BUILD_CANN_ELTWISE_OP(MIN, Minimum, cv::format("min_%d", index));
BUILD_CANN_ELTWISE_OP(SUM, Add, name);
BUILD_CANN_ELTWISE_OP(PROD, Mul, name);
BUILD_CANN_ELTWISE_OP(DIV, Xdivy, name);
BUILD_CANN_ELTWISE_OP(MAX, Maximum, name);
BUILD_CANN_ELTWISE_OP(MIN, Minimum, name);
#undef BUILD_CANN_ELTWISE_OP
default: CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
}


@ -176,15 +176,15 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
auto x_desc = x->getTensorDesc();
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
std::string op_name = cv::format("flatten_%d", index);
auto op = std::make_shared<ge::op::FlattenV2>(op_name);
auto op = std::make_shared<ge::op::FlattenV2>(name);
// set attributes
int num_axes = x->host->dims;
@ -194,7 +194,7 @@ public:
op->set_attr_end_axis(end_axis);
// set inputs
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
op->update_input_desc_x(*x_desc);
// set outputs
op->update_output_desc_y(*output_desc);


@ -662,15 +662,15 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto x1 = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
auto x1_desc = x1->getTensorDesc();
auto op_x1 = nodes[0].dynamicCast<CannBackendNode>()->getOp();
auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
std::string op_name = cv::format("matmul_%d", index);
auto op = std::make_shared<ge::op::MatMulV2>(op_name);
auto op = std::make_shared<ge::op::MatMulV2>(name);
if (!blobs.empty()) // if B is const
{
@ -682,7 +682,7 @@ public:
// set inputs
// set inputs : x2 (weight)
auto op_const_weight = std::make_shared<CannConstOp>(weightsMat.data, weightsMat.type(), shape(weightsMat), cv::format("%s_w", op_name.c_str()));
auto op_const_weight = std::make_shared<CannConstOp>(weightsMat.data, weightsMat.type(), shape(weightsMat), cv::format("%s_w", name.c_str()));
op->set_input_x2_by_name(*(op_const_weight->getOp()), "y");
op->update_input_desc_x2(*(op_const_weight->getTensorDesc()));
}
@ -705,12 +705,12 @@ public:
// set inputs
// set inputs : x1 (input)
op->set_input_x1_by_name(*op_x1, "y");
op->set_input_x1_by_name(*op_x1, x1->name.c_str());
op->update_input_desc_x1(*x1_desc);
// set inputs : bias (bias)
auto bias_mat = bias ? biasMat : Mat::zeros(1, weightsMat.size[0], weightsMat.type());
std::vector<int> bias_shape{weightsMat.size[0]};
auto op_const_bias = std::make_shared<CannConstOp>(bias_mat.data, bias_mat.type(), bias_shape, cv::format("%s_b", op_name.c_str()));
auto op_const_bias = std::make_shared<CannConstOp>(bias_mat.data, bias_mat.type(), bias_shape, cv::format("%s_b", name.c_str()));
op->set_input_bias(*(op_const_bias->getOp()));
op->update_input_desc_bias(*(op_const_bias->getTensorDesc()));


@ -445,13 +445,13 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
// create operator
std::string op_name = cv::format("lrn_%d", index);
auto op = std::make_shared<ge::op::LRN>(op_name);
auto op = std::make_shared<ge::op::LRN>(name);
// set attributes
op->set_attr_depth_radius(size);
@ -465,7 +465,7 @@ public:
// set inputs
// set inputs : x
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);


@ -709,7 +709,8 @@ public:
#endif
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(inputsWrapper.size() == 2);
CV_Assert(nodes.size() == 2);
@ -726,22 +727,22 @@ public:
// add, mul, div, max, min
switch (op)
{
#define BUILD_CANN_ELTWISE_OP(op_type, class_name, op_name) \
case op_type: { \
auto eltwise_op = \
std::make_shared<ge::op::class_name>(op_name); \
eltwise_op->set_input_x1_by_name(*op_x1, "y"); \
eltwise_op->set_input_x2_by_name(*op_x2, "y"); \
eltwise_op->update_input_desc_x1(*x1_desc); \
eltwise_op->update_input_desc_x2(*x2_desc); \
eltwise_op->update_output_desc_y(*output_desc); \
eltwise_operator = eltwise_op; \
#define BUILD_CANN_ELTWISE_OP(op_type, class_name, op_name) \
case op_type: { \
auto eltwise_op = \
std::make_shared<ge::op::class_name>(op_name); \
eltwise_op->set_input_x1_by_name(*op_x1, x1->name.c_str()); \
eltwise_op->set_input_x2_by_name(*op_x2, x2->name.c_str()); \
eltwise_op->update_input_desc_x1(*x1_desc); \
eltwise_op->update_input_desc_x2(*x2_desc); \
eltwise_op->update_output_desc_y(*output_desc); \
eltwise_operator = eltwise_op; \
} break;
BUILD_CANN_ELTWISE_OP(OPERATION::ADD, Add, cv::format("add_%d", index));
BUILD_CANN_ELTWISE_OP(OPERATION::PROD, Mul, cv::format("mul_%d", index));
BUILD_CANN_ELTWISE_OP(OPERATION::DIV, Xdivy, cv::format("div_%d", index));
BUILD_CANN_ELTWISE_OP(OPERATION::MAX, Maximum, cv::format("max_%d", index));
BUILD_CANN_ELTWISE_OP(OPERATION::MIN, Minimum, cv::format("min_%d", index));
BUILD_CANN_ELTWISE_OP(OPERATION::ADD, Add, name);
BUILD_CANN_ELTWISE_OP(OPERATION::PROD, Mul, name);
BUILD_CANN_ELTWISE_OP(OPERATION::DIV, Xdivy, name);
BUILD_CANN_ELTWISE_OP(OPERATION::MAX, Maximum, name);
BUILD_CANN_ELTWISE_OP(OPERATION::MIN, Minimum, name);
#undef BUILD_CANN_ELTWISE_OP
default: CV_Error(Error::StsNotImplemented, "Unsupported eltwise operation");
}


@ -222,13 +222,13 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
// create operator
std::string op_name = cv::format("pad_%d", index);
auto op = std::make_shared<ge::op::PadV3>(op_name);
auto op = std::make_shared<ge::op::PadV3>(name);
// set attributes
op->set_attr_mode(paddingType.c_str());
@ -236,7 +236,7 @@ public:
// set inputs
// set inputs : x
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
// set inputs : paddings
@ -248,13 +248,13 @@ public:
}
std::vector<int> pads_shape{(int)pads.size()};
Mat paddings_mat(pads_shape, CV_32S, &pads[0]);
auto op_const_paddings = std::make_shared<CannConstOp>(paddings_mat.data, paddings_mat.type(), pads_shape, cv::format("%s_paddings", op_name.c_str()));
auto op_const_paddings = std::make_shared<CannConstOp>(paddings_mat.data, paddings_mat.type(), pads_shape, cv::format("%s_paddings", name.c_str()));
op->set_input_paddings(*(op_const_paddings->getOp()));
op->update_input_desc_paddings(*(op_const_paddings->getTensorDesc()));
// set inputs : constant_values
std::vector<int> constant_values_shape{1};
Mat constant_values_mat(1, 1, CV_32F, Scalar(paddingValue));
auto op_const_constant_values = std::make_shared<CannConstOp>(constant_values_mat.data, constant_values_mat.type(), constant_values_shape, cv::format("%s_constant_values", op_name.c_str()));
auto op_const_constant_values = std::make_shared<CannConstOp>(constant_values_mat.data, constant_values_mat.type(), constant_values_shape, cv::format("%s_constant_values", name.c_str()));
op->set_input_constant_values(*(op_const_constant_values->getOp()));
op->update_input_desc_constant_values(*(op_const_constant_values->getTensorDesc()));


@ -441,13 +441,13 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
// create operator
std::string op_name = cv::format("permute_%d", index);
auto op = std::make_shared<ge::op::Permute>(op_name);
auto op = std::make_shared<ge::op::Permute>(name);
// set attributes
op->set_attr_order(ge::Operator::OpListInt(
@ -457,7 +457,7 @@ public:
// set inputs
// set inputs : x
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);


@ -548,18 +548,17 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
auto x_desc = x->getTensorDesc();
auto output_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
std::string op_name_base = cv::format("pooling_%d", index);
if (type == MAX)
{
std::string op_name = cv::format("max_%s", op_name_base.c_str());
auto op = std::make_shared<ge::op::MaxPoolV3>(op_name);
auto op = std::make_shared<ge::op::MaxPoolV3>(name);
// set attributes
op->set_attr_ksize(ge::Operator::OpListInt(
@ -580,7 +579,7 @@ public:
op->set_attr_ceil_mode(ceilMode);
// set inputs
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
op->update_input_desc_x(*x_desc);
// set outputs
op->update_output_desc_y(*output_desc);
@ -589,8 +588,7 @@ public:
}
else if (type == AVE)
{
std::string op_name = cv::format("avg_%s", op_name_base.c_str());
auto op = std::make_shared<ge::op::AvgPoolV2>(op_name);
auto op = std::make_shared<ge::op::AvgPoolV2>(name);
// set attributes
op->set_attr_ksize(ge::Operator::OpListInt(
@ -612,7 +610,7 @@ public:
op->set_attr_exclusive(cann_exclusive);
// set inputs
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
op->update_input_desc_x(*x_desc);
// set outputs
op->update_output_desc_y(*output_desc);


@ -327,13 +327,13 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
// create operator
std::string op_name = cv::format("reshape_%d", index);
auto op = std::make_shared<ge::op::Reshape>(op_name);
auto op = std::make_shared<ge::op::Reshape>(name);
// set attributes
op->set_attr_axis(axis);
@ -342,13 +342,13 @@ public:
// set inputs
// set inputs : x
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
// set inputs : shape
std::vector<int> shape_of_shape{(int)newShapeDesc.size()};
Mat shape_mat(shape_of_shape, CV_32S, newShapeDesc.data());
auto op_const_shape = std::make_shared<CannConstOp>(shape_mat.data, shape_mat.type(), shape_of_shape, cv::format("%s_shape", op_name.c_str()));
auto op_const_shape = std::make_shared<CannConstOp>(shape_mat.data, shape_mat.type(), shape_of_shape, cv::format("%s_shape", name.c_str()));
op->set_input_shape(*(op_const_shape->getOp()));
op->update_input_desc_shape(*(op_const_shape->getTensorDesc()));


@ -312,7 +312,8 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
auto x_desc = x->getTensorDesc();
@ -320,23 +321,21 @@ public:
auto output_y_desc = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
// create operator
std::string op_name = cv::format("resize_%d", index);
if (interpolation == "nearest")
{
auto op = std::make_shared<ge::op::ResizeNearestNeighborV2>(op_name);
auto op = std::make_shared<ge::op::ResizeNearestNeighborV2>(name);
// set attributes
op->set_attr_align_corners(alignCorners);
op->set_attr_half_pixel_centers(halfPixelCenters);
// set inputs : x
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
op->update_input_desc_x(*x_desc);
// set inputs : size
std::vector<int> shape_of_size_mat{2};
Mat size_mat(2, 1, CV_32S, Scalar(outHeight, outWidth));
auto op_const_size = std::make_shared<CannConstOp>(size_mat.data, size_mat.type(), shape_of_size_mat, cv::format("%s_size", op_name.c_str()));
auto op_const_size = std::make_shared<CannConstOp>(size_mat.data, size_mat.type(), shape_of_size_mat, cv::format("%s_size", name.c_str()));
op->set_input_size(*(op_const_size->getOp()));
op->update_input_desc_size(*(op_const_size->getTensorDesc()));
@ -347,21 +346,17 @@ public:
}
else if (interpolation == "opencv_linear" || interpolation == "bilinear")
{
auto op = std::make_shared<ge::op::ResizeBilinearV2>(op_name);
auto op = std::make_shared<ge::op::ResizeBilinearV2D>(name);
// set attributes
op->set_attr_align_corners(alignCorners);
op->set_attr_half_pixel_centers(halfPixelCenters);
std::vector<int64_t> target_size{(int64_t)outHeight, (int64_t)outWidth};
op->set_attr_size(target_size);
// set inputs : x
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
op->update_input_desc_x(*x_desc);
// set inputs : size
std::vector<int> shape_of_size_mat{2};
Mat size_mat(2, 1, CV_32S, Scalar(outHeight, outWidth));
auto op_const_size = std::make_shared<CannConstOp>(size_mat.data, size_mat.type(), shape_of_size_mat, cv::format("%s_size", op_name.c_str()));
op->set_input_size(*(op_const_size->getOp()));
op->update_input_desc_size(*(op_const_size->getTensorDesc()));
// set outputs
op->update_output_desc_y(*output_y_desc);


@ -634,18 +634,74 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
CV_Assert(sliceRanges.size() == 1);
CV_Assert(sliceSteps.size() == 1);
CV_Assert(sliceRanges[0].size() == sliceSteps[0].size());
bool isSplit = sliceRanges.size() > 1;
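// a Slice layer carrying multiple slice ranges comes from ONNX Split
// (one range per output), so it is lowered to CANN SplitV below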
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
if (isSplit)
{
// create operator
auto op = std::make_shared<ge::op::SplitV>(name);
// set attr
int n_split = static_cast<int>(sliceRanges.size());
op->set_attr_num_split(n_split);
// set inputs
// set inputs : x
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, x->name.c_str());
auto desc_x = x->getTensorDesc();
op->update_input_desc_x(*desc_x);
// set inputs : size_splits
std::vector<int> size_splits(n_split);
int cnt_split = 0;
for (size_t i = 0; i < sliceRanges.size() - 1; ++i)
{
auto target_range = sliceRanges[i].back();
size_splits[i] = target_range.end - target_range.start;
cnt_split += size_splits[i];
}
auto shape_x = desc_x->GetShape().GetDims();
CV_CheckGT(shape_x[axis], cnt_split, "DNN/CANN: invalid splits");
size_splits[n_split - 1] = shape_x[axis] - cnt_split;
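// e.g. (hypothetical) an axis of size 8 whose first two ranges span
// 3 and 2 elements gives size_splits = {3, 2, 3}; the last entry
// always takes the remainder of the axis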
std::vector<int> shape_size_splits{(int)size_splits.size()};
Mat size_splits_mat(shape_size_splits, CV_32S, size_splits.data());
auto op_const_size_splits = std::make_shared<CannConstOp>(size_splits_mat.data, size_splits_mat.type(), shape_size_splits, cv::format("%s_size_splits", name.c_str()));
op->set_input_size_splits(*(op_const_size_splits->getOp()));
op->update_input_desc_size_splits(*(op_const_size_splits->getTensorDesc()));
// set inputs : split_dim
Mat split_dim_mat(1, 1, CV_32S, Scalar(axis));
std::vector<int> split_dim_shape{1};
auto op_const_split_dim = std::make_shared<CannConstOp>(split_dim_mat.data, split_dim_mat.type(), split_dim_shape, cv::format("%s_split_dim", name.c_str()));
op->set_input_split_dim(*(op_const_split_dim->getOp()));
op->update_input_desc_split_dim(*(op_const_split_dim->getTensorDesc()));
// set outputs
op->create_dynamic_output_y(n_split);
for (uint32_t i = 0; i < n_split; ++i)
{
auto desc_output_y_i = std::make_shared<ge::TensorDesc>(ge::Shape(), ge::FORMAT_NCHW, ge::DT_FLOAT);
op->update_dynamic_output_desc_y(i, *desc_output_y_i);
}
return Ptr<BackendNode>(new CannBackendNode(op));
}
// ONNX-Slice
CV_CheckEQ(sliceRanges.size(), (size_t)1, "");
if (hasSteps)
{
CV_CheckEQ(sliceSteps.size(), (size_t)1, "DNN/CANN/Slice: no support to multiple slices");
CV_CheckEQ(sliceRanges[0].size(), sliceSteps[0].size(), "DNN/CANN/Slice: number of slice ranges does not match number of slice steps");
}
const int dims = x->host->dims;
// create operator
std::string op_name = cv::format("slice_%d", index);
auto op = std::make_shared<ge::op::StridedSliceV2>(op_name);
auto op = std::make_shared<ge::op::StridedSliceV2>(name);
// retrieve begins, ends, axes and steps
std::vector<int> begins, ends, axes, steps;
@ -654,34 +710,37 @@ public:
begins.push_back(sliceRanges[0][i].start);
ends.push_back(sliceRanges[0][i].end);
axes.push_back(i);
steps.push_back(sliceSteps[0][i]);
if (hasSteps)
steps.push_back(sliceSteps[0][i]);
else
steps.push_back(1); // put 1 by default
}
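// StridedSliceV2 still needs explicit strides, so the newly supported
// Slice without steps gets a stride of 1 on every axis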
std::vector<int> shape_{dims};
// set inputs
// set inputs : x
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);
// set inputs : begin
Mat begin_mat(shape_, CV_32S, &begins[0]);
auto op_const_begin = std::make_shared<CannConstOp>(begin_mat.data, begin_mat.type(), shape_, cv::format("%s_begin", op_name.c_str()));
auto op_const_begin = std::make_shared<CannConstOp>(begin_mat.data, begin_mat.type(), shape_, cv::format("%s_begin", name.c_str()));
op->set_input_begin(*(op_const_begin->getOp()));
op->update_input_desc_begin(*(op_const_begin->getTensorDesc()));
// set inputs : end
Mat end_mat(shape_, CV_32S, &ends[0]);
auto op_const_end = std::make_shared<CannConstOp>(end_mat.data, end_mat.type(), shape_, cv::format("%s_end", op_name.c_str()));
auto op_const_end = std::make_shared<CannConstOp>(end_mat.data, end_mat.type(), shape_, cv::format("%s_end", name.c_str()));
op->set_input_end(*(op_const_end->getOp()));
op->update_input_desc_end(*(op_const_end->getTensorDesc()));
// set inputs : axes
Mat axes_mat(shape_, CV_32S, &axes[0]);
auto op_const_axes = std::make_shared<CannConstOp>(axes_mat.data, axes_mat.type(), shape_, cv::format("%s_axes", op_name.c_str()));
auto op_const_axes = std::make_shared<CannConstOp>(axes_mat.data, axes_mat.type(), shape_, cv::format("%s_axes", name.c_str()));
op->set_input_axes(*(op_const_axes->getOp()));
op->update_input_desc_axes(*(op_const_axes->getTensorDesc()));
// set inputs : strides
Mat strides_mat(shape_, CV_32S, &steps[0]);
auto op_const_strides = std::make_shared<CannConstOp>(strides_mat.data, strides_mat.type(), shape_, cv::format("%s_strides", op_name.c_str()));
auto op_const_strides = std::make_shared<CannConstOp>(strides_mat.data, strides_mat.type(), shape_, cv::format("%s_strides", name.c_str()));
op->set_input_strides(*(op_const_strides->getOp()));
op->update_input_desc_strides(*(op_const_strides->getTensorDesc()));


@ -365,13 +365,13 @@ public:
}
#ifdef HAVE_CANN
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper, const int index, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
virtual Ptr<BackendNode> initCann(const std::vector<Ptr<BackendWrapper> > &inputsWrapper,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto x = inputsWrapper[0].dynamicCast<CannBackendWrapper>();
// create operator
std::string op_name = cv::format("softmax_%d", index);
auto op = std::make_shared<ge::op::SoftmaxV2>(op_name);
auto op = std::make_shared<ge::op::SoftmaxV2>(name);
// set attributes
op->set_attr_axes(ge::Operator::OpListInt(
@ -381,7 +381,7 @@ public:
// set inputs
// set inputs : x
auto op_x = nodes[0].dynamicCast<CannBackendNode>()->getOp();
op->set_input_x_by_name(*op_x, "y");
op->set_input_x_by_name(*op_x, x->name.c_str());
auto x_desc = x->getTensorDesc();
op->update_input_desc_x(*x_desc);


@ -124,6 +124,30 @@ void NetImplCann::initBackend(const std::vector<LayerPin>& blobsToKeep_)
if (!newWasSupported)
return ;
// initialize the name of each output blob wrapper
for (MapIdToLayerData::const_iterator it = layers.begin(); it != layers.end(); ++it)
{
const LayerData& ld = it->second;
if (ld.id == 0)
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
auto cannWrapper = ld.outputBlobsWrappers[i].dynamicCast<CannBackendWrapper>();
// cannWrapper->name = netInputLayer->outNames.empty() ? cv::format("%s_%d", ld.name.c_str(), i) : netInputLayer->outNames[i];
cannWrapper->name = std::string("y");
}
}
else
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); ++i)
{
auto cannWrapper = ld.outputBlobsWrappers[i].dynamicCast<CannBackendWrapper>();
// cannWrapper->name = ld.outputBlobsWrappers.size() > 1 ? (ld.name + ":" + std::to_string(i)) : ld.name;
cannWrapper->name = ld.outputBlobsWrappers.size() > 1 ? (std::string("y") + std::to_string(i)) : std::string("y");
}
}
}
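// these wrapper names are what the set_input_*_by_name(..., x->name.c_str())
// calls in the layer implementations look up on the producer operator:
// a single-output CANN operator exposes output "y", a multi-output one
// exposes "y0", "y1", ... (e.g. the dynamic outputs of SplitV)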
// convert layers to CANN operators,
// collect graph input and output operators,
// collect input and output wrappers
@ -141,15 +165,16 @@ void NetImplCann::initBackend(const std::vector<LayerPin>& blobsToKeep_)
{
for (int i = 0; i < ld.outputBlobsWrappers.size(); i++)
{
std::string inputName = netInputLayer->outNames.empty() ? cv::format("%s_%d", ld.name.c_str(), i) : netInputLayer->outNames[i];
auto inputOp = std::make_shared<ge::op::Data>(inputName);
// retrieve tensor description
auto wrapper = ld.outputBlobsWrappers[i];
graphInputWrappers.push_back(wrapper);
auto cannWrapper = wrapper.dynamicCast<CannBackendWrapper>();
CV_Assert(!cannWrapper.empty());
// create graph input op
std::string inputOpName = netInputLayer->outNames.empty() ? cv::format("%s_%d", ld.name.c_str(), i) : netInputLayer->outNames[i];
auto inputOp = std::make_shared<ge::op::Data>(inputOpName);
inputOp->update_input_desc_x(*(cannWrapper->desc_));
inputOp->update_output_desc_y(*(cannWrapper->desc_));
@ -170,14 +195,14 @@ void NetImplCann::initBackend(const std::vector<LayerPin>& blobsToKeep_)
{
layerInputNodes.push_back(netInputNodes[layerInputOid]);
}
else // here we do not consider an op with multiple outputs
else
{
layerInputNodes.push_back(layers[layerInputLid].backendNodes[preferableBackend]);
}
}
CV_LOG_INFO(NULL, "DNN/CANN: converting layer " << ld.name << "@" << ld.type << "@" << ld.id << " to CANN operator");
auto backendNode = layer->initCann(ld.inputBlobsWrappers, ld.id, layerInputNodes);
auto backendNode = layer->initCann(ld.inputBlobsWrappers, layerInputNodes); // it's ok if ld.name is empty
// collect outputs
bool isOutputNode = ld.consumers.size() == 0 ? true : false;
@ -201,7 +226,7 @@ void NetImplCann::initBackend(const std::vector<LayerPin>& blobsToKeep_)
// build graph from collected graph inputs and outputs
CV_LOG_INFO(NULL, "DNN/CANN: building ge::Graph");
std::string graphName = cv::format("graph_%d", 0);
std::string graphName = cv::format("graph_%d", networkId);
std::shared_ptr<ge::Graph> graph = std::make_shared<ge::Graph>(graphName.c_str());
(void)graph->SetInputs(graphInputOps);
(void)graph->SetOutputs(graphOutputOps);
@ -292,9 +317,9 @@ std::shared_ptr<ge::ModelBufferData> compileCannGraph(std::shared_ptr<ge::Graph>
#if 0
// (optional). Dump model
AscendString graph_name;
graph.GetName(graph_name);
aclgrphDumpGraph(graph, graph_name.GetString(), 7);
ge::AscendString graph_name;
graph->GetName(graph_name);
aclgrphDumpGraph(*graph, graph_name.GetString(), 7);
// (optional). Save model
aclgrphSaveModel(graph_name.GetString(), *om_model);
#endif


@ -177,7 +177,7 @@ void CannNet::bindInputWrappers(const std::vector<Ptr<BackendWrapper>>& inputWra
void CannNet::bindOutputWrappers(const std::vector<Ptr<BackendWrapper>>& outputWrappers)
{
CV_Assert(outputWrappers.size() == getOutputNum());
CV_CheckEQ(outputWrappers.size(), getOutputNum(), "DNN/CANN: Built graph does not have the same number of outputs of model description");
for (int i = 0; i < outputWrappers.size(); ++i)
{
auto wrapper = outputWrappers[i].dynamicCast<CannBackendWrapper>();


@ -106,6 +106,7 @@ CV__DNN_INLINE_NS_END
Mat* host;
std::shared_ptr<ge::TensorDesc> desc_;
std::string name;
};
class CannNet