Merge pull request #24309 from dkurt:gemm_ov_hotfix
Update OpenVINO init of new GEMM layer #24309

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

CI validation:

- [x] 2022.1.0: https://pullrequest.opencv.org/buildbot/builders/precommit_custom_linux/builds/100368
- [ ] 2021.4.2: https://pullrequest.opencv.org/buildbot/builders/precommit_custom_linux/builds/100373

Checklist:

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [x] There are accuracy tests, performance tests and test data in the opencv_extra repository, if applicable. The patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
This commit is contained in:
parent bb171a0c05
commit 2b6d0f36f0
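For context (not part of this commit), the code path touched by this fix is reached whenever a network containing the new Gemm layer is scheduled on the OpenVINO backend. A minimal, hypothetical usage sketch, assuming OpenCV is built with OpenVINO support; `model.onnx` and the input name `"A"` are placeholders:

```cpp
#include <opencv2/dnn.hpp>

int main()
{
    // Placeholder model path; any ONNX graph containing a Gemm node will do.
    cv::dnn::Net net = cv::dnn::readNet("model.onnx");

    // Selecting the Inference Engine (OpenVINO) backend is what routes the
    // Gemm layer through the initNgraph() code changed in the diff below.
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_CPU);

    cv::Mat A = cv::Mat::ones(1, 256, CV_32F);  // dummy input, shape is illustrative
    net.setInput(A, "A");                       // placeholder input name
    cv::Mat out = net.forward();
    return 0;
}
```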
```diff
@@ -160,8 +160,7 @@ PERF_TEST_P_(Gemm, gemm)
     }

     Net net;
-    int id = net.addLayerToPrev(lp.name, lp.type, lp);
-    net.connect(0, 0, id, 0);
+    net.addLayerToPrev(lp.name, lp.type, lp);
     net.setPreferableBackend(backend_id);
     net.setPreferableTarget(target_id);

```
```diff
@@ -221,14 +220,13 @@ PERF_TEST_P_(Gemm, innerproduct)
     }

     Net net;
-    int id = net.addLayerToPrev(lp.name, lp.type, lp);
-    net.connect(0, 0, id, 0);
+    net.addLayerToPrev(lp.name, lp.type, lp);
     net.setPreferableBackend(backend_id);
     net.setPreferableTarget(target_id);

     // warmup
     {
-        std::vector<std::string> input_names(2);
+        std::vector<std::string> input_names(1);
         input_names[0] = "A";
         net.setInputsNames(input_names);
         net.setInput(A, input_names[0]);
```
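Outside the perf harness, the single-input construction the updated tests rely on looks roughly like the sketch below. It mirrors the diff above; the layer parameters are illustrative, and the constant second operand is assumed to be supplied as a blob:

```cpp
#include <opencv2/dnn.hpp>

// Sketch of the pattern used by the updated perf tests: one Gemm layer
// attached directly to a single named network input "A".
static cv::dnn::Net buildSingleGemmNet(const cv::Mat& B)
{
    cv::dnn::LayerParams lp;
    lp.name = "testGemm";          // illustrative layer name
    lp.type = "Gemm";
    lp.blobs.push_back(B);         // constant second operand held as a blob

    cv::dnn::Net net;
    net.addLayerToPrev(lp.name, lp.type, lp);  // single layer, connected to the net input

    std::vector<std::string> input_names(1);   // one runtime input, as in the diff
    input_names[0] = "A";
    net.setInputsNames(input_names);
    return net;
}
```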
```diff
@@ -287,33 +287,45 @@ public:
     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
     {
-        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
+        auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
         std::shared_ptr<ngraph::Node> matmul;
-        int axis = -2;

         if (nodes.size() == 2)
         {
             auto& inp2 = nodes[1].dynamicCast<InfEngineNgraphNode>()->node;
-            matmul = std::make_shared<ngraph::op::MatMul>(ieInpNode, inp2, transA, transB);
+            matmul = std::make_shared<ngraph::op::MatMul>(ieInpNode, inp2, trans_a, trans_b);
         }
         else
         {
-            std::vector<int> shape(1 + normalize_axis(axis, ieInpNode->get_shape().size()), 0);
+            std::shared_ptr<ngraph::Node> ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, getShape(blobs[0]), blobs[0].data);
+
+            int flatten_axis = ieInpNode.get_shape().size() - ieWeights->get_shape().size();
+            if (flatten_axis > 0) {
+                std::vector<int> shape(1 + flatten_axis, 0);
                 shape[shape.size() - 1] = -1;
-            auto inp = std::make_shared<ngraph::op::v1::Reshape>(
+                ieInpNode = std::make_shared<ngraph::op::v1::Reshape>(
                     ieInpNode,
                     std::make_shared<ngraph::op::Constant>(ngraph::element::i32, ngraph::Shape{shape.size()}, shape.data()),
                     true
                 );
-
-            std::vector<size_t> weight_shape{(size_t)blobs[0].size[0], (size_t)blobs[0].size[1]};
-            auto ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, weight_shape, blobs[0].data);
-            matmul = std::make_shared<ngraph::op::MatMul>(inp, ieWeights, transA, transB);
+            }
+            matmul = std::make_shared<ngraph::op::MatMul>(ieInpNode, ieWeights, trans_a, trans_b);
         }
+        if (alpha != 1.0f) {
+            matmul = std::make_shared<ngraph::op::v1::Multiply>(matmul,
+                std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &alpha)
+            );
+        }

         if (have_bias && const_C) {
-            auto bias_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32,
-                                                                    ngraph::Shape{(size_t)blobs.back().size[1]}, blobs.back().data);
+            Mat bias = blobs.back();
+            auto shape = bias.total() == bias.size[0] ? ngraph::Shape{bias.total()} : getShape(bias);
+            std::shared_ptr<ngraph::Node> bias_node = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, shape, bias.data);
+            if (beta != 1.0f) {
+                bias_node = std::make_shared<ngraph::op::v1::Multiply>(bias_node,
+                    std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape{1}, &beta)
+                );
+            }
             matmul = std::make_shared<ngraph::op::v1::Add>(matmul, bias_node, ngraph::op::AutoBroadcastType::NUMPY);
         }
         return Ptr<BackendNode>(new InfEngineNgraphNode(matmul));
```
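For reference, the subgraph built above follows the GEMM definition Y = alpha * op(A) * op(B) + beta * C: a MatMul for the product, a Multiply by a scalar constant when alpha != 1, an optional Multiply of the bias by beta, and a NUMPY-broadcast Add for C. The reshape branch builds a pattern of 1 + flatten_axis entries ({0, ..., 0, -1}), which keeps the leading input dimensions and collapses the trailing ones before the MatMul. The same arithmetic can be written with OpenCV's core API; the sketch below only illustrates the expected numerics, it is not the layer implementation:

```cpp
#include <opencv2/core.hpp>

// Reference computation for Y = alpha * op(A) * op(B) + beta * C on 2-D
// matrices. cv::gemm implements exactly this contract.
static cv::Mat referenceGemm(const cv::Mat& A, const cv::Mat& B, const cv::Mat& C,
                             float alpha, float beta, bool trans_a, bool trans_b)
{
    int flags = (trans_a ? cv::GEMM_1_T : 0) | (trans_b ? cv::GEMM_2_T : 0);
    cv::Mat Y;
    // C must match the output size; pass an empty Mat and beta = 0 when
    // there is no bias term.
    cv::gemm(A, B, alpha, C, beta, Y, flags);
    return Y;
}
```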