Merge pull request #25458 from alexlyulkov:al/dnn-openvino-int-support

Added int support for OpenVINO dnn backend #25458

Modified dnn OpenVINO integration to support type inference and int operations.

Added OpenVINO support to Cast, CumSum, Expand, Gather, GatherElements, Scatter, ScatterND, Tile layers.
I tried to add Reduce layer, but looks like OpenVINO uses float values inside Reduce operation so it can't pass our int tests.

OpenVINO uses int32 precision for int64 operations, so I've modified input values for int64 tests when backend is OpenVINO.

OpenVINO has a strange behavior with custom layers and int64 values. After model compilation OpenVINO may change types, so the model can have different output type. That's why these tests were disabled:
- Test_ArgMax_Int.random/0, where GetParam() = (4, NGRAPH/CPU)
- Test_ArgMax_Int.random/6, where GetParam() = (11, NGRAPH/CPU)
- Test_Reduce_Int.random/6, where GetParam() = (11, NGRAPH/CPU)
- Test_Reduce_Int.two_axes/6, where GetParam() = (11, NGRAPH/CPU)

Also these tests were temporary disabled, they didn't work on both 4.x and 5.x branches:
- Test_Caffe_layers.layer_prelu_fc/0, where GetParam() = NGRAPH/CPU
- Test_ONNX_layers.LSTM_Activations/0, where GetParam() = NGRAPH/CPU
- Test_ONNX_layers.Quantized_Convolution/0, where GetParam() = NGRAPH/CPU
- Test_ONNX_layers.Quantized_Eltwise_Scalar/0, where GetParam() = NGRAPH/CPU
- Test_TFLite.EfficientDet_int8/0, where GetParam() = NGRAPH/CPU


### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [ ] There is a reference to the original bug report and related work
- [x] There is accuracy test, performance test and test data in opencv_extra repository, if applicable
      Patch to opencv_extra has the same branch name.
- [x] The feature is well documented and sample code can be built with the project CMake
This commit is contained in:
alexlyulkov 2024-05-15 11:51:59 +03:00 committed by GitHub
parent 5bdc41964a
commit 6af0394cd2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
22 changed files with 374 additions and 50 deletions

View File

@ -15,6 +15,9 @@ jobs:
Ubuntu2004-x64: Ubuntu2004-x64:
uses: opencv/ci-gha-workflow/.github/workflows/OCV-PR-5.x-U20.yaml@main uses: opencv/ci-gha-workflow/.github/workflows/OCV-PR-5.x-U20.yaml@main
Ubuntu2004-x64-OpenVINO:
uses: opencv/ci-gha-workflow/.github/workflows/OCV-PR-5.x-U20-OpenVINO.yaml@main
Ubuntu2204-x64: Ubuntu2204-x64:
uses: opencv/ci-gha-workflow/.github/workflows/OCV-PR-5.x-U22.yaml@main uses: opencv/ci-gha-workflow/.github/workflows/OCV-PR-5.x-U22.yaml@main

View File

@ -48,6 +48,48 @@ ngraphWrappers(const std::vector<Ptr<BackendWrapper> >& ptrs)
return wrappers; return wrappers;
} }
ov::element::Type cvTypeToOvType(MatType cvType)
{
switch (cvType) {
case CV_32F:
return ov::element::f32;
case CV_8U:
return ov::element::u8;
case CV_8S:
return ov::element::i8;
case CV_32S:
return ov::element::i32;
case CV_64S:
return ov::element::i64;
default:
CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(cvType).c_str()));
}
}
ov::element::Type cvTypeToOvType(const cv::Mat& mat)
{
return cvTypeToOvType(mat.depth());
}
MatType ovTypeToCvType(ov::element::Type ovType)
{
switch (ovType) {
case ov::element::f32:
return CV_32F;
case ov::element::u8:
return CV_8U;
case ov::element::i8:
return CV_8S;
case ov::element::i32:
return CV_32S;
case ov::element::i64:
return CV_64S;
default:
CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", ovType.get_type_name().c_str()));
}
}
class NgraphCustomOp: public ov::op::Op { class NgraphCustomOp: public ov::op::Op {
public: public:
OPENVINO_OP(kOpenCVLayersType); OPENVINO_OP(kOpenCVLayersType);
@ -60,6 +102,19 @@ public:
void validate_and_infer_types() override void validate_and_infer_types() override
{ {
std::vector<MatType> inputTypes(get_input_size());
std::vector<MatType> internalTypes;
std::vector<MatType> outputTypes;
for (int i = 0; i < get_input_size(); ++i)
{
inputTypes[i] = ovTypeToCvType(get_input_element_type(i));
}
cvLayer->getTypes(inputTypes, outputs.size(), internals.size(), outputTypes, internalTypes);
for (int i = 0; i < internals.size(); ++i) {
if (internals[i].depth() != internalTypes[i])
internals[i] = cv::Mat(shape(internals[i]), internalTypes[i]);
}
set_output_size(outputs.size()); set_output_size(outputs.size());
for (int i = 0; i < outputs.size(); ++i) for (int i = 0; i < outputs.size(); ++i)
{ {
@ -67,7 +122,7 @@ public:
for (int j = 0; j < outputs[i].dims; ++j) { for (int j = 0; j < outputs[i].dims; ++j) {
shape.push_back(outputs[i].size[j]); shape.push_back(outputs[i].size[j]);
} }
set_output_type(i, get_input_element_type(0), shape); set_output_type(i, cvTypeToOvType(outputTypes[i]), shape);
} }
} }
@ -270,7 +325,7 @@ ov::ParameterVector InfEngineNgraphNet::setInputs(const std::vector<cv::Mat>& in
for (size_t i = 0; i < inputs.size(); i++) for (size_t i = 0; i < inputs.size(); i++)
{ {
std::vector<size_t> shape = getShape<size_t>(inputs[i]); std::vector<size_t> shape = getShape<size_t>(inputs[i]);
auto inp = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape(shape)); auto inp = std::make_shared<ov::op::v0::Parameter>(cvTypeToOvType(inputs[i]), ov::Shape(shape));
inp->set_friendly_name(names[i]); inp->set_friendly_name(names[i]);
auto it = std::find_if(inputs_vec.begin(), inputs_vec.end(), auto it = std::find_if(inputs_vec.begin(), inputs_vec.end(),
@ -427,16 +482,7 @@ void NgraphBackendLayer::forward(InputArrayOfArrays inputs, OutputArrayOfArrays
ov::Tensor wrapToNgraphBlob(const Mat& m) { ov::Tensor wrapToNgraphBlob(const Mat& m) {
std::vector<size_t> shape = getShape<size_t>(m); std::vector<size_t> shape = getShape<size_t>(m);
if (m.type() == CV_32F) return ov::Tensor(cvTypeToOvType(m), shape, m.data);
return ov::Tensor(ov::element::f32, shape, m.data);
else if (m.type() == CV_8U)
return ov::Tensor(ov::element::u8, shape, m.data);
else if (m.type() == CV_8SC1)
return ov::Tensor(ov::element::i8, shape, m.data);
else if (m.type() == CV_32SC1)
return ov::Tensor(ov::element::i32, shape, m.data);
else
CV_Error(Error::StsNotImplemented, format("Unsupported data type %s", typeToString(m.type()).c_str()));
} }
@ -445,6 +491,7 @@ NgraphBackendWrapper::NgraphBackendWrapper(int targetId, const cv::Mat& m)
, host((Mat*)&m) , host((Mat*)&m)
{ {
blob = wrapToNgraphBlob(m); blob = wrapToNgraphBlob(m);
hostMatDepth = m.depth();
} }
NgraphBackendWrapper::NgraphBackendWrapper(Ptr<BackendWrapper> wrapper) NgraphBackendWrapper::NgraphBackendWrapper(Ptr<BackendWrapper> wrapper)

View File

@ -29,6 +29,10 @@ namespace cv { namespace dnn {
#ifdef HAVE_DNN_NGRAPH #ifdef HAVE_DNN_NGRAPH
ov::element::Type cvTypeToOvType(MatType cvType);
ov::element::Type cvTypeToOvType(const cv::Mat& mat);
MatType ovTypeToCvType(ov::element::Type ovType);
class InfEngineNgraphNode; class InfEngineNgraphNode;
class InfEngineNgraphNet class InfEngineNgraphNet

View File

@ -570,6 +570,7 @@ public:
CV_Assert(!blobs.empty()); CV_Assert(!blobs.empty());
CV_Assert_N(inputs.size() >= 1, nodes.size() >= 1); CV_Assert_N(inputs.size() >= 1, nodes.size() >= 1);
CV_CheckTypeEQ(weightsMat.type(), CV_8S, ""); CV_CheckTypeEQ(weightsMat.type(), CV_8S, "");
CV_CheckTypeEQ(blobs[0].type(), CV_8S, "");
auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node; auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
std::vector<size_t> dims = ieInpNode.get_shape(); std::vector<size_t> dims = ieInpNode.get_shape();
CV_Check(dims.size(), dims.size() >= 3 && dims.size() <= 5, ""); CV_Check(dims.size(), dims.size() >= 3 && dims.size() <= 5, "");
@ -581,7 +582,7 @@ public:
const int inpGroupCn = nodes.size() > 1 ? ieWeights.get_shape()[1] : blobs[0].size[1]; const int inpGroupCn = nodes.size() > 1 ? ieWeights.get_shape()[1] : blobs[0].size[1];
const int group = inpCn / inpGroupCn; const int group = inpCn / inpGroupCn;
std::vector<size_t> kernel_shape; std::vector<int64_t> kernel_shape;
if (group != 1) if (group != 1)
{ {
kernel_shape.push_back(group); kernel_shape.push_back(group);
@ -592,7 +593,7 @@ public:
if (nodes.size() == 1) if (nodes.size() == 1)
{ {
ieWeights = std::make_shared<ov::op::v0::Constant>(ov::element::i8, kernel_shape, blobs[0].data); ieWeights = std::make_shared<ov::op::v0::Constant>(ov::element::i8, ov::Shape(kernel_shape.begin(), kernel_shape.end()), blobs[0].data);
} }
else else
{ {
@ -655,7 +656,7 @@ public:
pad_type); pad_type);
} }
std::vector<size_t> shape(conv_node.get_shape().size(), 1); std::vector<int64_t> shape(conv_node.get_shape().size(), 1);
shape[1] = conv_node.get_shape()[1]; shape[1] = conv_node.get_shape()[1];
if (biasvec.size() || nodes.size() == 3) if (biasvec.size() || nodes.size() == 3)
{ {
@ -672,7 +673,7 @@ public:
for (int i = 0; i < numOutput; ++i) { for (int i = 0; i < numOutput; ++i) {
ovBias[i] = (biasvec[i] + input_zp * cv::sum(blobs[0].row(i))[0]) * outputMultiplier[i] * output_sc; ovBias[i] = (biasvec[i] + input_zp * cv::sum(blobs[0].row(i))[0]) * outputMultiplier[i] * output_sc;
} }
bias = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape(shape), ovBias.data()); bias = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape(shape.begin(), shape.end()), ovBias.data());
} }
conv_node = std::make_shared<ov::op::v1::Add>(conv_node, bias, ov::op::AutoBroadcastType::NUMPY); conv_node = std::make_shared<ov::op::v1::Add>(conv_node, bias, ov::op::AutoBroadcastType::NUMPY);
} }

View File

@ -134,7 +134,8 @@ struct DataLayer : public Layer
virtual bool supportBackend(int backendId) CV_OVERRIDE virtual bool supportBackend(int backendId) CV_OVERRIDE
{ {
return backendId == DNN_BACKEND_OPENCV; return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
} }
void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE

View File

@ -161,8 +161,7 @@ public:
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{ {
auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node; auto ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
ov::OutputVector inp{ieInpNode}; auto blank = std::make_shared<ov::op::v1::ConvertLike>(ieInpNode, ieInpNode);
auto blank = std::make_shared<ov::op::v0::Concat>(inp, 0);
return Ptr<BackendNode>(new InfEngineNgraphNode(blank)); return Ptr<BackendNode>(new InfEngineNgraphNode(blank));
} }
#endif // HAVE_DNN_NGRAPH #endif // HAVE_DNN_NGRAPH

View File

@ -3,6 +3,8 @@
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
#include "../precomp.hpp" #include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "layers_common.hpp" #include "layers_common.hpp"
@ -19,7 +21,8 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE virtual bool supportBackend(int backendId) CV_OVERRIDE
{ {
return backendId == DNN_BACKEND_OPENCV; return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
} }
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs, virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -83,6 +86,15 @@ public:
inputs[0].convertTo(outputs[0], outputs[0].depth()); inputs[0].convertTo(outputs[0], outputs[0].depth());
} }
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto cast = std::make_shared<ov::op::v0::Convert>(nodes[0].dynamicCast<InfEngineNgraphNode>()->node, cvTypeToOvType(outputType));
return Ptr<BackendNode>(new InfEngineNgraphNode(cast));
}
#endif // HAVE_DNN_NGRAPH
private: private:
int outputType; int outputType;
}; };

View File

@ -142,23 +142,10 @@ public:
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{ {
ov::element::Type dType;
if (blobs[0].depth() == CV_32F) {
dType = ov::element::f32;
} else if (blobs[0].depth() == CV_32S) {
dType = ov::element::i32;
} else if (blobs[0].depth() == CV_8S) {
dType = ov::element::i8;
} else {
CV_Error(Error::StsNotImplemented, format("Unexpected Const data depth: %d", blobs[0].depth()));
}
std::shared_ptr<ov::Node> node = std::shared_ptr<ov::Node> node =
std::make_shared<ov::op::v0::Constant>(dType, std::make_shared<ov::op::v0::Constant>(cvTypeToOvType(blobs[0]),
getShape<size_t>(blobs[0]), getShape<size_t>(blobs[0]),
blobs[0].data); blobs[0].data);
if (node->get_element_type() != ov::element::f32) {
node = std::make_shared<ov::op::v0::Convert>(node, ov::element::f32);
}
return Ptr<BackendNode>(new InfEngineNgraphNode(node)); return Ptr<BackendNode>(new InfEngineNgraphNode(node));
} }
#endif // HAVE_DNN_NGRAPH #endif // HAVE_DNN_NGRAPH

View File

@ -3,6 +3,8 @@
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
#include "../precomp.hpp" #include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "layers_common.hpp" #include "layers_common.hpp"
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
@ -23,6 +25,12 @@ public:
setParamsFrom(params); setParamsFrom(params);
} }
virtual bool supportBackend(int backendId) CV_OVERRIDE
{
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
}
bool getMemoryShapes(const std::vector<MatShape> &inputs, bool getMemoryShapes(const std::vector<MatShape> &inputs,
const int requiredOutputs, const int requiredOutputs,
std::vector<MatShape> &outputs, std::vector<MatShape> &outputs,
@ -151,6 +159,36 @@ public:
} }
} }
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
std::shared_ptr<ov::op::v0::CumSum> cumsum;
if (nodes.size() == 2)
{
int32_t axis_shape = 1;
auto axis_scalar = std::make_shared<ov::op::v1::Reshape>(
nodes[1].dynamicCast<InfEngineNgraphNode>()->node,
std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, &axis_shape),
false);
cumsum = std::make_shared<ov::op::v0::CumSum>(
nodes[0].dynamicCast<InfEngineNgraphNode>()->node,
std::make_shared<ov::op::v0::Convert>(axis_scalar, ov::element::i32),
exclusive_raw,
reverse_raw);
}
else
{
cumsum = std::make_shared<ov::op::v0::CumSum>(
nodes[0].dynamicCast<InfEngineNgraphNode>()->node,
std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, &axis_raw),
exclusive_raw,
reverse_raw);
}
return Ptr<BackendNode>(new InfEngineNgraphNode(cumsum));
}
#endif // HAVE_DNN_NGRAPH
int axis_raw; int axis_raw;
int exclusive_raw; int exclusive_raw;
int reverse_raw; int reverse_raw;

View File

@ -3,6 +3,8 @@
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
#include "../precomp.hpp" #include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
namespace cv { namespace dnn { namespace cv { namespace dnn {
@ -27,8 +29,10 @@ public:
const_input_1d = params.get("const_input_1d", false); const_input_1d = params.get("const_input_1d", false);
} }
virtual bool supportBackend(int backendId) CV_OVERRIDE { virtual bool supportBackend(int backendId) CV_OVERRIDE
return backendId == DNN_BACKEND_OPENCV; {
return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
} }
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs, virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -145,6 +149,25 @@ public:
} }
} }
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto input_shape = nodes[0].dynamicCast<InfEngineNgraphNode>()->node.get_shape();
CV_CheckGE(target_shape.size(), input_shape.size(), "");
std::vector<int32_t> output_shape(target_shape.begin(), target_shape.end());
for (int i = 1; i < input_shape.size() + 1; ++i)
output_shape[output_shape.size() - i] = std::max(
(int32_t)input_shape[input_shape.size() - i],
output_shape[output_shape.size() - i]);
auto shape_node = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{output_shape.size()}, output_shape.data());
auto expand = std::make_shared<ov::op::v3::Broadcast>(nodes[0].dynamicCast<InfEngineNgraphNode>()->node, shape_node);
return Ptr<BackendNode>(new InfEngineNgraphNode(expand));
}
#endif // HAVE_DNN_NGRAPH
private: private:
MatShape target_shape; MatShape target_shape;
bool const_input_1d; bool const_input_1d;

View File

@ -3,6 +3,8 @@
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
#include "../precomp.hpp" #include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
namespace cv { namespace dnn { namespace cv { namespace dnn {
@ -30,7 +32,8 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE virtual bool supportBackend(int backendId) CV_OVERRIDE
{ {
return backendId == DNN_BACKEND_OPENCV; return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
} }
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs, virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -176,6 +179,31 @@ public:
}; };
} }
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
int64_t indicesBoundInt64 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node.get_shape()[axis];
int32_t indicesBoundInt32 = indicesBoundInt64;
std::shared_ptr<ov::op::v0::Constant> indicesBound;
if (nodes[1].dynamicCast<InfEngineNgraphNode>()->node.get_element_type() == ov::element::i32)
indicesBound = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, &indicesBoundInt32);
else if (nodes[1].dynamicCast<InfEngineNgraphNode>()->node.get_element_type() == ov::element::i64)
indicesBound = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, &indicesBoundInt64);
else
CV_Error(Error::StsNotImplemented, "");
auto indicesNonNegative = std::make_shared<ov::op::v1::Mod>(
std::make_shared<ov::op::v1::Add>(nodes[1].dynamicCast<InfEngineNgraphNode>()->node, indicesBound),
indicesBound);
auto gatherElements = std::make_shared<ov::op::v6::GatherElements>(
nodes[0].dynamicCast<InfEngineNgraphNode>()->node,
indicesNonNegative,
axis);
return Ptr<BackendNode>(new InfEngineNgraphNode(gatherElements));
}
#endif // HAVE_DNN_NGRAPH
private: private:
int axis; int axis;
}; };

View File

@ -3,6 +3,8 @@
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
#include "../precomp.hpp" #include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "layers_common.hpp" #include "layers_common.hpp"
@ -20,7 +22,8 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE virtual bool supportBackend(int backendId) CV_OVERRIDE
{ {
return backendId == DNN_BACKEND_OPENCV; return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
} }
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs, virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -115,6 +118,19 @@ public:
} }
} }
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto axisNode = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, &m_axis);
auto gather = std::make_shared<ov::op::v8::Gather>(
nodes[0].dynamicCast<InfEngineNgraphNode>()->node,
nodes[1].dynamicCast<InfEngineNgraphNode>()->node,
axisNode);
return Ptr<BackendNode>(new InfEngineNgraphNode(gather));
}
#endif // HAVE_DNN_NGRAPH
private: private:
// The axis to gather along // The axis to gather along
int m_axis; int m_axis;

View File

@ -271,8 +271,29 @@ public:
auto padding_below = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{begins.size()}, begins.data()); auto padding_below = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{begins.size()}, begins.data());
auto padding_above = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{ends.size()}, ends.data()); auto padding_above = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{ends.size()}, ends.data());
auto pad_mode = paddingType == "constant" ? ov::op::PadMode::CONSTANT : ov::op::PadMode::REFLECT; // SYMMETRIC auto pad_mode = paddingType == "constant" ? ov::op::PadMode::CONSTANT : ov::op::PadMode::REFLECT; // SYMMETRIC
std::shared_ptr<ov::op::v0::Constant> arg_pad_value;
float paddingValueFloat = paddingValue; float paddingValueFloat = paddingValue;
auto arg_pad_value = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{}, &paddingValueFloat); int8_t paddingValueInt8 = paddingValue;
int32_t paddingValueInt32 = paddingValue;
int64_t paddingValueInt64 = paddingValue;
switch(ieInpNode.get_element_type())
{
case ov::element::f32:
arg_pad_value = std::make_shared<ov::op::v0::Constant>(ov::element::f32, ov::Shape{}, &paddingValueFloat);
break;
case ov::element::i8:
arg_pad_value = std::make_shared<ov::op::v0::Constant>(ov::element::i8, ov::Shape{}, &paddingValueInt8);
break;
case ov::element::i32:
arg_pad_value = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, &paddingValueInt32);
break;
case ov::element::i64:
arg_pad_value = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, &paddingValueInt64);
break;
default:
CV_Error(Error::BadDepth, "");
};
auto pad = paddingType == "constant" ? auto pad = paddingType == "constant" ?
std::make_shared<ov::op::v1::Pad>(ieInpNode, padding_below, padding_above, arg_pad_value, pad_mode) : std::make_shared<ov::op::v1::Pad>(ieInpNode, padding_below, padding_above, arg_pad_value, pad_mode) :

View File

@ -3,6 +3,8 @@
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
#include "../precomp.hpp" #include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "layers_common.hpp" #include "layers_common.hpp"
#include <algorithm> // for std::max & std::min #include <algorithm> // for std::max & std::min
@ -42,7 +44,8 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE virtual bool supportBackend(int backendId) CV_OVERRIDE
{ {
return backendId == DNN_BACKEND_OPENCV; return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && reduction == REDUCTION::NONE);
} }
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs, virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -240,6 +243,18 @@ public:
CV_Error(Error::StsBadArg, "Unsupported reduction."); CV_Error(Error::StsBadArg, "Unsupported reduction.");
}; };
} }
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto scatterND = std::make_shared<ov::op::v3::ScatterNDUpdate>(
nodes[0].dynamicCast<InfEngineNgraphNode>()->node,
nodes[1].dynamicCast<InfEngineNgraphNode>()->node,
nodes[2].dynamicCast<InfEngineNgraphNode>()->node);
return Ptr<BackendNode>(new InfEngineNgraphNode(scatterND));
}
#endif // HAVE_DNN_NGRAPH
}; };
Ptr<ScatterNDLayer> ScatterNDLayer::create(const LayerParams& params) Ptr<ScatterNDLayer> ScatterNDLayer::create(const LayerParams& params)

View File

@ -3,6 +3,8 @@
// of this distribution and at http://opencv.org/license.html. // of this distribution and at http://opencv.org/license.html.
#include "../precomp.hpp" #include "../precomp.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "layers_common.hpp" #include "layers_common.hpp"
#include <algorithm> // for std::max & std::min #include <algorithm> // for std::max & std::min
@ -43,7 +45,8 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE virtual bool supportBackend(int backendId) CV_OVERRIDE
{ {
return backendId == DNN_BACKEND_OPENCV; return backendId == DNN_BACKEND_OPENCV ||
(backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && reduction == REDUCTION::NONE);
} }
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs, virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -236,6 +239,35 @@ public:
}; };
} }
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
int64_t indicesBoundInt64 = nodes[0].dynamicCast<InfEngineNgraphNode>()->node.get_shape()[axis];
int32_t indicesBoundInt32 = indicesBoundInt64;
std::shared_ptr<ov::op::v0::Constant> indicesBound;
if (nodes[1].dynamicCast<InfEngineNgraphNode>()->node.get_element_type() == ov::element::i32)
indicesBound = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, &indicesBoundInt32);
else if (nodes[1].dynamicCast<InfEngineNgraphNode>()->node.get_element_type() == ov::element::i64)
indicesBound = std::make_shared<ov::op::v0::Constant>(ov::element::i64, ov::Shape{}, &indicesBoundInt64);
else
CV_Error(Error::StsNotImplemented, "");
auto indicesNonNegative = std::make_shared<ov::op::v1::Mod>(
std::make_shared<ov::op::v1::Add>(nodes[1].dynamicCast<InfEngineNgraphNode>()->node, indicesBound),
indicesBound);
auto axis_node = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{}, &axis);
auto scatterElements = std::make_shared<ov::op::v3::ScatterElementsUpdate>(
nodes[0].dynamicCast<InfEngineNgraphNode>()->node,
indicesNonNegative,
nodes[2].dynamicCast<InfEngineNgraphNode>()->node,
axis_node);
return Ptr<BackendNode>(new InfEngineNgraphNode(scatterElements));
}
#endif // HAVE_DNN_NGRAPH
private: private:
// Attributes // Attributes
int axis; int axis;

View File

@ -4,6 +4,9 @@
#include "../precomp.hpp" #include "../precomp.hpp"
#include "layers_common.hpp" #include "layers_common.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include <opencv2/dnn/shape_utils.hpp> #include <opencv2/dnn/shape_utils.hpp>
@ -31,7 +34,8 @@ public:
virtual bool supportBackend(int backendId) CV_OVERRIDE virtual bool supportBackend(int backendId) CV_OVERRIDE
{ {
return backendId == DNN_BACKEND_OPENCV; return backendId == DNN_BACKEND_OPENCV ||
backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH;
} }
virtual bool getMemoryShapes(const std::vector<MatShape> &inputs, virtual bool getMemoryShapes(const std::vector<MatShape> &inputs,
@ -108,6 +112,17 @@ public:
tmp.copyTo(out); tmp.copyTo(out);
} }
#ifdef HAVE_DNN_NGRAPH
virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs,
const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
{
auto repeats_node = std::make_shared<ov::op::v0::Constant>(ov::element::i32, ov::Shape{repeats.size()}, repeats.data());
auto tile = std::make_shared<ov::op::v0::Tile>(nodes[0].dynamicCast<InfEngineNgraphNode>()->node, repeats_node);
return Ptr<BackendNode>(new InfEngineNgraphNode(tile));
}
#endif // HAVE_DNN_NGRAPH
private: private:
std::vector<int> repeats; std::vector<int> repeats;
}; };

View File

@ -469,8 +469,8 @@ void Net::Impl::allocateLayer(int lid, const LayersShapesMap& layersShapes)
for (std::set<int>::const_iterator i = ld.inputLayersId.begin(); i != ld.inputLayersId.end(); i++) for (std::set<int>::const_iterator i = ld.inputLayersId.begin(); i != ld.inputLayersId.end(); i++)
allocateLayer(*i, layersShapes); allocateLayer(*i, layersShapes);
// bind inputs // bind inputs for DataLayer
if (ld.id == 0 && netInputLayer->supportBackend(preferableBackend)) // DataLayer if (ld.id == 0 && netInputLayer->supportBackend(preferableBackend))
{ {
ninputs = netInputLayer->inputsData.size(); ninputs = netInputLayer->inputsData.size();
ld.inputBlobsWrappers.resize(ninputs); ld.inputBlobsWrappers.resize(ninputs);
@ -1467,6 +1467,7 @@ void Net::Impl::setInput(InputArray blob, const String& name, double scalefactor
{ {
ld.outputBlobsWrappers[pin.oid]->setHostDirty(); ld.outputBlobsWrappers[pin.oid]->setHostDirty();
} }
netInputLayer->scaleFactors[pin.oid] = scalefactor; netInputLayer->scaleFactors[pin.oid] = scalefactor;
netInputLayer->means[pin.oid] = mean; netInputLayer->means[pin.oid] = mean;
netWasAllocated = netWasAllocated && oldShape; netWasAllocated = netWasAllocated && oldShape;

View File

@ -49,6 +49,9 @@ Mat infEngineBlobToMat(const ov::Tensor& blob)
switch (precision) switch (precision)
{ {
case ov::element::f32: type = CV_32F; break; case ov::element::f32: type = CV_32F; break;
case ov::element::i8: type = CV_8S; break;
case ov::element::i32: type = CV_32S; break;
case ov::element::i64: type = CV_64S; break;
case ov::element::u8: type = CV_8U; break; case ov::element::u8: type = CV_8U; break;
default: default:
CV_Error(Error::StsNotImplemented, "Unsupported blob precision"); CV_Error(Error::StsNotImplemented, "Unsupported blob precision");

View File

@ -31,6 +31,8 @@ TEST_P(Test_NaryEltwise_Int, random)
std::vector<int> inShape{2, 3, 4, 5}; std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input1(inShape, matType); Mat input1(inShape, matType);
cv::randu(input1, low, low + 100); cv::randu(input1, low, low + 100);
Mat input2(inShape, matType); Mat input2(inShape, matType);
@ -40,7 +42,7 @@ TEST_P(Test_NaryEltwise_Int, random)
LayerParams lp; LayerParams lp;
lp.type = "NaryEltwise"; lp.type = "NaryEltwise";
lp.name = "testLayer"; lp.name = "testLayer";
lp.set("operation", "sum"); lp.set("operation", "add");
int id = net.addLayerToPrev(lp.name, lp.type, lp); int id = net.addLayerToPrev(lp.name, lp.type, lp);
net.connect(0, 1, id, 1); net.connect(0, 1, id, 1);
@ -98,6 +100,8 @@ TEST_P(Test_Const_Int, random)
std::vector<int> inShape{2, 3, 4, 5}; std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input1(inShape, matType); Mat input1(inShape, matType);
cv::randu(input1, low, low + 100); cv::randu(input1, low, low + 100);
Mat inputConst(inShape, matType); Mat inputConst(inShape, matType);
@ -114,7 +118,7 @@ TEST_P(Test_Const_Int, random)
LayerParams lp; LayerParams lp;
lp.type = "NaryEltwise"; lp.type = "NaryEltwise";
lp.name = "testLayer"; lp.name = "testLayer";
lp.set("operation", "sum"); lp.set("operation", "add");
int idSum = net.addLayer(lp.name, lp.type, lp); int idSum = net.addLayer(lp.name, lp.type, lp);
net.connect(0, 0, idSum, 0); net.connect(0, 0, idSum, 0);
@ -170,6 +174,8 @@ TEST_P(Test_ScatterND_Int, random)
std::vector<int> inShape{2, 3, 4, 5}; std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
@ -275,6 +281,8 @@ TEST_P(Test_Concat_Int, random)
Target target = get<1>(backend_target); Target target = get<1>(backend_target);
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
std::vector<int> inShape1{2, 3, 4, 5}; std::vector<int> inShape1{2, 3, 4, 5};
Mat input1(inShape1, matType); Mat input1(inShape1, matType);
cv::randu(input1, low, low + 100); cv::randu(input1, low, low + 100);
@ -358,8 +366,13 @@ TEST_P(Test_ArgMax_Int, random)
Backend backend = get<0>(backend_target); Backend backend = get<0>(backend_target);
Target target = get<1>(backend_target); Target target = get<1>(backend_target);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // There is a problem with OpenVINO and custom int64 layers. After model compilation the output tensor type changes from int64 to int32
std::vector<int> inShape{5, 4, 3, 2}; std::vector<int> inShape{5, 4, 3, 2};
int64_t low = matType == CV_64S ? 1000000000000000ll : 100000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 100000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
@ -433,6 +446,8 @@ TEST_P(Test_Blank_Int, random)
std::vector<int> inShape{2, 3, 4, 5}; std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
@ -490,6 +505,8 @@ TEST_P(Test_Expand_Int, random)
std::vector<int> inShape{2, 3, 1, 5}; std::vector<int> inShape{2, 3, 1, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
std::vector<int> outShape{2, 1, 4, 5}; std::vector<int> outShape{2, 1, 4, 5};
@ -554,6 +571,8 @@ TEST_P(Test_Permute_Int, random)
std::vector<int> inShape{2, 3, 4, 5}; std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
std::vector<int> order{0, 2, 3, 1}; std::vector<int> order{0, 2, 3, 1};
@ -619,6 +638,8 @@ TEST_P(Test_GatherElements_Int, random)
std::vector<int> inShape{2, 3, 4, 5}; std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
@ -692,6 +713,8 @@ TEST_P(Test_Gather_Int, random)
std::vector<int> inShape{5, 1}; std::vector<int> inShape{5, 1};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
@ -784,7 +807,9 @@ TEST_P(Test_Pad_Int, random)
Target target = get<1>(backend_target); Target target = get<1>(backend_target);
std::vector<int> inShape{2, 3, 4, 5}; std::vector<int> inShape{2, 3, 4, 5};
int64_t low = 1000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
std::vector<int> paddings{0, 0, 0, 0, 1, 0, 0, 1}; std::vector<int> paddings{0, 0, 0, 0, 1, 0, 0, 1};
@ -860,6 +885,8 @@ TEST_P(Test_Slice_Int, random)
std::vector<int> begin{0, 4, 0, 0}; std::vector<int> begin{0, 4, 0, 0};
std::vector<int> end{1, 8, 6, 8}; std::vector<int> end{1, 8, 6, 8};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inputShape, matType); Mat input(inputShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
@ -900,6 +927,8 @@ TEST_P(Test_Reshape_Int, random)
std::vector<int> inShape{2, 3, 4, 5}; std::vector<int> inShape{2, 3, 4, 5};
std::vector<int> outShape{2, 3, 2, 10}; std::vector<int> outShape{2, 3, 2, 10};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
@ -948,6 +977,8 @@ TEST_P(Test_Flatten_Int, random)
std::vector<int> inShape{2, 3, 4, 5}; std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
@ -994,6 +1025,8 @@ TEST_P(Test_Tile_Int, random)
std::vector<int> inShape{2, 3, 4, 5}; std::vector<int> inShape{2, 3, 4, 5};
int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 1000000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 1000000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
std::vector<int> repeats{1, 1, 2, 3}; std::vector<int> repeats{1, 1, 2, 3};
@ -1056,13 +1089,19 @@ TEST_P(Test_Reduce_Int, random)
Backend backend = get<0>(backend_target); Backend backend = get<0>(backend_target);
Target target = get<1>(backend_target); Target target = get<1>(backend_target);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && matType == CV_64S)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // There is a problem with OpenVINO and custom int64 layers. After model compilation the output tensor type changes from int64 to int32
std::vector<int> inShape{5, 4, 3, 2}; std::vector<int> inShape{5, 4, 3, 2};
int64_t low = matType == CV_64S ? 1000000000000000ll : 100000000; int64_t low = matType == CV_64S ? 1000000000000000ll : 100000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 100000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
std::vector<int> axes{1}; std::vector<int> axes{1};
Net net; Net net;
LayerParams lp; LayerParams lp;
lp.type = "Reduce"; lp.type = "Reduce";
lp.name = "testLayer"; lp.name = "testLayer";
@ -1119,8 +1158,13 @@ TEST_P(Test_Reduce_Int, two_axes)
Backend backend = get<0>(backend_target); Backend backend = get<0>(backend_target);
Target target = get<1>(backend_target); Target target = get<1>(backend_target);
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && matType == CV_64S)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // There is a problem with OpenVINO and custom int64 layers. After model compilation the output tensor type changes from int64 to int32
std::vector<int> inShape{5, 4, 3, 2}; std::vector<int> inShape{5, 4, 3, 2};
int64_t low = matType == CV_64S ? 100000000000000ll : 10000000; int64_t low = matType == CV_64S ? 100000000000000ll : 10000000;
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
low = 10000000; // Looks like OpenVINO uses int32 internal values for int64 operations
Mat input(inShape, matType); Mat input(inShape, matType);
cv::randu(input, low, low + 100); cv::randu(input, low, low + 100);
std::vector<int> axes{1, 3}; std::vector<int> axes{1, 3};

View File

@ -362,6 +362,9 @@ TEST_P(Test_Caffe_layers, PReLU)
// TODO: fix an unstable test case // TODO: fix an unstable test case
TEST_P(Test_Caffe_layers, layer_prelu_fc) TEST_P(Test_Caffe_layers, layer_prelu_fc)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO: fix this test for OpenVINO
if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16) if (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16); applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL_FP16);
// Reference output values are in range [-0.0001, 10.3906] // Reference output values are in range [-0.0001, 10.3906]

View File

@ -1303,6 +1303,9 @@ TEST_P(Test_ONNX_layers, Split_EltwiseMax)
TEST_P(Test_ONNX_layers, LSTM_Activations) TEST_P(Test_ONNX_layers, LSTM_Activations)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO: fix this test for OpenVINO
#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000) #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000)
// IE exception: Node Block1326/lstm/reshape_0/permute was not assigned on any pointed device // IE exception: Node Block1326/lstm/reshape_0/permute was not assigned on any pointed device
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16)) if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
@ -2012,6 +2015,9 @@ TEST_P(Test_ONNX_layers, Gemm_bias)
TEST_P(Test_ONNX_layers, Quantized_Convolution) TEST_P(Test_ONNX_layers, Quantized_Convolution)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO: fix this test for OpenVINO
// The difference of QOperator and QDQ format: // The difference of QOperator and QDQ format:
// https://onnxruntime.ai/docs/performance/quantization.html#onnx-quantization-representation-format. // https://onnxruntime.ai/docs/performance/quantization.html#onnx-quantization-representation-format.
{ {
@ -2058,6 +2064,8 @@ TEST_P(Test_ONNX_layers, Quantized_Eltwise)
TEST_P(Test_ONNX_layers, Quantized_Eltwise_Scalar) TEST_P(Test_ONNX_layers, Quantized_Eltwise_Scalar)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO: fix this test for OpenVINO
testONNXModels("quantized_eltwise_scalar"); testONNXModels("quantized_eltwise_scalar");
} }
@ -2651,23 +2659,43 @@ TEST_P(Test_ONNX_layers, CumSum)
testONNXModels("cumsum_2d_dim_1"); testONNXModels("cumsum_2d_dim_1");
testONNXModels("cumsum_3d_dim_2"); testONNXModels("cumsum_3d_dim_2");
testONNXModels("cumsum_3d_dim_2_int32"); testONNXModels("cumsum_3d_dim_2_int32");
}
TEST_P(Test_ONNX_layers, CumSum_int64)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // OpenVINO uses int32 precision for int64 operations
testONNXModels("cumsum_3d_dim_2_int64"); testONNXModels("cumsum_3d_dim_2_int64");
} }
TEST_P(Test_ONNX_layers, ReduceSumInt) TEST_P(Test_ONNX_layers, ReduceSumInt64)
{ {
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // OpenVINO uses int32 precision for int64 operations
testONNXModels("reduce_sum_int64"); testONNXModels("reduce_sum_int64");
} }
TEST_P(Test_ONNX_layers, ScatterInt) TEST_P(Test_ONNX_layers, ScatterInt32)
{ {
testONNXModels("scatter_int32", npy, 0, 0, false, true, 3); testONNXModels("scatter_int32", npy, 0, 0, false, true, 3);
}
TEST_P(Test_ONNX_layers, ScatterInt64)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // OpenVINO uses int32 precision for int64 operations
testONNXModels("scatter_int64", npy, 0, 0, false, true, 3); testONNXModels("scatter_int64", npy, 0, 0, false, true, 3);
} }
TEST_P(Test_ONNX_layers, TileInt) TEST_P(Test_ONNX_layers, TileInt32)
{ {
testONNXModels("tile_int32"); testONNXModels("tile_int32");
}
TEST_P(Test_ONNX_layers, TileInt64)
{
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // OpenVINO uses int32 precision for int64 operations
testONNXModels("tile_int64"); testONNXModels("tile_int64");
} }

View File

@ -210,6 +210,9 @@ TEST_P(Test_TFLite, max_unpooling)
} }
TEST_P(Test_TFLite, EfficientDet_int8) { TEST_P(Test_TFLite, EfficientDet_int8) {
if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // TODO: fix this test for OpenVINO
if (target != DNN_TARGET_CPU || (backend != DNN_BACKEND_OPENCV && if (target != DNN_TARGET_CPU || (backend != DNN_BACKEND_OPENCV &&
backend != DNN_BACKEND_TIMVX && backend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) { backend != DNN_BACKEND_TIMVX && backend != DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)) {
throw SkipTestException("Only OpenCV, TimVX and OpenVINO targets support INT8 on CPU"); throw SkipTestException("Only OpenCV, TimVX and OpenVINO targets support INT8 on CPU");