From b44cb33d2f85d405b649a65141d8cc3a019ecf27 Mon Sep 17 00:00:00 2001
From: andrewerf
Date: Fri, 20 Oct 2023 11:49:27 +0300
Subject: [PATCH] Merge pull request #21066 from andrewerf:21052-openvino-native-onnx

Native ONNX to Inference Engine backend #21066

Resolves #21052

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [ ] There is an accuracy test, a performance test and test data in the opencv_extra repository, if applicable
- [ ] The feature is well documented and sample code can be built with the project CMake
---
 modules/dnn/include/opencv2/dnn/dnn.hpp |  6 ++---
 modules/dnn/src/dnn_read.cpp            |  8 ++++---
 modules/dnn/test/test_misc.cpp          | 30 +++++++++++++++++++++++++
 3 files changed, 38 insertions(+), 6 deletions(-)

diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp
index 2bd3f790b4..02a76d403b 100644
--- a/modules/dnn/include/opencv2/dnn/dnn.hpp
+++ b/modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -1021,14 +1021,14 @@ CV__DNN_INLINE_NS_BEGIN
      *                  * `*.pb` (TensorFlow, https://www.tensorflow.org/)
      *                  * `*.t7` | `*.net` (Torch, http://torch.ch/)
      *                  * `*.weights` (Darknet, https://pjreddie.com/darknet/)
-     *                  * `*.bin` (DLDT, https://software.intel.com/openvino-toolkit)
+     *                  * `*.bin` | `*.onnx` (OpenVINO, https://software.intel.com/openvino-toolkit)
      *                  * `*.onnx` (ONNX, https://onnx.ai/)
      * @param[in] config Text file contains network configuration. It could be a
      *                   file with the following extensions:
      *                  * `*.prototxt` (Caffe, http://caffe.berkeleyvision.org/)
      *                  * `*.pbtxt` (TensorFlow, https://www.tensorflow.org/)
      *                  * `*.cfg` (Darknet, https://pjreddie.com/darknet/)
-     *                  * `*.xml` (DLDT, https://software.intel.com/openvino-toolkit)
+     *                  * `*.xml` (OpenVINO, https://software.intel.com/openvino-toolkit)
      * @param[in] framework Explicit framework name tag to determine a format.
      * @returns Net object.
      *
@@ -1064,7 +1064,7 @@ CV__DNN_INLINE_NS_BEGIN
      * backend.
      */
     CV_EXPORTS_W
-    Net readNetFromModelOptimizer(const String &xml, const String &bin);
+    Net readNetFromModelOptimizer(const String &xml, const String &bin = "");
 
     /** @brief Load a network from Intel's Model Optimizer intermediate representation.
      *  @param[in] bufferModelConfig Buffer contains XML configuration with network's topology.
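For context, a minimal usage sketch of the header change above: with the defaulted `bin` argument, `readNetFromModelOptimizer` can load a native ONNX model from a single path (the file names below are hypothetical):

```cpp
#include <opencv2/dnn.hpp>

int main()
{
    // OpenVINO IR still works as before: topology (*.xml) plus weights (*.bin).
    cv::dnn::Net irNet = cv::dnn::readNetFromModelOptimizer("model.xml", "model.bin");

    // New with this patch: a native ONNX model loaded through the Inference
    // Engine backend; the weights argument now defaults to "".
    cv::dnn::Net onnxNet = cv::dnn::readNetFromModelOptimizer("model.onnx");
    return 0;
}
```

This sketch assumes OpenCV was built with OpenVINO support; without it, `readNetFromModelOptimizer` fails at runtime.
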
diff --git a/modules/dnn/src/dnn_read.cpp b/modules/dnn/src/dnn_read.cpp
index 9c06ced3c4..c7eb786d01 100644
--- a/modules/dnn/src/dnn_read.cpp
+++ b/modules/dnn/src/dnn_read.cpp
@@ -43,9 +43,11 @@ Net readNet(const String& _model, const String& _config, const String& _framewor
         std::swap(model, config);
         return readNetFromDarknet(config, model);
     }
-    if (framework == "dldt" || modelExt == "bin" || configExt == "bin" || modelExt == "xml" || configExt == "xml")
+    if (framework == "dldt" || framework == "openvino" ||
+        modelExt == "bin" || configExt == "bin" ||
+        modelExt == "xml" || configExt == "xml")
     {
-        if (modelExt == "xml" || configExt == "bin")
+        if (modelExt == "xml" || configExt == "bin" || modelExt == "onnx")
             std::swap(model, config);
         return readNetFromModelOptimizer(config, model);
     }
@@ -68,7 +70,7 @@ Net readNet(const String& _framework, const std::vector<uchar>& bufferModel,
         return readNetFromDarknet(bufferConfig, bufferModel);
     else if (framework == "torch")
         CV_Error(Error::StsNotImplemented, "Reading Torch models from buffers");
-    else if (framework == "dldt")
+    else if (framework == "dldt" || framework == "openvino")
        return readNetFromModelOptimizer(bufferConfig, bufferModel);
    else if (framework == "tflite")
        return readNetFromTFLite(bufferModel);
diff --git a/modules/dnn/test/test_misc.cpp b/modules/dnn/test/test_misc.cpp
index 0c5fb28c5d..da194247d3 100644
--- a/modules/dnn/test/test_misc.cpp
+++ b/modules/dnn/test/test_misc.cpp
@@ -6,6 +6,7 @@
 // Third party copyrights are property of their respective owners.
 
 #include "test_precomp.hpp"
+#include "npy_blob.hpp"
 #include <opencv2/core/ocl.hpp>
 #include <opencv2/core/opencl/ocl_defs.hpp>
 #include <opencv2/dnn/layer.details.hpp>  // CV_DNN_REGISTER_LAYER_CLASS
@@ -871,6 +872,35 @@ TEST_P(Test_Model_Optimizer, flexible_inputs)
     normAssert(ref, out, 0, 0);
 }
 
+TEST_P(Test_Model_Optimizer, readONNX)
+{
+    const Backend backendId = get<0>(GetParam());
+    const Target targetId = get<1>(GetParam());
+
+    ASSERT_EQ(DNN_BACKEND_INFERENCE_ENGINE_NGRAPH, backendId);
+
+    const std::string& model = findDataFile("dnn/onnx/models/convolution.onnx");
+
+    std::vector<Net> nets = {
+        // Old API
+        readNetFromModelOptimizer(model, ""),
+        readNet("", model, "dldt"),
+        // New API
+        readNetFromModelOptimizer(model),
+        readNet(model, "", "openvino")
+    };
+
+    Mat inp = blobFromNPY(findDataFile("dnn/onnx/data/input_convolution.npy"));
+    Mat ref = blobFromNPY(findDataFile("dnn/onnx/data/output_convolution.npy"));
+
+    for (int i = 0; i < nets.size(); ++i) {
+        nets[i].setPreferableTarget(targetId);
+        nets[i].setInput(inp);
+        Mat out = nets[i].forward();
+        normAssert(out, ref, format("Index: %d", i).c_str());
+    }
+}
+
 INSTANTIATE_TEST_CASE_P(/**/, Test_Model_Optimizer,
     dnnBackendsAndTargetsIE()
 );
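For completeness, a small sketch of the generic `readNet` entry point after this patch (the model path is hypothetical): the new "openvino" framework tag is accepted alongside the legacy "dldt", and a `*.onnx` model given with that tag is routed to `readNetFromModelOptimizer`:

```cpp
#include <opencv2/dnn.hpp>

int main()
{
    // "openvino" is now a valid framework tag; "dldt" remains for compatibility.
    cv::dnn::Net net = cv::dnn::readNet("model.onnx", "", "openvino");

    // Run the network through the Inference Engine (OpenVINO) backend.
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    return 0;
}
```

As with the previous sketch, this assumes an OpenVINO-enabled OpenCV build; the test above exercises exactly these call patterns against reference NPY blobs from opencv_extra.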