From 344f8c640034cb509930d1c92ff1a73a04101ebc Mon Sep 17 00:00:00 2001
From: Myron Rodrigues <41271144+MRo47@users.noreply.github.com>
Date: Tue, 27 May 2025 16:43:49 +0530
Subject: [PATCH] Merge pull request #27363 from MRo47:openvino-npu-support

Feature: Add OpenVINO NPU support #27363

## Why

- OpenVINO now supports inference on the integrated NPU devices in Intel's Core Ultra series processors.
- The NPU is sometimes as fast as the GPU, but should use considerably less power.

## How

- The NPU device is now reported as "NPU" by OpenVINO's `ov::Core::get_available_devices()` (see the sketch below).
- Removed the guards and checks that excluded the NPU target from the Inference Engine backend's available targets.
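As a quick sanity check, the devices OpenVINO reports can be listed directly. This is a minimal sketch, not part of the patch; it assumes the OpenVINO C++ runtime and headers are installed:

```cpp
#include <openvino/openvino.hpp>
#include <iostream>

int main() {
    // Ask the OpenVINO runtime which inference devices it can see.
    ov::Core core;
    for (const auto& device : core.get_available_devices()) {
        // On a Core Ultra machine with the NPU driver installed, the list
        // should include "NPU" alongside "CPU" (and "GPU" if one is present).
        std::cout << device << std::endl;
    }
    return 0;
}
```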
## Test example

### Pre-requisites

- Intel [Core Ultra series processor](https://www.intel.com/content/www/us/en/products/details/processors/core-ultra/edge.html#tab-blade-1-0)
- [Intel NPU driver](https://github.com/intel/linux-npu-driver/releases)
- OpenVINO 2023.3.0+ (tested on 2025.1.0)

### Example

```cpp
#include <opencv2/dnn.hpp>

#include <iostream>

int main(){
    cv::dnn::Net net = cv::dnn::readNet("../yolov8s-openvino/yolov8s.xml",
                                        "../yolov8s-openvino/yolov8s.bin");
    cv::Size net_input_shape = cv::Size(640, 480);

    std::cout << "Setting backend to DNN_BACKEND_INFERENCE_ENGINE and target to DNN_TARGET_NPU" << std::endl;
    net.setPreferableBackend(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    net.setPreferableTarget(cv::dnn::DNN_TARGET_NPU);

    // Random input image, only used to exercise the forward pass.
    cv::Mat image(net_input_shape, CV_8UC3);
    cv::randu(image, cv::Scalar(0, 0, 0), cv::Scalar(255, 255, 255));

    cv::Mat blob = cv::dnn::blobFromImage(
        image, 1, net_input_shape, cv::Scalar(0, 0, 0), true, false, CV_32F);

    net.setInput(blob);
    std::cout << "Running forward" << std::endl;
    cv::Mat result = net.forward();

    std::cout << "Output shape: " << result.size << std::endl; // Output shape: 1 x 84 x 6300
}
```
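With this patch, the NPU target also becomes discoverable through OpenCV itself, so callers can degrade gracefully on machines without an NPU. A minimal sketch using the existing `cv::dnn::getAvailableTargets()` API; the helper name and the CPU fallback policy are illustrative, not part of the patch:

```cpp
#include <opencv2/dnn.hpp>

#include <algorithm>
#include <vector>

// Prefer DNN_TARGET_NPU when the Inference Engine backend reports it,
// otherwise fall back to the CPU target (fallback policy is illustrative).
static cv::dnn::Target pickPreferableTarget()
{
    std::vector<cv::dnn::Target> targets =
        cv::dnn::getAvailableTargets(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE);
    const bool hasNpu = std::find(targets.begin(), targets.end(),
                                  cv::dnn::DNN_TARGET_NPU) != targets.end();
    return hasNpu ? cv::dnn::DNN_TARGET_NPU : cv::dnn::DNN_TARGET_CPU;
}
```

Calling `net.setPreferableTarget(pickPreferableTarget())` in the example above would then keep it runnable on machines where the NPU driver is absent.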
Model files are available [here](https://limewire.com/d/bPgiA#BhUeSTBnMc).

Docker image used to build OpenCV: [ghcr.io/mro47/opencv-builder](https://github.com/MRo47/opencv-builder/blob/main/Dockerfile)

Closes #26240

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on a code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [x] There is a reference to the original bug report and related work
- [ ] There is accuracy test, performance test and test data in opencv_extra repository, if applicable.
      Patch to opencv_extra has the same branch name.
- [ ] The feature is well documented and sample code can be built with the project CMake
---
 doc/tutorials/dnn/dnn_openvino/dnn_openvino.markdown | 11 +++++++++++
 modules/dnn/include/opencv2/dnn/dnn.hpp              |  2 +-
 modules/dnn/src/ie_ngraph.cpp                        |  3 +++
 modules/dnn/src/net_openvino.cpp                     |  3 ++-
 modules/dnn/src/op_inf_engine.cpp                    |  2 ++
 5 files changed, 19 insertions(+), 2 deletions(-)

diff --git a/doc/tutorials/dnn/dnn_openvino/dnn_openvino.markdown b/doc/tutorials/dnn/dnn_openvino/dnn_openvino.markdown
index 8eb965ca55..026366e857 100644
--- a/doc/tutorials/dnn/dnn_openvino/dnn_openvino.markdown
+++ b/doc/tutorials/dnn/dnn_openvino/dnn_openvino.markdown
@@ -26,3 +26,14 @@ There are 2 approaches how to get OpenCV:
 - Build OpenCV from source code against specific version of OpenVINO. This approach solves the limitations mentioned above.
 
 The instruction how to follow both approaches is provided in [OpenCV wiki](https://github.com/opencv/opencv/wiki/BuildOpenCV4OpenVINO).
+
+## Supported targets
+
+OpenVINO backend (DNN_BACKEND_INFERENCE_ENGINE) supports the following [targets](https://docs.opencv.org/4.x/d6/d0f/group__dnn.html#ga709af7692ba29788182cf573531b0ff5):
+
+- **DNN_TARGET_CPU:** Runs on the CPU, no additional dependencies required.
+- **DNN_TARGET_OPENCL, DNN_TARGET_OPENCL_FP16:** Runs on the iGPU, requires OpenCL drivers. Install [intel-opencl-icd](https://launchpad.net/ubuntu/jammy/+package/intel-opencl-icd) on Ubuntu.
+- **DNN_TARGET_MYRIAD:** Runs on Intel® VPUs such as the [Neural Compute Stick](https://www.intel.com/content/www/us/en/products/sku/140109/intel-neural-compute-stick-2/specifications.html); see the [setup instructions](https://www.intel.com/content/www/us/en/developer/archive/tools/neural-compute-stick.html).
+- **DNN_TARGET_HDDL:** Runs on the Intel® Movidius™ Myriad™ X High Density Deep Learning VPU; see the [HDDL documentation](https://intelsmartedge.github.io/ido-specs/doc/building-blocks/enhanced-platform-awareness/smartedge-open_hddl/) for details.
+- **DNN_TARGET_FPGA:** Runs on Intel® Altera® series FPGAs; see the [getting started guide](https://www.intel.com/content/www/us/en/docs/programmable/768970/2025-1/getting-started-guide.html).
+- **DNN_TARGET_NPU:** Runs on the integrated Intel® AI Boost processor; requires the [Linux](https://github.com/intel/linux-npu-driver/releases/tag/v1.17.0) or [Windows](https://www.intel.com/content/www/us/en/download/794734/intel-npu-driver-windows.html) NPU driver.
\ No newline at end of file
diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp
index e153738215..fbe5044fef 100644
--- a/modules/dnn/include/opencv2/dnn/dnn.hpp
+++ b/modules/dnn/include/opencv2/dnn/dnn.hpp
@@ -72,7 +72,7 @@ CV__DNN_INLINE_NS_BEGIN
         //! DNN_BACKEND_DEFAULT equals to OPENCV_DNN_BACKEND_DEFAULT, which can be defined using CMake or a configuration parameter
         DNN_BACKEND_DEFAULT = 0,
         DNN_BACKEND_HALIDE,
-        DNN_BACKEND_INFERENCE_ENGINE,  //!< Intel OpenVINO computational backend
+        DNN_BACKEND_INFERENCE_ENGINE,  //!< Intel OpenVINO computational backend, supported targets: CPU, OPENCL, OPENCL_FP16, MYRIAD, HDDL, NPU
                                        //!< @note Tutorial how to build OpenCV with OpenVINO: @ref tutorial_dnn_openvino
         DNN_BACKEND_OPENCV,
         DNN_BACKEND_VKCOM,
diff --git a/modules/dnn/src/ie_ngraph.cpp b/modules/dnn/src/ie_ngraph.cpp
index 779cd1ebc0..ef5daa0b6c 100644
--- a/modules/dnn/src/ie_ngraph.cpp
+++ b/modules/dnn/src/ie_ngraph.cpp
@@ -223,6 +223,9 @@ void InfEngineNgraphNet::init(Target targetId)
         case DNN_TARGET_FPGA:
             device_name = "FPGA";
             break;
+        case DNN_TARGET_NPU:
+            device_name = "NPU";
+            break;
         default:
             CV_Error(Error::StsNotImplemented, "Unknown target");
     };
diff --git a/modules/dnn/src/net_openvino.cpp b/modules/dnn/src/net_openvino.cpp
index 501a596e5d..7c6666b78e 100644
--- a/modules/dnn/src/net_openvino.cpp
+++ b/modules/dnn/src/net_openvino.cpp
@@ -125,7 +125,8 @@ public:
                   preferableTarget == DNN_TARGET_OPENCL_FP16 ||
                   preferableTarget == DNN_TARGET_MYRIAD ||
                   preferableTarget == DNN_TARGET_HDDL ||
-                  preferableTarget == DNN_TARGET_FPGA,
+                  preferableTarget == DNN_TARGET_FPGA ||
+                  preferableTarget == DNN_TARGET_NPU,
                   "Unknown OpenVINO target"
         );
     }
diff --git a/modules/dnn/src/op_inf_engine.cpp b/modules/dnn/src/op_inf_engine.cpp
index 04f1da7c71..b4707434c1 100644
--- a/modules/dnn/src/op_inf_engine.cpp
+++ b/modules/dnn/src/op_inf_engine.cpp
@@ -275,6 +275,8 @@ bool checkTarget(Target target)
             return true;
         else if (std::string::npos != i->find("GPU") && (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
             return true;
+        else if (std::string::npos != i->find("NPU") && target == DNN_TARGET_NPU)
+            return true;
     }
     return false;
 }