diff --git a/samples/dnn/classification.cpp b/samples/dnn/classification.cpp index 8440371688..769d6874be 100644 --- a/samples/dnn/classification.cpp +++ b/samples/dnn/classification.cpp @@ -22,12 +22,17 @@ std::string keys = "0: automatically (by default), " "1: Halide language (http://halide-lang.org/), " "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " - "3: OpenCV implementation }" + "3: OpenCV implementation, " + "4: VKCOM, " + "5: CUDA }" "{ target | 0 | Choose one of target computation devices: " "0: CPU target (by default), " "1: OpenCL, " "2: OpenCL fp16 (half-float precision), " - "3: VPU }"; + "3: VPU, " + "4: Vulkan, " + "6: CUDA, " + "7: CUDA fp16 (half-float preprocess) }"; using namespace cv; using namespace dnn; diff --git a/samples/dnn/classification.py b/samples/dnn/classification.py index 558c8b0bdc..be639e8d74 100644 --- a/samples/dnn/classification.py +++ b/samples/dnn/classification.py @@ -7,9 +7,9 @@ from common import * def get_args_parser(func_args): backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, - cv.dnn.DNN_BACKEND_OPENCV) + cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, - cv.dnn.DNN_TARGET_HDDL) + cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) parser = argparse.ArgumentParser(add_help=False) parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'), @@ -32,14 +32,19 @@ def get_args_parser(func_args): "%d: automatically (by default), " "%d: Halide language (http://halide-lang.org/), " "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " - "%d: OpenCV implementation" % backends) + "%d: OpenCV implementation, " + "%d: VKCOM, " + "%d: CUDA" % backends) 
parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: ' '%d: CPU target (by default), ' '%d: OpenCL, ' '%d: OpenCL fp16 (half-float precision), ' '%d: NCS2 VPU, ' - '%d: HDDL VPU' % targets) + '%d: HDDL VPU, ' + '%d: Vulkan, ' + '%d: CUDA, ' + '%d: CUDA fp16 (half-float preprocess)'% targets) args, _ = parser.parse_known_args() add_preproc_args(args.zoo, parser, 'classification') diff --git a/samples/dnn/dasiamrpn_tracker.cpp b/samples/dnn/dasiamrpn_tracker.cpp index e6c05ec757..f6e307c682 100644 --- a/samples/dnn/dasiamrpn_tracker.cpp +++ b/samples/dnn/dasiamrpn_tracker.cpp @@ -27,12 +27,17 @@ const char *keys = "0: automatically (by default), " "1: Halide language (http://halide-lang.org/), " "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " - "3: OpenCV implementation }" + "3: OpenCV implementation, " + "4: VKCOM, " + "5: CUDA }" "{ target | 0 | Choose one of target computation devices: " "0: CPU target (by default), " "1: OpenCL, " "2: OpenCL fp16 (half-float precision), " - "3: VPU }" + "3: VPU, " + "4: Vulkan, " + "6: CUDA, " + "7: CUDA fp16 (half-float preprocess) }" ; static diff --git a/samples/dnn/human_parsing.cpp b/samples/dnn/human_parsing.cpp index bf2cc294c8..0c00c02841 100644 --- a/samples/dnn/human_parsing.cpp +++ b/samples/dnn/human_parsing.cpp @@ -78,12 +78,17 @@ int main(int argc, char**argv) "0: automatically (by default), " "1: Halide language (http://halide-lang.org/), " "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " - "3: OpenCV implementation }" + "3: OpenCV implementation, " + "4: VKCOM, " + "5: CUDA }" "{target t | 0 | Choose one of target computation devices: " "0: CPU target (by default), " "1: OpenCL, " "2: OpenCL fp16 (half-float precision), " - "3: VPU }" + "3: VPU, " + "4: Vulkan, " + "6: CUDA, " + "7: CUDA fp16 (half-float preprocess) }" ); if (argc == 1 || 
parser.has("help")) { diff --git a/samples/dnn/human_parsing.py b/samples/dnn/human_parsing.py index 09371fe4a9..237f764b95 100644 --- a/samples/dnn/human_parsing.py +++ b/samples/dnn/human_parsing.py @@ -45,8 +45,10 @@ import numpy as np import cv2 as cv -backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV) -targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL) +backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, + cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) +targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, + cv.dnn.DNN_TARGET_HDDL, cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) def preprocess(image): @@ -162,14 +164,19 @@ if __name__ == '__main__': help="Choose one of computation backends: " "%d: automatically (by default), " "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " - "%d: OpenCV implementation" % backends) + "%d: OpenCV implementation, " + "%d: VKCOM, " + "%d: CUDA"% backends) parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: ' '%d: CPU target (by default), ' '%d: OpenCL, ' '%d: OpenCL fp16 (half-float precision), ' '%d: NCS2 VPU, ' - '%d: HDDL VPU' % targets) + '%d: HDDL VPU, ' + '%d: Vulkan, ' + '%d: CUDA, ' + '%d: CUDA fp16 (half-float preprocess)' % targets) args, _ = parser.parse_known_args() if not os.path.isfile(args.model): diff --git a/samples/dnn/object_detection.cpp b/samples/dnn/object_detection.cpp index 796e729ece..5ff112fe5d 100644 --- a/samples/dnn/object_detection.cpp +++ b/samples/dnn/object_detection.cpp @@ -27,12 +27,17 @@ std::string keys = "0: automatically (by default), " "1: Halide language 
(http://halide-lang.org/), " "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " - "3: OpenCV implementation }" + "3: OpenCV implementation, " + "4: VKCOM, " + "5: CUDA }" "{ target | 0 | Choose one of target computation devices: " "0: CPU target (by default), " "1: OpenCL, " "2: OpenCL fp16 (half-float precision), " - "3: VPU }" + "3: VPU, " + "4: Vulkan, " + "6: CUDA, " + "7: CUDA fp16 (half-float preprocess) }" "{ async | 0 | Number of asynchronous forwards at the same time. " "Choose 0 for synchronous mode }"; diff --git a/samples/dnn/object_detection.py b/samples/dnn/object_detection.py index ec8bf82866..0ca5586159 100644 --- a/samples/dnn/object_detection.py +++ b/samples/dnn/object_detection.py @@ -14,8 +14,10 @@ from tf_text_graph_common import readTextMessage from tf_text_graph_ssd import createSSDGraph from tf_text_graph_faster_rcnn import createFasterRCNNGraph -backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV) -targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL) +backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, + cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) +targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL, + cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) parser = argparse.ArgumentParser(add_help=False) parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'), @@ -35,14 +37,19 @@ parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DE "%d: automatically (by default), " "%d: Halide language (http://halide-lang.org/), " "%d: Intel's Deep Learning Inference Engine 
(https://software.intel.com/openvino-toolkit), " - "%d: OpenCV implementation" % backends) + "%d: OpenCV implementation, " + "%d: VKCOM, " + "%d: CUDA" % backends) parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: ' '%d: CPU target (by default), ' '%d: OpenCL, ' '%d: OpenCL fp16 (half-float precision), ' '%d: NCS2 VPU, ' - '%d: HDDL VPU' % targets) + '%d: HDDL VPU, ' + '%d: Vulkan, ' + '%d: CUDA, ' + '%d: CUDA fp16 (half-float preprocess)' % targets) parser.add_argument('--async', type=int, default=0, dest='asyncN', help='Number of asynchronous forwards at the same time. ' diff --git a/samples/dnn/person_reid.cpp b/samples/dnn/person_reid.cpp index 23b766114c..f0c22e96ad 100644 --- a/samples/dnn/person_reid.cpp +++ b/samples/dnn/person_reid.cpp @@ -36,13 +36,15 @@ const char* keys = "0: automatically (by default), " "1: Halide language (http://halide-lang.org/), " "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " -"3: OpenCV implementation ," +"3: OpenCV implementation, " +"4: VKCOM, " "5: CUDA }" "{target t | 0 | choose one of target computation devices: " "0: CPU target (by default), " "1: OpenCL, " "2: OpenCL fp16 (half-float precision), " -"6: CUDA ," +"4: Vulkan, " +"6: CUDA, " "7: CUDA fp16 (half-float preprocess) }"; namespace cv{ diff --git a/samples/dnn/person_reid.py b/samples/dnn/person_reid.py index 502f126bd5..08f04faa52 100644 --- a/samples/dnn/person_reid.py +++ b/samples/dnn/person_reid.py @@ -21,6 +21,7 @@ import cv2 as cv backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, + cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) targets = (cv.dnn.DNN_TARGET_CPU, @@ -28,6 +29,7 @@ targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL, + cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) 
@@ -212,7 +214,8 @@ if __name__ == '__main__': help="Choose one of computation backends: " "%d: automatically (by default), " "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " - "%d: OpenCV implementation" + "%d: OpenCV implementation, " + "%d: VKCOM, " "%d: CUDA backend"% backends) parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: ' @@ -220,9 +223,10 @@ if __name__ == '__main__': '%d: OpenCL, ' '%d: OpenCL fp16 (half-float precision), ' '%d: NCS2 VPU, ' - '%d: HDDL VPU' - '%d: CUDA,' - '%d: CUDA FP16,' + '%d: HDDL VPU, ' + '%d: Vulkan, ' + '%d: CUDA, ' + '%d: CUDA FP16' % targets) args, _ = parser.parse_known_args() diff --git a/samples/dnn/segmentation.cpp b/samples/dnn/segmentation.cpp index d9fbad8974..777badf51e 100644 --- a/samples/dnn/segmentation.cpp +++ b/samples/dnn/segmentation.cpp @@ -21,12 +21,17 @@ std::string keys = "0: automatically (by default), " "1: Halide language (http://halide-lang.org/), " "2: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " - "3: OpenCV implementation }" + "3: OpenCV implementation, " + "4: VKCOM, " + "5: CUDA }" "{ target | 0 | Choose one of target computation devices: " "0: CPU target (by default), " "1: OpenCL, " "2: OpenCL fp16 (half-float precision), " - "3: VPU }"; + "3: VPU, " + "4: Vulkan, " + "6: CUDA, " + "7: CUDA fp16 (half-float preprocess) }"; using namespace cv; using namespace dnn; diff --git a/samples/dnn/segmentation.py b/samples/dnn/segmentation.py index 8eeb59ba14..09f3f8dd11 100644 --- a/samples/dnn/segmentation.py +++ b/samples/dnn/segmentation.py @@ -5,8 +5,10 @@ import sys from common import * -backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV) -targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, 
cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL) +backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, + cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) +targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL, + cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) parser = argparse.ArgumentParser(add_help=False) parser.add_argument('--zoo', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'models.yml'), @@ -22,14 +24,19 @@ parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DE "%d: automatically (by default), " "%d: Halide language (http://halide-lang.org/), " "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " - "%d: OpenCV implementation" % backends) + "%d: OpenCV implementation, " + "%d: VKCOM, " + "%d: CUDA"% backends) parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: ' '%d: CPU target (by default), ' '%d: OpenCL, ' '%d: OpenCL fp16 (half-float precision), ' '%d: NCS2 VPU, ' - '%d: HDDL VPU' % targets) + '%d: HDDL VPU, ' + '%d: Vulkan, ' + '%d: CUDA, ' + '%d: CUDA fp16 (half-float preprocess)'% targets) args, _ = parser.parse_known_args() add_preproc_args(args.zoo, parser, 'segmentation') parser = argparse.ArgumentParser(parents=[parser], diff --git a/samples/dnn/siamrpnpp.py b/samples/dnn/siamrpnpp.py index c7c49b1b85..2e15ec6708 100644 --- a/samples/dnn/siamrpnpp.py +++ b/samples/dnn/siamrpnpp.py @@ -327,9 +327,11 @@ def main(): """ Sample SiamRPN Tracker """ # Computation backends supported by layers - backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV) + backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, 
cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, + cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) # Target Devices for computation - targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD) + targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, + cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) parser = argparse.ArgumentParser(description='Use this script to run SiamRPN++ Visual Tracker', formatter_class=argparse.ArgumentDefaultsHelpFormatter) @@ -338,17 +340,22 @@ def main(): parser.add_argument('--search_net', type=str, default='search_net.onnx', help='Path to part of SiamRPN++ ran on search frame.') parser.add_argument('--rpn_head', type=str, default='rpn_head.onnx', help='Path to RPN Head ONNX model.') parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DEFAULT, type=int, - help='Select a computation backend: ' - "%d: automatically (by default) " - "%d: Halide" - "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit)" - "%d: OpenCV Implementation" % backends) + help="Select a computation backend: " + "%d: automatically (by default), " + "%d: Halide, " + "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " + "%d: OpenCV Implementation, " + "%d: VKCOM, " + "%d: CUDA" % backends) parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Select a target device: ' - "%d: CPU target (by default)" - "%d: OpenCL" - "%d: OpenCL FP16" - "%d: Myriad" % targets) + '%d: CPU target (by default), ' + '%d: OpenCL, ' + '%d: OpenCL FP16, ' + '%d: Myriad, ' + '%d: Vulkan, ' + '%d: CUDA, ' + '%d: CUDA fp16 (half-float preprocess)' % targets) args, _ = parser.parse_known_args() if args.input_video and not os.path.isfile(args.input_video): diff --git 
a/samples/dnn/virtual_try_on.py b/samples/dnn/virtual_try_on.py index 076cb21d5b..e46f7ece50 100644 --- a/samples/dnn/virtual_try_on.py +++ b/samples/dnn/virtual_try_on.py @@ -16,8 +16,10 @@ from numpy import linalg from common import findFile from human_parsing import parse_human -backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV) -targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL) +backends = (cv.dnn.DNN_BACKEND_DEFAULT, cv.dnn.DNN_BACKEND_HALIDE, cv.dnn.DNN_BACKEND_INFERENCE_ENGINE, cv.dnn.DNN_BACKEND_OPENCV, + cv.dnn.DNN_BACKEND_VKCOM, cv.dnn.DNN_BACKEND_CUDA) +targets = (cv.dnn.DNN_TARGET_CPU, cv.dnn.DNN_TARGET_OPENCL, cv.dnn.DNN_TARGET_OPENCL_FP16, cv.dnn.DNN_TARGET_MYRIAD, cv.dnn.DNN_TARGET_HDDL, + cv.dnn.DNN_TARGET_VULKAN, cv.dnn.DNN_TARGET_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16) parser = argparse.ArgumentParser(description='Use this script to run virtial try-on using CP-VTON', formatter_class=argparse.ArgumentDefaultsHelpFormatter) @@ -33,14 +35,19 @@ parser.add_argument('--backend', choices=backends, default=cv.dnn.DNN_BACKEND_DE "%d: automatically (by default), " "%d: Halide language (http://halide-lang.org/), " "%d: Intel's Deep Learning Inference Engine (https://software.intel.com/openvino-toolkit), " - "%d: OpenCV implementation" % backends) + "%d: OpenCV implementation, " + "%d: VKCOM, " + "%d: CUDA" % backends) parser.add_argument('--target', choices=targets, default=cv.dnn.DNN_TARGET_CPU, type=int, help='Choose one of target computation devices: ' '%d: CPU target (by default), ' '%d: OpenCL, ' '%d: OpenCL fp16 (half-float precision), ' '%d: NCS2 VPU, ' - '%d: HDDL VPU' % targets) + '%d: HDDL VPU, ' + '%d: Vulkan, ' + '%d: CUDA, ' + '%d: CUDA fp16 (half-float preprocess)'% targets) args, _ = parser.parse_known_args()