From b44302382dd3fc7c161e75f6f3118dce57b9860a Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Mon, 8 Jun 2020 21:03:05 +0000
Subject: [PATCH 1/9] ffmpeg/3.4: update FFmpeg wrapper

- FFmpeg 3.4.7
---
 3rdparty/ffmpeg/ffmpeg.cmake | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/3rdparty/ffmpeg/ffmpeg.cmake b/3rdparty/ffmpeg/ffmpeg.cmake
index 8e211b58ae..134547cbdd 100644
--- a/3rdparty/ffmpeg/ffmpeg.cmake
+++ b/3rdparty/ffmpeg/ffmpeg.cmake
@@ -1,8 +1,8 @@
-# Binaries branch name: ffmpeg/3.4_20200310
-# Binaries were created for OpenCV: 4966186e10e2a940514d8c20447ca4a828af5f46
-ocv_update(FFMPEG_BINARIES_COMMIT "e81ccda615672833b578c6cefdb859ad69c560ba")
-ocv_update(FFMPEG_FILE_HASH_BIN32 "301ae2000e25f800ab8e0065f277ad28")
-ocv_update(FFMPEG_FILE_HASH_BIN64 "d87ce032289c3f811d02f0c3d8dbe366")
+# Binaries branch name: ffmpeg/3.4_20200608
+# Binaries were created for OpenCV: 458f1d5ebe31e22789d9d781d0ca2ca936758fde
+ocv_update(FFMPEG_BINARIES_COMMIT "57064cd66d98994503b34aade3c8d8ff25007b46")
+ocv_update(FFMPEG_FILE_HASH_BIN32 "6fff20f5617bd1b7362058790db52caa")
+ocv_update(FFMPEG_FILE_HASH_BIN64 "15df55131471191b575668a424dff385")
 ocv_update(FFMPEG_FILE_HASH_CMAKE "3b90f67f4b429e77d3da36698cef700c")
 
 function(download_win_ffmpeg script_var)

From 1b336bb602962c17011f9bcfe5cc5787dd7f54c9 Mon Sep 17 00:00:00 2001
From: Gourav Roy <34737471+themechanicalcoder@users.noreply.github.com>
Date: Wed, 10 Jun 2020 12:23:18 +0530
Subject: [PATCH 2/9] Merge pull request #16955 from themechanicalcoder:text_recognition

* add text recognition sample

* fix pylint warning

* made changes according to the c++ example

* fix errors

* add text recognition sample

* update text detection sample
---
 samples/dnn/text_detection.py | 129 ++++++++++++++++++++++++++++------
 1 file changed, 107 insertions(+), 22 deletions(-)

diff --git a/samples/dnn/text_detection.py b/samples/dnn/text_detection.py
index 9ea4c10190..7014a80148 100644
--- a/samples/dnn/text_detection.py
+++ b/samples/dnn/text_detection.py
@@ -1,25 +1,81 @@
+'''
+    Text detection model: https://github.com/argman/EAST
+    Download link: https://www.dropbox.com/s/r2ingd0l3zt8hxs/frozen_east_text_detection.tar.gz?dl=1
+    Text recognition model taken from here: https://github.com/meijieru/crnn.pytorch
+    How to convert from pb to onnx:
+    Using classes from here: https://github.com/meijieru/crnn.pytorch/blob/master/models/crnn.py
+    import torch
+    import models.crnn as CRNN
+    model = CRNN(32, 1, 37, 256)
+    model.load_state_dict(torch.load('crnn.pth'))
+    dummy_input = torch.randn(1, 1, 32, 100)
+    torch.onnx.export(model, dummy_input, "crnn.onnx", verbose=True)
+'''
+
+
 # Import required modules
+import numpy as np
 import cv2 as cv
 import math
 import argparse
 
 ############ Add argument parser for command line arguments ############
-parser = argparse.ArgumentParser(description='Use this script to run TensorFlow implementation (https://github.com/argman/EAST) of EAST: An Efficient and Accurate Scene Text Detector (https://arxiv.org/abs/1704.03155v2)')
-parser.add_argument('--input', help='Path to input image or video file. Skip this argument to capture frames from a camera.')
-parser.add_argument('--model', required=True,
-                    help='Path to a binary .pb file of model contains trained weights.')
+parser = argparse.ArgumentParser(
+    description="Use this script to run TensorFlow implementation (https://github.com/argman/EAST) of "
+                "EAST: An Efficient and Accurate Scene Text Detector (https://arxiv.org/abs/1704.03155v2)"
+                "The OCR model can be obtained from converting the pretrained CRNN model to .onnx format from the github repository https://github.com/meijieru/crnn.pytorch")
+parser.add_argument('--input',
+                    help='Path to input image or video file. Skip this argument to capture frames from a camera.')
+parser.add_argument('--model', '-m', required=True,
+                    help='Path to a binary .pb file contains trained detector network.')
+parser.add_argument('--ocr', default="crnn.onnx",
+                    help="Path to a binary .pb or .onnx file contains trained recognition network", )
 parser.add_argument('--width', type=int, default=320,
                     help='Preprocess input image by resizing to a specific width. It should be multiple by 32.')
-parser.add_argument('--height',type=int, default=320,
+parser.add_argument('--height', type=int, default=320,
                     help='Preprocess input image by resizing to a specific height. It should be multiple by 32.')
-parser.add_argument('--thr',type=float, default=0.5,
+parser.add_argument('--thr', type=float, default=0.5,
                     help='Confidence threshold.')
-parser.add_argument('--nms',type=float, default=0.4,
+parser.add_argument('--nms', type=float, default=0.4,
                     help='Non-maximum suppression threshold.')
 args = parser.parse_args()
 
+
 ############ Utility functions ############
-def decode(scores, geometry, scoreThresh):
+
+def fourPointsTransform(frame, vertices):
+    vertices = np.asarray(vertices)
+    outputSize = (100, 32)
+    targetVertices = np.array([
+        [0, outputSize[1] - 1],
+        [0, 0],
+        [outputSize[0] - 1, 0],
+        [outputSize[0] - 1, outputSize[1] - 1]], dtype="float32")
+
+    rotationMatrix = cv.getPerspectiveTransform(vertices, targetVertices)
+    result = cv.warpPerspective(frame, rotationMatrix, outputSize)
+    return result
+
+
+def decodeText(scores):
+    text = ""
+    alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
+    for i in range(scores.shape[0]):
+        c = np.argmax(scores[i][0])
+        if c != 0:
+            text += alphabet[c - 1]
+        else:
+            text += '-'
+
+    # adjacent same letters as well as background text must be removed to get the final output
+    char_list = []
+    for i in range(len(text)):
+        if text[i] != '-' and (not (i > 0 and text[i] == text[i - 1])):
+            char_list.append(text[i])
+    return ''.join(char_list)
+
+
+def decodeBoundingBoxes(scores, geometry, scoreThresh):
     detections = []
     confidences = []
 
@@ -47,7 +103,7 @@ def decode(scores, geometry, scoreThresh):
             score = scoresData[x]
 
             # If score is lower than threshold score, move to next x
-            if(score < scoreThresh):
+            if (score < scoreThresh):
                 continue
 
             # Calculate offset
@@ -66,24 +122,27 @@ def decode(scores, geometry, scoreThresh):
 
             # Find points for rectangle
             p1 = (-sinA * h + offset[0], -cosA * h + offset[1])
-            p3 = (-cosA * w + offset[0],  sinA * w + offset[1])
-            center = (0.5*(p1[0]+p3[0]), 0.5*(p1[1]+p3[1]))
-            detections.append((center, (w,h), -1*angle * 180.0 / math.pi))
+            p3 = (-cosA * w + offset[0], sinA * w + offset[1])
+            center = (0.5 * (p1[0] + p3[0]), 0.5 * (p1[1] + p3[1]))
+            detections.append((center, (w, h), -1 * angle * 180.0 / math.pi))
             confidences.append(float(score))
 
     # Return detections and confidences
     return [detections, confidences]
 
+
 def main():
     # Read and store arguments
     confThreshold = args.thr
     nmsThreshold = args.nms
     inpWidth = args.width
     inpHeight = args.height
-    model = args.model
+    modelDetector = args.model
+    modelRecognition = args.ocr
 
     # Load network
-    net = cv.dnn.readNet(model)
+    detector = cv.dnn.readNet(modelDetector)
+    recognizer = cv.dnn.readNet(modelRecognition)
 
     # Create a new named window
     kWinName = "EAST: An Efficient and Accurate Scene Text Detector"
@@ -95,6 +154,7 @@ def main():
 
     # Open a video file or an image file or a camera stream
     cap = cv.VideoCapture(args.input if args.input else 0)
+    tickmeter = cv.TickMeter()
     while cv.waitKey(1) < 0:
         # Read frame
         hasFrame, frame = cap.read()
@@ -111,19 +171,20 @@ def main():
         # Create a 4D blob from frame.
         blob = cv.dnn.blobFromImage(frame, 1.0, (inpWidth, inpHeight), (123.68, 116.78, 103.94), True, False)
 
-        # Run the model
-        net.setInput(blob)
-        outs = net.forward(outNames)
-        t, _ = net.getPerfProfile()
-        label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
+        # Run the detection model
+        detector.setInput(blob)
+
+        tickmeter.start()
+        outs = detector.forward(outNames)
+        tickmeter.stop()
 
         # Get scores and geometry
         scores = outs[0]
         geometry = outs[1]
-        [boxes, confidences] = decode(scores, geometry, confThreshold)
+        [boxes, confidences] = decodeBoundingBoxes(scores, geometry, confThreshold)
 
         # Apply NMS
-        indices = cv.dnn.NMSBoxesRotated(boxes, confidences, confThreshold,nmsThreshold)
+        indices = cv.dnn.NMSBoxesRotated(boxes, confidences, confThreshold, nmsThreshold)
         for i in indices:
             # get 4 corners of the rotated rect
             vertices = cv.boxPoints(boxes[i[0]])
@@ -131,16 +192,40 @@ def main():
             for j in range(4):
                 vertices[j][0] *= rW
                 vertices[j][1] *= rH
+
+
+            # get cropped image using perspective transform
+            if modelRecognition:
+                cropped = fourPointsTransform(frame, vertices)
+                cropped = cv.cvtColor(cropped, cv.COLOR_BGR2GRAY)
+
+                # Create a 4D blob from cropped image
+                blob = cv.dnn.blobFromImage(cropped, size=(100, 32), mean=127.5, scalefactor=1 / 127.5)
+                recognizer.setInput(blob)
+
+                # Run the recognition model
+                tickmeter.start()
+                result = recognizer.forward()
+                tickmeter.stop()
+
+                # decode the result into text
+                wordRecognized = decodeText(result)
+                cv.putText(frame, wordRecognized, (int(vertices[1][0]), int(vertices[1][1])), cv.FONT_HERSHEY_SIMPLEX,
+                           0.5, (255, 0, 0))
+
             for j in range(4):
                 p1 = (vertices[j][0], vertices[j][1])
                 p2 = (vertices[(j + 1) % 4][0], vertices[(j + 1) % 4][1])
                 cv.line(frame, p1, p2, (0, 255, 0), 1)
 
         # Put efficiency information
+        label = 'Inference time: %.2f ms' % (tickmeter.getTimeMilli())
         cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
 
         # Display the frame
-        cv.imshow(kWinName,frame)
+        cv.imshow(kWinName, frame)
+        tickmeter.reset()
+
 
 if __name__ == "__main__":
     main()
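
A minimal sketch of the recognition path that [PATCH 2/9] adds to samples/dnn/text_detection.py, for exercising the CRNN model on a single pre-cropped text region. The file names `crnn.onnx` and `word.png` are assumptions; the preprocessing (100x32 grayscale blob, scale 1/127.5, mean 127.5) and the greedy CTC decoding mirror the sample above.

```python
import numpy as np
import cv2 as cv

# Assumed inputs: a converted CRNN model and an image already cropped to one word.
recognizer = cv.dnn.readNet("crnn.onnx")
word = cv.imread("word.png", cv.IMREAD_GRAYSCALE)
word = cv.resize(word, (100, 32))

# Same preprocessing as the sample: 100x32 grayscale blob, normalized to roughly [-1, 1].
blob = cv.dnn.blobFromImage(word, size=(100, 32), mean=127.5, scalefactor=1 / 127.5)
recognizer.setInput(blob)
scores = recognizer.forward()  # shape (T, 1, 37): per-timestep scores over blank + alphabet

# Greedy CTC decoding: drop blanks (class 0) and collapse adjacent repeats.
alphabet = "0123456789abcdefghijklmnopqrstuvwxyz"
text, prev = "", 0
for t in range(scores.shape[0]):
    c = int(np.argmax(scores[t][0]))
    if c != 0 and c != prev:
        text += alphabet[c - 1]
    prev = c
print(text)
```

The model itself can be exported with the torch.onnx snippet quoted in the header of text_detection.py.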

From 781fbde44989dfbff69e532d5be2b9d2e35b2f10 Mon Sep 17 00:00:00 2001
From: Rasmus
Date: Wed, 10 Jun 2020 09:53:18 +0200
Subject: [PATCH 3/9] Merge pull request #17368 from themightyoarfish:cv2eigen-doc

* Add documentation about usage of cv2eigen functions in eigen.hpp

* Fixed Doxygen syntax.

Co-authored-by: Alexander Smorkalov
---
 modules/core/include/opencv2/core/eigen.hpp | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/modules/core/include/opencv2/core/eigen.hpp b/modules/core/include/opencv2/core/eigen.hpp
index 8afc06caa7..3d7ba8fa14 100644
--- a/modules/core/include/opencv2/core/eigen.hpp
+++ b/modules/core/include/opencv2/core/eigen.hpp
@@ -66,10 +66,18 @@
 namespace cv
 {
 
-//! @addtogroup core_eigen
+/** @addtogroup core_eigen
+These functions are provided for OpenCV-Eigen interoperability. They convert `Mat`
+objects to corresponding `Eigen::Matrix` objects and vice-versa. Consult the [Eigen
+documentation](https://eigen.tuxfamily.org/dox/group__TutorialMatrixClass.html) for
+information about the `Matrix` template type.
+
+@note Using these functions requires the `Eigen/Dense` or similar header to be
+included before this header.
+*/
 //! @{
 
-#ifdef OPENCV_EIGEN_TENSOR_SUPPORT
+#if defined(OPENCV_EIGEN_TENSOR_SUPPORT) || defined(CV_DOXYGEN)
 /** @brief Converts an Eigen::Tensor to a cv::Mat.
 
 The method converts an Eigen::Tensor with shape (H x W x C) to a cv::Mat where:

From 9096b1c76864955bc5dbb8918f66def822cad606 Mon Sep 17 00:00:00 2001
From: Maksim Shabunin
Date: Mon, 8 Jun 2020 21:57:27 +0300
Subject: [PATCH 4/9] dnn/NGraph: added nullptr checks

---
 modules/dnn/src/dnn.cpp | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index 1eb1055966..c7ed6a4c17 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -2023,6 +2023,7 @@ struct Net::Impl : public detail::NetImplBase
                 Ptr<BackendNode> inpNode = inpLd.backendNodes[preferableBackend];
                 if (!inpNode.empty())
                 {
                     Ptr<InfEngineNgraphNode> ieNode = inpNode.dynamicCast<InfEngineNgraphNode>();
+                    CV_Assert(!ieNode.empty());
                     ieNode->net->setUnconnectedNodes(ieNode);
                 }
             }
@@ -2067,6 +2068,7 @@ struct Net::Impl : public detail::NetImplBase
                         int cons_inp = cons->oid;
                         Ptr<NgraphBackendWrapper> inpWrapper = inpLd.outputBlobsWrappers[cons_inp].
                                                                      dynamicCast<NgraphBackendWrapper>();
+                        CV_Assert(!inpWrapper.empty());
                         auto iter = std::find(inputNames.begin(), inputNames.end(),
                                               inpWrapper->dataPtr->getName());
                         if (iter == inputNames.end())
                         {

From 7a187e9b5ef0c58ac2c276851efb09b96fcfe857 Mon Sep 17 00:00:00 2001
From: Maksim Shabunin
Date: Mon, 8 Jun 2020 16:39:23 +0300
Subject: [PATCH 5/9] QRDetectMulti: refactored checkPoints method

---
 modules/objdetect/src/qrcode.cpp | 75 ++++++++++++--------------------
 1 file changed, 28 insertions(+), 47 deletions(-)

diff --git a/modules/objdetect/src/qrcode.cpp b/modules/objdetect/src/qrcode.cpp
index 98d4286f0d..5b4bb61e9e 100644
--- a/modules/objdetect/src/qrcode.cpp
+++ b/modules/objdetect/src/qrcode.cpp
@@ -17,6 +17,7 @@
 #include
 #include
 #include
+#include
 
 namespace cv
 {
@@ -1293,7 +1294,7 @@ protected:
     void deleteUsedPoints(vector<vector<Point2f> >& true_points_group, vector<vector<Point2f> >& loc,
                           vector<Point2f>& tmp_localization_points);
     void fixationPoints(vector<Point2f> &local_point);
-    bool checkPoints(const vector<Point2f>& quadrangle_points);
+    bool checkPoints(vector<Point2f> quadrangle_points);
     bool checkPointsInsideQuadrangle(const vector<Point2f>& quadrangle_points);
     bool checkPointsInsideTriangle(const vector<Point2f>& triangle_points);
 
@@ -1571,59 +1572,39 @@ void QRDetectMulti::fixationPoints(vector<Point2f> &local_point)
     }
 }
 
-bool QRDetectMulti::checkPoints(const vector<Point2f>& quadrangle_points)
+class BWCounter
 {
-    if (quadrangle_points.size() != 4)
-        return false;
-    vector<Point2f> quadrangle = quadrangle_points;
-    std::sort(quadrangle.begin(), quadrangle.end(), compareDistanse_y());
-    LineIterator it1(bin_barcode_fullsize, quadrangle[1], quadrangle[0]);
-    LineIterator it2(bin_barcode_fullsize, quadrangle[2], quadrangle[0]);
-    LineIterator it3(bin_barcode_fullsize, quadrangle[1], quadrangle[3]);
-    LineIterator it4(bin_barcode_fullsize, quadrangle[2], quadrangle[3]);
-    vector<LineIterator> list_line_iter;
-    list_line_iter.push_back(it1);
-    list_line_iter.push_back(it2);
-    list_line_iter.push_back(it3);
-    list_line_iter.push_back(it4);
-    int count_w = 0;
-    int count_b = 0;
-    for (int j = 0; j < 3; j +=2)
+    size_t white;
+    size_t black;
+public:
+    BWCounter(size_t b = 0, size_t w = 0) : white(w), black(b) {}
+    BWCounter& operator+=(const BWCounter& other) { black += other.black; white += other.white; return *this; }
+    void count1(uchar pixel) { if (pixel == 255) white++; else if (pixel == 0) black++; }
+    double getBWFraction() const { return white == 0 ? std::numeric_limits<double>::infinity() : double(black) / double(white); }
+    static BWCounter checkOnePair(const Point2f& tl, const Point2f& tr, const Point2f& bl, const Point2f& br, const Mat& img)
     {
-        LineIterator& li = list_line_iter[j];
-        LineIterator& li2 = list_line_iter[j + 1];
-        for (int i = 0; i < li.count; i++)
+        BWCounter res;
+        LineIterator li1(img, tl, tr), li2(img, bl, br);
+        for (int i = 0; i < li1.count && i < li2.count; i++, li1++, li2++)
         {
-
-            Point pt1 = li.pos();
-            Point pt2 = li2.pos();
-            LineIterator it0(bin_barcode_fullsize, pt1, pt2);
-            for (int r = 0; r < it0.count; r++)
-            {
-                int pixel = bin_barcode.at<uint8_t>(it0.pos().y , it0.pos().x);
-                if (pixel == 255)
-                {
-                    count_w++;
-                }
-                if (pixel == 0)
-                {
-                    count_b++;
-                }
-                it0++;
-            }
-            li++;
-            li2++;
+            LineIterator it(img, li1.pos(), li2.pos());
+            for (int r = 0; r < it.count; r++, it++)
+                res.count1(img.at<uchar>(it.pos()));
         }
+        return res;
     }
-    if (count_w == 0)
-        return false;
+};
 
-    double frac = double(count_b) / double(count_w);
-    double bottom_bound = 0.76;
-    double upper_bound = 1.24;
-    if ((frac <= bottom_bound) || (frac >= upper_bound))
+bool QRDetectMulti::checkPoints(vector<Point2f> quadrangle)
+{
+    if (quadrangle.size() != 4)
         return false;
-    return true;
+    std::sort(quadrangle.begin(), quadrangle.end(), compareDistanse_y());
+    BWCounter s;
+    s += BWCounter::checkOnePair(quadrangle[1], quadrangle[0], quadrangle[2], quadrangle[0], bin_barcode);
+    s += BWCounter::checkOnePair(quadrangle[1], quadrangle[3], quadrangle[2], quadrangle[3], bin_barcode);
+    const double frac = s.getBWFraction();
+    return frac > 0.76 && frac < 1.24;
 }
 
 bool QRDetectMulti::checkPointsInsideQuadrangle(const vector<Point2f>& quadrangle_points)

From 0417c8c9c715209957268c883aaa9fb592ec433c Mon Sep 17 00:00:00 2001
From: Jessica Wong <39626651+wongjessica@users.noreply.github.com>
Date: Thu, 21 May 2020 08:36:19 -0400
Subject: [PATCH 6/9] Added information to OpenCV documentation [MacOS]

Added and Edited specific information to the "Installation in MacOS" OpenCV documentation.

Closes #17340
---
 .../introduction/macos_install/macos_install.markdown | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/doc/tutorials/introduction/macos_install/macos_install.markdown b/doc/tutorials/introduction/macos_install/macos_install.markdown
index 4db1cbfc86..fcb0618c9b 100644
--- a/doc/tutorials/introduction/macos_install/macos_install.markdown
+++ b/doc/tutorials/introduction/macos_install/macos_install.markdown
@@ -30,7 +30,7 @@ Installing CMake
 -# Install the dmg package and launch it from Applications. That will give you the UI app of CMake
--# From the CMake app window, choose menu Tools --> Install For Command Line Use.
+-# From the CMake app window, choose menu Tools --> How to Install For Command Line Use. Then, follow the instructions from the pop-up there.
 -# Install folder will be /usr/bin/ by default, submit it by choosing Install command line links.
@@ -64,7 +64,7 @@ git clone https://github.com/opencv/opencv_contrib.git
 Building OpenCV from Source Using CMake
 ---------------------------------------
 
--# Create a temporary directory, which we denote as `<cmake_build_dir>`, where you want to put
+-# Create a temporary directory, which we denote as `build_opencv`, where you want to put
    the generated Makefiles, project files as well the object files and output binaries and enter
    there.
@@ -85,8 +85,8 @@ Building OpenCV from Source Using CMake
     or cmake-gui
 
-    - set full path to OpenCV source code, e.g. `/home/user/opencv`
-    - set full path to `<cmake_build_dir>`, e.g. `/home/user/build_opencv`
+    - set the OpenCV source code path to, e.g. `/home/user/opencv`
+    - set the binary build path to your CMake build directory, e.g. `/home/user/build_opencv`
     - set optional parameters
     - run: "Configure"
     - run: "Generate"

From db3e3be8ee259908248c9ffb5e67056cb3b94756 Mon Sep 17 00:00:00 2001
From: cyy
Date: Sat, 29 Feb 2020 18:59:43 +0800
Subject: [PATCH 7/9] improve the mkl search procedure

---
 cmake/OpenCVFindMKL.cmake | 20 +++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/cmake/OpenCVFindMKL.cmake b/cmake/OpenCVFindMKL.cmake
index 19a76ddf57..141481ed42 100644
--- a/cmake/OpenCVFindMKL.cmake
+++ b/cmake/OpenCVFindMKL.cmake
@@ -79,9 +79,10 @@ get_mkl_version(${MKL_INCLUDE_DIRS}/mkl_version.h)
 
 #determine arch
 if(CMAKE_CXX_SIZEOF_DATA_PTR EQUAL 8)
-    set(MKL_X64 1)
-    set(MKL_ARCH "intel64")
-
+    set(MKL_ARCH_LIST "intel64")
+    if(MSVC)
+        list(APPEND MKL_ARCH_LIST "win-x64")
+    endif()
     include(CheckTypeSize)
     CHECK_TYPE_SIZE(int _sizeof_int)
     if (_sizeof_int EQUAL 4)
@@ -90,14 +91,19 @@ if(CMAKE_CXX_SIZEOF_DATA_PTR EQUAL 8)
         set(MKL_ARCH_SUFFIX "ilp64")
     endif()
 else()
-    set(MKL_ARCH "ia32")
+    set(MKL_ARCH_LIST "ia32")
     set(MKL_ARCH_SUFFIX "c")
 endif()
 
 if(MKL_VERSION_STR VERSION_GREATER "11.3.0" OR MKL_VERSION_STR VERSION_EQUAL "11.3.0")
     set(mkl_lib_find_paths
-        ${MKL_ROOT_DIR}/lib
-        ${MKL_ROOT_DIR}/lib/${MKL_ARCH} ${MKL_ROOT_DIR}/../tbb/lib/${MKL_ARCH})
+        ${MKL_ROOT_DIR}/lib)
+    foreach(MKL_ARCH ${MKL_ARCH_LIST})
+        list(APPEND mkl_lib_find_paths
+            ${MKL_ROOT_DIR}/lib/${MKL_ARCH}
+            ${MKL_ROOT_DIR}/../tbb/lib/${MKL_ARCH}
+            ${MKL_ROOT_DIR}/${MKL_ARCH})
+    endforeach()
 
     set(mkl_lib_list "mkl_intel_${MKL_ARCH_SUFFIX}")
@@ -121,7 +127,7 @@ endif()
 
 set(MKL_LIBRARIES "")
 foreach(lib ${mkl_lib_list})
-    find_library(${lib} ${lib} ${mkl_lib_find_paths})
+    find_library(${lib} NAMES ${lib} ${lib}_dll HINTS ${mkl_lib_find_paths})
     mark_as_advanced(${lib})
     if(NOT ${lib})
         mkl_fail()

From d2a9efd03931ebac88734c0f9753186c3da6385f Mon Sep 17 00:00:00 2001
From: Alexander Alekhin
Date: Tue, 7 Apr 2020 16:17:21 +0300
Subject: [PATCH 8/9] dnn: use OpenVINO 2020.3 defines

---
 cmake/OpenCVDetectInferenceEngine.cmake | 4 ++--
 modules/dnn/src/dnn.cpp                 | 2 +-
 modules/dnn/src/ie_ngraph.cpp           | 6 +++---
 modules/dnn/src/op_inf_engine.hpp       | 5 +++--
 4 files changed, 9 insertions(+), 8 deletions(-)

diff --git a/cmake/OpenCVDetectInferenceEngine.cmake b/cmake/OpenCVDetectInferenceEngine.cmake
index 7eb81d6ef5..c0379c32b3 100644
--- a/cmake/OpenCVDetectInferenceEngine.cmake
+++ b/cmake/OpenCVDetectInferenceEngine.cmake
@@ -135,9 +135,9 @@ endif()
 
 if(INF_ENGINE_TARGET)
   if(NOT INF_ENGINE_RELEASE)
-    message(WARNING "InferenceEngine version has not been set, 2020.2 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
+    message(WARNING "InferenceEngine version has not been set, 2020.3 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.")
   endif()
-  set(INF_ENGINE_RELEASE "2020020000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
+  set(INF_ENGINE_RELEASE "2020030000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)")
   set_target_properties(${INF_ENGINE_TARGET} PROPERTIES
     INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}"
   )
diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp
index c7ed6a4c17..da1e901fa3 100644
--- a/modules/dnn/src/dnn.cpp
+++ b/modules/dnn/src/dnn.cpp
@@ -2113,7 +2113,7 @@ struct Net::Impl : public detail::NetImplBase
 
             auto ieInpNode = inputNodes[i].dynamicCast<InfEngineNgraphNode>();
             CV_Assert(oid < ieInpNode->node->get_output_size());
-#if INF_ENGINE_VER_MAJOR_GT(2020030000)
+#if INF_ENGINE_VER_MAJOR_GT(INF_ENGINE_RELEASE_2020_3)
             inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(ieInpNode->node->get_output_as_single_output_node(oid)));
 #else
             inputNodes[i] = Ptr<BackendNode>(new InfEngineNgraphNode(ieInpNode->node->get_output_as_single_output_node(oid, false)));
diff --git a/modules/dnn/src/ie_ngraph.cpp b/modules/dnn/src/ie_ngraph.cpp
index cf94500a8c..42a4259646 100644
--- a/modules/dnn/src/ie_ngraph.cpp
+++ b/modules/dnn/src/ie_ngraph.cpp
@@ -82,7 +82,7 @@ public:
         return type_info;
     }
 
-#if INF_ENGINE_VER_MAJOR_GT(2020020000)
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_3)
     NgraphCustomOp(const ngraph::OutputVector& inputs,
 #else
     NgraphCustomOp(const ngraph::NodeVector& inputs,
@@ -112,7 +112,7 @@ public:
 
     std::shared_ptr<ngraph::Node> copy_with_new_args(const ngraph::NodeVector& new_args) const override
     {
-#if INF_ENGINE_VER_MAJOR_GT(2020020000)
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_3)
         return std::make_shared<NgraphCustomOp>(ngraph::as_output_vector(new_args), params);
 #else
         return std::make_shared<NgraphCustomOp>(new_args, params);
@@ -283,7 +283,7 @@ InfEngineNgraphNode::InfEngineNgraphNode(const std::vector<Ptr<BackendNode> >& n
         {"internals", shapesToStr(internals)}
     };
 
-#if INF_ENGINE_VER_MAJOR_GT(2020020000)
+#if INF_ENGINE_VER_MAJOR_GE(INF_ENGINE_RELEASE_2020_3)
     ngraph::OutputVector inp_nodes;
 #else
     ngraph::NodeVector inp_nodes;
diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index a256989f96..f39c4ee23b 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -25,10 +25,11 @@
 #define INF_ENGINE_RELEASE_2019R3 2019030000
 #define INF_ENGINE_RELEASE_2020_1 2020010000
 #define INF_ENGINE_RELEASE_2020_2 2020020000
+#define INF_ENGINE_RELEASE_2020_3 2020030000
 
 #ifndef INF_ENGINE_RELEASE
-#warning("IE version have not been provided via command-line. Using 2020.2 by default")
-#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2020_2
+#warning("IE version have not been provided via command-line. Using 2020.3 by default")
+#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2020_3
 #endif
 
 #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000))

From 9697e3ac245f32f1c6cd830fb59933dea0f40a5c Mon Sep 17 00:00:00 2001
From: Ilya Lavrenov
Date: Fri, 12 Jun 2020 20:27:21 +0300
Subject: [PATCH 9/9] Removed error listener usage

---
 modules/dnn/src/ie_ngraph.cpp     | 2 ++
 modules/dnn/src/op_inf_engine.hpp | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/modules/dnn/src/ie_ngraph.cpp b/modules/dnn/src/ie_ngraph.cpp
index cf94500a8c..82cb10eae9 100644
--- a/modules/dnn/src/ie_ngraph.cpp
+++ b/modules/dnn/src/ie_ngraph.cpp
@@ -239,7 +239,9 @@ private:
 class InfEngineNgraphExtension : public InferenceEngine::IExtension
 {
 public:
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
     virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
+#endif
     virtual void Unload() noexcept {}
     virtual void Release() noexcept {}
     virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}
diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp
index a256989f96..337b4fbcb1 100644
--- a/modules/dnn/src/op_inf_engine.hpp
+++ b/modules/dnn/src/op_inf_engine.hpp
@@ -225,7 +225,9 @@ private:
 class InfEngineExtension : public InferenceEngine::IExtension
 {
 public:
+#if INF_ENGINE_VER_MAJOR_LT(INF_ENGINE_RELEASE_2020_2)
     virtual void SetLogCallback(InferenceEngine::IErrorListener&) noexcept {}
+#endif
     virtual void Unload() noexcept {}
     virtual void Release() noexcept {}
    virtual void GetVersion(const InferenceEngine::Version*&) const noexcept {}