From dfb9832a25e8d1852e158bbcead941d3a5941343 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 15 Apr 2019 22:28:33 +0000 Subject: [PATCH 001/152] cmake(protobuf): ensure using of own headers --- 3rdparty/protobuf/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/3rdparty/protobuf/CMakeLists.txt b/3rdparty/protobuf/CMakeLists.txt index ada9891a7b..7e0ea7df70 100644 --- a/3rdparty/protobuf/CMakeLists.txt +++ b/3rdparty/protobuf/CMakeLists.txt @@ -139,6 +139,7 @@ append_if_exist(Protobuf_SRCS ${PROTOBUF_ROOT}/src/google/protobuf/wrappers.pb.cc ) +include_directories(BEFORE "${PROTOBUF_ROOT}/src") # ensure using of own headers: https://github.com/opencv/opencv/issues/13328 add_library(libprotobuf STATIC ${Protobuf_SRCS}) target_include_directories(libprotobuf SYSTEM PUBLIC $) set_target_properties(libprotobuf From da555a2c9bdce45a944ff4c85a2981b79d763724 Mon Sep 17 00:00:00 2001 From: Clement Courbet Date: Thu, 20 Aug 2020 14:12:33 +0200 Subject: [PATCH 002/152] Optimize opencv dft by vectorizing radix2 and radix3. This is useful for non power-of-two sizes when WITH_IPP is not an option. This shows consistent improvement over openCV benchmarks, and we measure even larger improvements on our internal workloads. For example, for 320x480, `32FC*`, we can see a ~5% improvement, as `320=2^6*5` and `480=2^5*3*5`, so the improved radix3 version is used. `64FC*` is flat as expected, as we do not specialize the functors for `double` in this change. 
``` dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, 0, false) 1.239 1.153 1.07 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, 0, true) 0.991 0.926 1.07 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_COMPLEX_OUTPUT, false) 1.367 1.281 1.07 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_COMPLEX_OUTPUT, true) 1.114 1.049 1.06 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_INVERSE, false) 1.313 1.254 1.05 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_INVERSE, true) 1.027 0.977 1.05 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 1.296 1.217 1.06 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 1.039 0.963 1.08 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_ROWS, false) 0.542 0.524 1.04 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_ROWS, true) 0.293 0.277 1.06 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_SCALE, false) 1.265 1.175 1.08 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC1, DFT_SCALE, true) 1.004 0.942 1.07 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, 0, false) 1.292 1.280 1.01 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, 0, true) 1.038 1.030 1.01 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_COMPLEX_OUTPUT, false) 1.484 1.488 1.00 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_COMPLEX_OUTPUT, true) 1.222 1.224 1.00 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_INVERSE, false) 1.380 1.355 1.02 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_INVERSE, true) 1.117 1.133 0.99 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 1.372 1.383 0.99 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 1.117 1.127 0.99 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_ROWS, false) 0.546 0.539 1.01 
dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_ROWS, true) 0.293 0.299 0.98 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_SCALE, false) 1.351 1.339 1.01 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 64FC1, DFT_SCALE, true) 1.099 1.092 1.01 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, 0, false) 2.235 2.123 1.05 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, 0, true) 1.843 1.727 1.07 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_COMPLEX_OUTPUT, false) 2.189 2.109 1.04 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_COMPLEX_OUTPUT, true) 1.827 1.754 1.04 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_INVERSE, false) 2.392 2.309 1.04 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_INVERSE, true) 1.951 1.865 1.05 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 2.391 2.293 1.04 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 1.954 1.882 1.04 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_ROWS, false) 0.811 0.815 0.99 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_ROWS, true) 0.426 0.437 0.98 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_SCALE, false) 2.268 2.152 1.05 dft::Size_MatType_FlagsType_NzeroRows::(320x480, 32FC2, DFT_SCALE, true) 1.893 1.788 1.06 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, 0, false) 4.546 4.395 1.03 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, 0, true) 3.616 3.426 1.06 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_COMPLEX_OUTPUT, false) 4.843 4.668 1.04 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_COMPLEX_OUTPUT, true) 3.825 3.748 1.02 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_INVERSE, false) 4.720 4.525 1.04 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_INVERSE, true) 3.743 3.601 1.04 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 
32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 4.755 4.527 1.05 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 3.744 3.586 1.04 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_ROWS, false) 1.992 2.012 0.99 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_ROWS, true) 1.048 1.048 1.00 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_SCALE, false) 4.625 4.451 1.04 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC1, DFT_SCALE, true) 3.643 3.491 1.04 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, 0, false) 4.499 4.488 1.00 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, 0, true) 3.559 3.555 1.00 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_COMPLEX_OUTPUT, false) 5.155 5.165 1.00 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_COMPLEX_OUTPUT, true) 4.103 4.101 1.00 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_INVERSE, false) 5.484 5.474 1.00 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_INVERSE, true) 4.617 4.518 1.02 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 5.547 5.509 1.01 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 4.553 4.554 1.00 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_ROWS, false) 2.067 2.018 1.02 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_ROWS, true) 1.104 1.079 1.02 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_SCALE, false) 4.665 4.619 1.01 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 64FC1, DFT_SCALE, true) 3.698 3.681 1.00 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, 0, false) 8.774 8.275 1.06 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, 0, true) 6.975 6.527 1.07 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_COMPLEX_OUTPUT, false) 8.720 8.270 1.05 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, 
DFT_COMPLEX_OUTPUT, true) 6.928 6.532 1.06 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_INVERSE, false) 9.272 8.862 1.05 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_INVERSE, true) 7.323 6.946 1.05 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 9.262 8.768 1.06 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 7.298 6.871 1.06 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_ROWS, false) 3.766 3.639 1.03 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_ROWS, true) 1.932 1.889 1.02 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_SCALE, false) 8.865 8.417 1.05 dft::Size_MatType_FlagsType_NzeroRows::(800x600, 32FC2, DFT_SCALE, true) 7.067 6.643 1.06 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, 0, false) 10.014 10.141 0.99 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, 0, true) 7.600 7.632 1.00 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_COMPLEX_OUTPUT, false) 11.059 11.283 0.98 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_COMPLEX_OUTPUT, true) 8.475 8.552 0.99 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_INVERSE, false) 12.678 12.789 0.99 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_INVERSE, true) 10.445 10.359 1.01 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 12.626 12.925 0.98 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 10.538 10.553 1.00 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_ROWS, false) 5.041 5.084 0.99 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_ROWS, true) 2.595 2.607 1.00 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_SCALE, false) 10.231 10.330 0.99 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC1, DFT_SCALE, true) 7.786 7.815 1.00 
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, 0, false) 13.597 13.302 1.02 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, 0, true) 10.377 10.207 1.02 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_COMPLEX_OUTPUT, false) 15.940 15.545 1.03 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_COMPLEX_OUTPUT, true) 12.299 12.230 1.01 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_INVERSE, false) 15.270 15.181 1.01 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_INVERSE, true) 12.757 12.339 1.03 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 15.512 15.157 1.02 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 12.505 12.635 0.99 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_ROWS, false) 6.359 6.255 1.02 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_ROWS, true) 3.314 3.248 1.02 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_SCALE, false) 13.937 13.733 1.01 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 64FC1, DFT_SCALE, true) 10.782 10.495 1.03 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, 0, false) 18.985 18.926 1.00 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, 0, true) 14.256 14.509 0.98 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_COMPLEX_OUTPUT, false) 18.696 19.021 0.98 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_COMPLEX_OUTPUT, true) 14.290 14.429 0.99 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_INVERSE, false) 20.135 20.296 0.99 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_INVERSE, true) 15.390 15.512 0.99 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 20.121 20.354 0.99 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 15.341 15.605 0.98 
dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_ROWS, false) 8.932 9.084 0.98 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_ROWS, true) 4.539 4.649 0.98 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_SCALE, false) 19.137 19.303 0.99 dft::Size_MatType_FlagsType_NzeroRows::(1280x1024, 32FC2, DFT_SCALE, true) 14.565 14.808 0.98 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, 0, false) 22.553 21.171 1.07 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, 0, true) 17.850 16.390 1.09 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_COMPLEX_OUTPUT, false) 24.062 22.634 1.06 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_COMPLEX_OUTPUT, true) 19.342 17.932 1.08 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_INVERSE, false) 28.609 27.326 1.05 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_INVERSE, true) 24.591 23.289 1.06 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 28.667 27.467 1.04 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 24.671 23.309 1.06 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_ROWS, false) 9.458 9.077 1.04 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_ROWS, true) 4.709 4.566 1.03 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_SCALE, false) 22.791 21.583 1.06 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC1, DFT_SCALE, true) 18.029 16.691 1.08 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, 0, false) 25.238 24.427 1.03 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, 0, true) 19.636 19.270 1.02 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_COMPLEX_OUTPUT, false) 28.342 27.957 1.01 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_COMPLEX_OUTPUT, true) 22.413 22.477 1.00 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_INVERSE, 
false) 26.465 26.085 1.01 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_INVERSE, true) 21.972 21.704 1.01 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 26.497 26.127 1.01 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 22.010 21.523 1.02 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_ROWS, false) 11.188 10.774 1.04 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_ROWS, true) 6.094 5.916 1.03 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_SCALE, false) 25.728 24.934 1.03 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 64FC1, DFT_SCALE, true) 20.077 19.653 1.02 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, 0, false) 43.834 40.726 1.08 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, 0, true) 35.198 32.218 1.09 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_COMPLEX_OUTPUT, false) 43.743 40.897 1.07 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_COMPLEX_OUTPUT, true) 35.240 32.226 1.09 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_INVERSE, false) 46.022 42.612 1.08 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_INVERSE, true) 36.779 33.961 1.08 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 46.396 42.723 1.09 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 37.025 33.874 1.09 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_ROWS, false) 17.334 16.832 1.03 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_ROWS, true) 9.212 8.970 1.03 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_SCALE, false) 44.190 41.211 1.07 dft::Size_MatType_FlagsType_NzeroRows::(1920x1080, 32FC2, DFT_SCALE, true) 35.900 32.888 1.09 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, 0, false) 40.948 38.256 1.07 
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, 0, true) 33.825 30.759 1.10 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_COMPLEX_OUTPUT, false) 53.210 53.584 0.99 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_COMPLEX_OUTPUT, true) 46.356 46.712 0.99 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_INVERSE, false) 47.471 47.213 1.01 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_INVERSE, true) 40.491 41.363 0.98 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 46.724 47.049 0.99 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 40.834 41.381 0.99 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_ROWS, false) 14.508 14.490 1.00 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_ROWS, true) 7.832 7.828 1.00 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_SCALE, false) 41.491 38.341 1.08 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC1, DFT_SCALE, true) 34.587 31.208 1.11 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, 0, false) 65.155 63.173 1.03 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, 0, true) 56.091 54.752 1.02 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_COMPLEX_OUTPUT, false) 71.549 70.626 1.01 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_COMPLEX_OUTPUT, true) 62.319 61.437 1.01 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_INVERSE, false) 61.480 59.540 1.03 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_INVERSE, true) 54.047 52.650 1.03 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 61.752 61.366 1.01 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 54.400 53.665 1.01 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_ROWS, false) 20.219 19.704 1.03 
dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_ROWS, true) 11.145 10.868 1.03 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_SCALE, false) 66.220 64.525 1.03 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 64FC1, DFT_SCALE, true) 57.389 56.114 1.02 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, 0, false) 86.761 88.128 0.98 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, 0, true) 75.528 76.725 0.98 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_COMPLEX_OUTPUT, false) 86.750 88.223 0.98 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_COMPLEX_OUTPUT, true) 75.830 76.809 0.99 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_INVERSE, false) 91.728 92.161 1.00 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_INVERSE, true) 78.797 79.876 0.99 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, false) 92.163 92.177 1.00 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_INVERSE|DFT_COMPLEX_OUTPUT, true) 78.957 79.863 0.99 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_ROWS, false) 24.781 25.576 0.97 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_ROWS, true) 13.226 13.695 0.97 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_SCALE, false) 87.990 89.324 0.99 dft::Size_MatType_FlagsType_NzeroRows::(2048x2048, 32FC2, DFT_SCALE, true) 76.732 77.869 0.99 ``` --- modules/core/src/dxt.cpp | 389 ++++++++++++++++++++++++++++----------- 1 file changed, 280 insertions(+), 109 deletions(-) diff --git a/modules/core/src/dxt.cpp b/modules/core/src/dxt.cpp index bfa61d0502..b307703a32 100644 --- a/modules/core/src/dxt.cpp +++ b/modules/core/src/dxt.cpp @@ -122,6 +122,33 @@ static const double DFTTab[][2] = { 1.00000000000000000, 0.00000000292583616 } }; +namespace { +template +struct Constants { + static const T sin_120; + static const T fft5_2; + static const T fft5_3; + static const T 
fft5_4; + static const T fft5_5; +}; + +template +const T Constants::sin_120 = (T)0.86602540378443864676372317075294; + +template +const T Constants::fft5_2 = (T)0.559016994374947424102293417182819; + +template +const T Constants::fft5_3 = (T)-0.951056516295153572116439333379382; + +template +const T Constants::fft5_4 = (T)-1.538841768587626701285145288018455; + +template +const T Constants::fft5_5 = (T)0.363271264002680442947733378740309; + +} //namespace + #define BitRev(i,shift) \ ((int)((((unsigned)bitrevTab[(i)&255] << 24)+ \ ((unsigned)bitrevTab[((i)>> 8)&255] << 16)+ \ @@ -372,6 +399,149 @@ DFTInit( int n0, int nf, const int* factors, int* itab, int elem_size, void* _wa } } +// Reference radix-2 implementation. +template struct DFT_R2 +{ + void operator()(Complex* dst, const int c_n, const int n, const int dw0, const Complex* wave) const { + const int nx = n/2; + for(int i = 0 ; i < c_n; i += n) + { + Complex* v = dst + i; + T r0 = v[0].re + v[nx].re; + T i0 = v[0].im + v[nx].im; + T r1 = v[0].re - v[nx].re; + T i1 = v[0].im - v[nx].im; + v[0].re = r0; v[0].im = i0; + v[nx].re = r1; v[nx].im = i1; + + for( int j = 1, dw = dw0; j < nx; j++, dw += dw0 ) + { + v = dst + i + j; + r1 = v[nx].re*wave[dw].re - v[nx].im*wave[dw].im; + i1 = v[nx].im*wave[dw].re + v[nx].re*wave[dw].im; + r0 = v[0].re; i0 = v[0].im; + + v[0].re = r0 + r1; v[0].im = i0 + i1; + v[nx].re = r0 - r1; v[nx].im = i0 - i1; + } + } + } +}; + +// Reference radix-3 implementation. 
+template struct DFT_R3 +{ + void operator()(Complex* dst, const int c_n, const int n, const int dw0, const Complex* wave) const { + const int nx = n / 3; + for(int i = 0; i < c_n; i += n ) + { + { + Complex* v = dst + i; + T r1 = v[nx].re + v[nx*2].re; + T i1 = v[nx].im + v[nx*2].im; + T r0 = v[0].re; + T i0 = v[0].im; + T r2 = Constants::sin_120*(v[nx].im - v[nx*2].im); + T i2 = Constants::sin_120*(v[nx*2].re - v[nx].re); + v[0].re = r0 + r1; v[0].im = i0 + i1; + r0 -= (T)0.5*r1; i0 -= (T)0.5*i1; + v[nx].re = r0 + r2; v[nx].im = i0 + i2; + v[nx*2].re = r0 - r2; v[nx*2].im = i0 - i2; + } + + for(int j = 1, dw = dw0; j < nx; j++, dw += dw0 ) + { + Complex* v = dst + i + j; + T r0 = v[nx].re*wave[dw].re - v[nx].im*wave[dw].im; + T i0 = v[nx].re*wave[dw].im + v[nx].im*wave[dw].re; + T i2 = v[nx*2].re*wave[dw*2].re - v[nx*2].im*wave[dw*2].im; + T r2 = v[nx*2].re*wave[dw*2].im + v[nx*2].im*wave[dw*2].re; + T r1 = r0 + i2; T i1 = i0 + r2; + + r2 = Constants::sin_120*(i0 - r2); i2 = Constants::sin_120*(i2 - r0); + r0 = v[0].re; i0 = v[0].im; + v[0].re = r0 + r1; v[0].im = i0 + i1; + r0 -= (T)0.5*r1; i0 -= (T)0.5*i1; + v[nx].re = r0 + r2; v[nx].im = i0 + i2; + v[nx*2].re = r0 - r2; v[nx*2].im = i0 - i2; + } + } + } +}; + +// Reference radix-5 implementation. 
+template struct DFT_R5 +{ + void operator()(Complex* dst, const int c_n, const int n, const int dw0, const Complex* wave) const { + const int nx = n / 5; + for(int i = 0; i < c_n; i += n ) + { + for(int j = 0, dw = 0; j < nx; j++, dw += dw0 ) + { + Complex* v0 = dst + i + j; + Complex* v1 = v0 + nx*2; + Complex* v2 = v1 + nx*2; + + T r0, i0, r1, i1, r2, i2, r3, i3, r4, i4, r5, i5; + + r3 = v0[nx].re*wave[dw].re - v0[nx].im*wave[dw].im; + i3 = v0[nx].re*wave[dw].im + v0[nx].im*wave[dw].re; + r2 = v2[0].re*wave[dw*4].re - v2[0].im*wave[dw*4].im; + i2 = v2[0].re*wave[dw*4].im + v2[0].im*wave[dw*4].re; + + r1 = r3 + r2; i1 = i3 + i2; + r3 -= r2; i3 -= i2; + + r4 = v1[nx].re*wave[dw*3].re - v1[nx].im*wave[dw*3].im; + i4 = v1[nx].re*wave[dw*3].im + v1[nx].im*wave[dw*3].re; + r0 = v1[0].re*wave[dw*2].re - v1[0].im*wave[dw*2].im; + i0 = v1[0].re*wave[dw*2].im + v1[0].im*wave[dw*2].re; + + r2 = r4 + r0; i2 = i4 + i0; + r4 -= r0; i4 -= i0; + + r0 = v0[0].re; i0 = v0[0].im; + r5 = r1 + r2; i5 = i1 + i2; + + v0[0].re = r0 + r5; v0[0].im = i0 + i5; + + r0 -= (T)0.25*r5; i0 -= (T)0.25*i5; + r1 = Constants::fft5_2*(r1 - r2); i1 = Constants::fft5_2*(i1 - i2); + r2 = -Constants::fft5_3*(i3 + i4); i2 = Constants::fft5_3*(r3 + r4); + + i3 *= -Constants::fft5_5; r3 *= Constants::fft5_5; + i4 *= -Constants::fft5_4; r4 *= Constants::fft5_4; + + r5 = r2 + i3; i5 = i2 + r3; + r2 -= i4; i2 -= r4; + + r3 = r0 + r1; i3 = i0 + i1; + r0 -= r1; i0 -= i1; + + v0[nx].re = r3 + r2; v0[nx].im = i3 + i2; + v2[0].re = r3 - r2; v2[0].im = i3 - i2; + + v1[0].re = r0 + r5; v1[0].im = i0 + i5; + v1[nx].re = r0 - r5; v1[nx].im = i0 - i5; + } + } + } +}; + +template struct DFT_VecR2 +{ + void operator()(Complex* dst, const int c_n, const int n, const int dw0, const Complex* wave) const { + return DFT_R2()(dst, c_n, n, dw0, wave); + } +}; + +template struct DFT_VecR3 +{ + void operator()(Complex* dst, const int c_n, const int n, const int dw0, const Complex* wave) const { + return DFT_R3()(dst, c_n, n, 
dw0, wave); + } +}; + template struct DFT_VecR4 { int operator()(Complex*, int, int, int&, const Complex*) const { return 1; } @@ -379,6 +549,98 @@ template struct DFT_VecR4 #if CV_SSE3 +// multiplies *a and *b: +// r_re + i*r_im = (a_re + i*a_im)*(b_re + i*b_im) +// r_re and r_im are placed respectively in bits 31:0 and 63:32 of the resulting +// vector register. +inline __m128 complexMul(const Complex* const a, const Complex* const b) { + const __m128 z = _mm_setzero_ps(); + const __m128 neg_elem0 = _mm_set_ps(0.0f,0.0f,0.0f,-0.0f); + // v_a[31:0] is a->re and v_a[63:32] is a->im. + const __m128 v_a = _mm_loadl_pi(z, (const __m64*)a); + const __m128 v_b = _mm_loadl_pi(z, (const __m64*)b); + // x_1 = v[nx] * wave[dw]. + const __m128 v_a_riri = _mm_shuffle_ps(v_a, v_a, _MM_SHUFFLE(0, 1, 0, 1)); + const __m128 v_b_irri = _mm_shuffle_ps(v_b, v_b, _MM_SHUFFLE(1, 0, 0, 1)); + const __m128 mul = _mm_mul_ps(v_a_riri, v_b_irri); + const __m128 xored = _mm_xor_ps(mul, neg_elem0); + return _mm_hadd_ps(xored, z); +} + +// optimized radix-2 transform +template<> struct DFT_VecR2 { + void operator()(Complex* dst, const int c_n, const int n, const int dw0, const Complex* wave) const { + const __m128 z = _mm_setzero_ps(); + const int nx = n/2; + for(int i = 0 ; i < c_n; i += n) + { + { + Complex* v = dst + i; + float r0 = v[0].re + v[nx].re; + float i0 = v[0].im + v[nx].im; + float r1 = v[0].re - v[nx].re; + float i1 = v[0].im - v[nx].im; + v[0].re = r0; v[0].im = i0; + v[nx].re = r1; v[nx].im = i1; + } + + for( int j = 1, dw = dw0; j < nx; j++, dw += dw0 ) + { + Complex* v = dst + i + j; + const __m128 x_1 = complexMul(&v[nx], &wave[dw]); + const __m128 v_0 = _mm_loadl_pi(z, (const __m64*)&v[0]); + _mm_storel_pi((__m64*)&v[0], _mm_add_ps(v_0, x_1)); + _mm_storel_pi((__m64*)&v[nx], _mm_sub_ps(v_0, x_1)); + } + } + } +}; + +// Optimized radix-3 implementation. 
+template<> struct DFT_VecR3 { + void operator()(Complex* dst, const int c_n, const int n, const int dw0, const Complex* wave) const { + const int nx = n / 3; + const __m128 z = _mm_setzero_ps(); + const __m128 neg_elem1 = _mm_set_ps(0.0f,0.0f,-0.0f,0.0f); + const __m128 sin_120 = _mm_set1_ps(Constants::sin_120); + const __m128 one_half = _mm_set1_ps(0.5f); + for(int i = 0; i < c_n; i += n ) + { + { + Complex* v = dst + i; + + float r1 = v[nx].re + v[nx*2].re; + float i1 = v[nx].im + v[nx*2].im; + float r0 = v[0].re; + float i0 = v[0].im; + float r2 = Constants::sin_120*(v[nx].im - v[nx*2].im); + float i2 = Constants::sin_120*(v[nx*2].re - v[nx].re); + v[0].re = r0 + r1; v[0].im = i0 + i1; + r0 -= (float)0.5*r1; i0 -= (float)0.5*i1; + v[nx].re = r0 + r2; v[nx].im = i0 + i2; + v[nx*2].re = r0 - r2; v[nx*2].im = i0 - i2; + } + + for(int j = 1, dw = dw0; j < nx; j++, dw += dw0 ) + { + Complex* v = dst + i + j; + const __m128 x_0 = complexMul(&v[nx], &wave[dw]); + const __m128 x_2 = complexMul(&v[nx*2], &wave[dw*2]); + const __m128 x_1 = _mm_add_ps(x_0, x_2); + + const __m128 v_0 = _mm_loadl_pi(z, (const __m64*)&v[0]); + _mm_storel_pi((__m64*)&v[0], _mm_add_ps(v_0, x_1)); + + const __m128 x_3 = _mm_mul_ps(sin_120, _mm_xor_ps(_mm_sub_ps(x_2, x_0), neg_elem1)); + const __m128 x_3s = _mm_shuffle_ps(x_3, x_3, _MM_SHUFFLE(0, 1, 0, 1)); + const __m128 x_4 = _mm_sub_ps(v_0, _mm_mul_ps(one_half, x_1)); + _mm_storel_pi((__m64*)&v[nx], _mm_add_ps(x_4, x_3s)); + _mm_storel_pi((__m64*)&v[nx*2], _mm_sub_ps(x_4, x_3s)); + } + } + } +}; + // optimized radix-4 transform template<> struct DFT_VecR4 { @@ -573,12 +835,6 @@ struct OcvDftOptions { template static void DFT(const OcvDftOptions & c, const Complex* src, Complex* dst) { - static const T sin_120 = (T)0.86602540378443864676372317075294; - static const T fft5_2 = (T)0.559016994374947424102293417182819; - static const T fft5_3 = (T)-0.951056516295153572116439333379382; - static const T fft5_4 = 
(T)-1.538841768587626701285145288018455; - static const T fft5_5 = (T)0.363271264002680442947733378740309; - const Complex* wave = (Complex*)c.wave; const int * itab = c.itab; @@ -775,30 +1031,18 @@ DFT(const OcvDftOptions & c, const Complex* src, Complex* dst) for( ; n < c.factors[0]; ) { // do the remaining radix-2 transform - nx = n; n *= 2; dw0 /= 2; - for( i = 0; i < c.n; i += n ) + if(c.haveSSE3) { - Complex* v = dst + i; - T r0 = v[0].re + v[nx].re; - T i0 = v[0].im + v[nx].im; - T r1 = v[0].re - v[nx].re; - T i1 = v[0].im - v[nx].im; - v[0].re = r0; v[0].im = i0; - v[nx].re = r1; v[nx].im = i1; - - for( j = 1, dw = dw0; j < nx; j++, dw += dw0 ) - { - v = dst + i + j; - r1 = v[nx].re*wave[dw].re - v[nx].im*wave[dw].im; - i1 = v[nx].im*wave[dw].re + v[nx].re*wave[dw].im; - r0 = v[0].re; i0 = v[0].im; - - v[0].re = r0 + r1; v[0].im = i0 + i1; - v[nx].re = r0 - r1; v[nx].im = i0 - i1; - } + DFT_VecR2 vr2; + vr2(dst, c.n, n, dw0, wave); + } + else + { + DFT_R2 vr2; + vr2(dst, c.n, n, dw0, wave); } } } @@ -813,94 +1057,21 @@ DFT(const OcvDftOptions & c, const Complex* src, Complex* dst) if( factor == 3 ) { - // radix-3 - for( i = 0; i < c.n; i += n ) + if(c.haveSSE3) { - Complex* v = dst + i; - - T r1 = v[nx].re + v[nx*2].re; - T i1 = v[nx].im + v[nx*2].im; - T r0 = v[0].re; - T i0 = v[0].im; - T r2 = sin_120*(v[nx].im - v[nx*2].im); - T i2 = sin_120*(v[nx*2].re - v[nx].re); - v[0].re = r0 + r1; v[0].im = i0 + i1; - r0 -= (T)0.5*r1; i0 -= (T)0.5*i1; - v[nx].re = r0 + r2; v[nx].im = i0 + i2; - v[nx*2].re = r0 - r2; v[nx*2].im = i0 - i2; - - for( j = 1, dw = dw0; j < nx; j++, dw += dw0 ) - { - v = dst + i + j; - r0 = v[nx].re*wave[dw].re - v[nx].im*wave[dw].im; - i0 = v[nx].re*wave[dw].im + v[nx].im*wave[dw].re; - i2 = v[nx*2].re*wave[dw*2].re - v[nx*2].im*wave[dw*2].im; - r2 = v[nx*2].re*wave[dw*2].im + v[nx*2].im*wave[dw*2].re; - r1 = r0 + i2; i1 = i0 + r2; - - r2 = sin_120*(i0 - r2); i2 = sin_120*(i2 - r0); - r0 = v[0].re; i0 = v[0].im; - v[0].re = r0 + r1; 
v[0].im = i0 + i1; - r0 -= (T)0.5*r1; i0 -= (T)0.5*i1; - v[nx].re = r0 + r2; v[nx].im = i0 + i2; - v[nx*2].re = r0 - r2; v[nx*2].im = i0 - i2; - } + DFT_VecR3 vr3; + vr3(dst, c.n, n, dw0, wave); + } + else + { + DFT_R3 vr3; + vr3(dst, c.n, n, dw0, wave); } } else if( factor == 5 ) { - // radix-5 - for( i = 0; i < c.n; i += n ) - { - for( j = 0, dw = 0; j < nx; j++, dw += dw0 ) - { - Complex* v0 = dst + i + j; - Complex* v1 = v0 + nx*2; - Complex* v2 = v1 + nx*2; - - T r0, i0, r1, i1, r2, i2, r3, i3, r4, i4, r5, i5; - - r3 = v0[nx].re*wave[dw].re - v0[nx].im*wave[dw].im; - i3 = v0[nx].re*wave[dw].im + v0[nx].im*wave[dw].re; - r2 = v2[0].re*wave[dw*4].re - v2[0].im*wave[dw*4].im; - i2 = v2[0].re*wave[dw*4].im + v2[0].im*wave[dw*4].re; - - r1 = r3 + r2; i1 = i3 + i2; - r3 -= r2; i3 -= i2; - - r4 = v1[nx].re*wave[dw*3].re - v1[nx].im*wave[dw*3].im; - i4 = v1[nx].re*wave[dw*3].im + v1[nx].im*wave[dw*3].re; - r0 = v1[0].re*wave[dw*2].re - v1[0].im*wave[dw*2].im; - i0 = v1[0].re*wave[dw*2].im + v1[0].im*wave[dw*2].re; - - r2 = r4 + r0; i2 = i4 + i0; - r4 -= r0; i4 -= i0; - - r0 = v0[0].re; i0 = v0[0].im; - r5 = r1 + r2; i5 = i1 + i2; - - v0[0].re = r0 + r5; v0[0].im = i0 + i5; - - r0 -= (T)0.25*r5; i0 -= (T)0.25*i5; - r1 = fft5_2*(r1 - r2); i1 = fft5_2*(i1 - i2); - r2 = -fft5_3*(i3 + i4); i2 = fft5_3*(r3 + r4); - - i3 *= -fft5_5; r3 *= fft5_5; - i4 *= -fft5_4; r4 *= fft5_4; - - r5 = r2 + i3; i5 = i2 + r3; - r2 -= i4; i2 -= r4; - - r3 = r0 + r1; i3 = i0 + i1; - r0 -= r1; i0 -= i1; - - v0[nx].re = r3 + r2; v0[nx].im = i3 + i2; - v2[0].re = r3 - r2; v2[0].im = i3 - i2; - - v1[0].re = r0 + r5; v1[0].im = i0 + i5; - v1[nx].re = r0 - r5; v1[nx].im = i0 - i5; - } - } + DFT_R5 vr5; + vr5(dst, c.n, n, dw0, wave); } else { From e3da18121f836a3ef361a8f1847f6ae30d536770 Mon Sep 17 00:00:00 2001 From: Michael Gruner Date: Thu, 1 Oct 2020 19:48:15 -0600 Subject: [PATCH 003/152] Enable a GMainLoop when capturing using GStreamer A running GMainLoop processes many events on the 
GLib/GStreamer world. While some things may work without it, many others wont. Examples of these are signals, timers and many other source events. The problem becomes more concerning by the fact that some GStreamer elements rely on signals to work. This commit allows the user to specify an OpenCV option to start a main loop, if needed. Since the loop blocks, this is done in a separate thread. --- modules/videoio/src/cap_gstreamer.cpp | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/modules/videoio/src/cap_gstreamer.cpp b/modules/videoio/src/cap_gstreamer.cpp index 4d9330daf8..67c119cd36 100644 --- a/modules/videoio/src/cap_gstreamer.cpp +++ b/modules/videoio/src/cap_gstreamer.cpp @@ -54,6 +54,7 @@ #include #include +#include #include #include @@ -107,6 +108,7 @@ template<> inline void GSafePtr_release(GstBuffer** pPtr) { if (pPtr) template<> inline void GSafePtr_release(GstSample** pPtr) { if (pPtr) { gst_sample_unref(*pPtr); *pPtr = NULL; } } template<> inline void GSafePtr_release(GstBus** pPtr) { if (pPtr) { gst_object_unref(G_OBJECT(*pPtr)); *pPtr = NULL; } } template<> inline void GSafePtr_release(GstMessage** pPtr) { if (pPtr) { gst_message_unref(*pPtr); *pPtr = NULL; } } +template<> inline void GSafePtr_release(GMainLoop** pPtr) { if (pPtr) { g_main_loop_unref(*pPtr); *pPtr = NULL; } } template<> inline void GSafePtr_release(GstEncodingVideoProfile** pPtr) { if (pPtr) { gst_encoding_profile_unref(*pPtr); *pPtr = NULL; } } template<> inline void GSafePtr_release(GstEncodingContainerProfile** pPtr) { if (pPtr) { gst_object_unref(G_OBJECT(*pPtr)); *pPtr = NULL; } } @@ -194,10 +196,15 @@ public: private: bool isFailed; bool call_deinit; + bool start_loop; + GSafePtr loop; + std::thread thread; + gst_initializer() : isFailed(false) { call_deinit = utils::getConfigurationParameterBool("OPENCV_VIDEOIO_GSTREAMER_CALL_DEINIT", false); + start_loop = utils::getConfigurationParameterBool("OPENCV_VIDEOIO_GSTREAMER_START_MAINLOOP", false); 
GSafePtr err; gst_init_check(NULL, NULL, err.getRef()); @@ -215,6 +222,14 @@ private: isFailed = true; return; } + + if (start_loop) + { + loop.attach(g_main_loop_new (NULL, FALSE)); + thread = std::thread([this](){ + g_main_loop_run (loop); + }); + } } ~gst_initializer() { @@ -223,6 +238,12 @@ private: // Debug leaks: GST_LEAKS_TRACER_STACK_TRACE=1 GST_DEBUG="GST_TRACER:7" GST_TRACERS="leaks" gst_deinit(); } + + if (start_loop) + { + g_main_loop_quit(loop); + thread.join(); + } } }; From 1546b9bf994258a6aeea7b0cf3f7675d3b1c0514 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 6 Oct 2020 00:12:58 +0300 Subject: [PATCH 004/152] build: winpack_dldt with dldt 2021.1.0 --- .../20201005-dldt-fix-cldnn-compilation.patch | 12 ++ platforms/winpack_dldt/2020.4/patch.config.py | 1 + .../2021.1/20200413-dldt-pdb.patch | 14 ++ .../20200604-dldt-disable-multidevice.patch | 13 ++ ...20201005-dldt-disable-unused-targets.patch | 178 ++++++++++++++++++ platforms/winpack_dldt/2021.1/patch.config.py | 3 + .../winpack_dldt/2021.1/sysroot.config.py | 56 ++++++ platforms/winpack_dldt/build_package.py | 4 +- 8 files changed, 279 insertions(+), 2 deletions(-) create mode 100644 platforms/winpack_dldt/2020.4/20201005-dldt-fix-cldnn-compilation.patch create mode 100644 platforms/winpack_dldt/2021.1/20200413-dldt-pdb.patch create mode 100644 platforms/winpack_dldt/2021.1/20200604-dldt-disable-multidevice.patch create mode 100644 platforms/winpack_dldt/2021.1/20201005-dldt-disable-unused-targets.patch create mode 100644 platforms/winpack_dldt/2021.1/patch.config.py create mode 100644 platforms/winpack_dldt/2021.1/sysroot.config.py diff --git a/platforms/winpack_dldt/2020.4/20201005-dldt-fix-cldnn-compilation.patch b/platforms/winpack_dldt/2020.4/20201005-dldt-fix-cldnn-compilation.patch new file mode 100644 index 0000000000..152af26c6f --- /dev/null +++ b/platforms/winpack_dldt/2020.4/20201005-dldt-fix-cldnn-compilation.patch @@ -0,0 +1,12 @@ +diff --git 
a/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h b/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h +index 3dbdfd0b..6b04b910 100644 +--- a/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h ++++ b/inference-engine/thirdparty/clDNN/kernel_selector/common/tensor_type.h +@@ -15,6 +15,7 @@ + + #pragma once + ++#include + #include "common_types.h" + #include "common_tools.h" + #include diff --git a/platforms/winpack_dldt/2020.4/patch.config.py b/platforms/winpack_dldt/2020.4/patch.config.py index 496f383800..6fe3e6e1c1 100644 --- a/platforms/winpack_dldt/2020.4/patch.config.py +++ b/platforms/winpack_dldt/2020.4/patch.config.py @@ -1,3 +1,4 @@ applyPatch('20200701-dldt-disable-unused-targets.patch') applyPatch('20200413-dldt-pdb.patch') applyPatch('20200604-dldt-disable-multidevice.patch') +applyPatch('20201005-dldt-fix-cldnn-compilation.patch') diff --git a/platforms/winpack_dldt/2021.1/20200413-dldt-pdb.patch b/platforms/winpack_dldt/2021.1/20200413-dldt-pdb.patch new file mode 100644 index 0000000000..081c3c04f6 --- /dev/null +++ b/platforms/winpack_dldt/2021.1/20200413-dldt-pdb.patch @@ -0,0 +1,14 @@ +diff --git a/CMakeLists.txt b/CMakeLists.txt +index 1f981ed2..90eb500a 100644 +--- a/CMakeLists.txt ++++ b/CMakeLists.txt +@@ -22,6 +22,9 @@ endif() + + project(OpenVINO) + ++set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Zi /FS") ++set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /DEBUG /OPT:REF /OPT:ICF") ++ + set(OpenVINO_MAIN_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) + set(IE_MAIN_SOURCE_DIR ${OpenVINO_MAIN_SOURCE_DIR}/inference-engine) + list(APPEND CMAKE_MODULE_PATH "${OpenVINO_MAIN_SOURCE_DIR}/cmake") diff --git a/platforms/winpack_dldt/2021.1/20200604-dldt-disable-multidevice.patch b/platforms/winpack_dldt/2021.1/20200604-dldt-disable-multidevice.patch new file mode 100644 index 0000000000..b4d1ef9bfe --- /dev/null +++ 
b/platforms/winpack_dldt/2021.1/20200604-dldt-disable-multidevice.patch @@ -0,0 +1,13 @@ +diff --git a/inference-engine/src/CMakeLists.txt b/inference-engine/src/CMakeLists.txt +index 0ba0dd78..7d34e7cb 100644 +--- a/inference-engine/src/CMakeLists.txt ++++ b/inference-engine/src/CMakeLists.txt +@@ -26,7 +26,7 @@ endif() + + add_subdirectory(hetero_plugin) + +-add_subdirectory(multi_device) ++#add_subdirectory(multi_device) + + add_subdirectory(transformations) + diff --git a/platforms/winpack_dldt/2021.1/20201005-dldt-disable-unused-targets.patch b/platforms/winpack_dldt/2021.1/20201005-dldt-disable-unused-targets.patch new file mode 100644 index 0000000000..0f56717ee4 --- /dev/null +++ b/platforms/winpack_dldt/2021.1/20201005-dldt-disable-unused-targets.patch @@ -0,0 +1,178 @@ +diff --git a/inference-engine/CMakeLists.txt b/inference-engine/CMakeLists.txt +index 7f45ab02..a7bac7e9 100644 +--- a/inference-engine/CMakeLists.txt ++++ b/inference-engine/CMakeLists.txt +@@ -70,7 +70,7 @@ if(ENABLE_TESTS) + add_subdirectory(tests) + endif() + +-add_subdirectory(tools) ++#add_subdirectory(tools) + + function(ie_build_samples) + # samples should be build with the same flags as from OpenVINO package, +@@ -89,7 +89,7 @@ endfunction() + + # gflags and format_reader targets are kept inside of samples directory and + # they must be built even if samples build is disabled (required for tests and tools). 
+-ie_build_samples() ++#ie_build_samples() + + file(GLOB_RECURSE SAMPLES_SOURCES samples/*.cpp samples/*.hpp samples/*.h) + add_cpplint_target(sample_cpplint +@@ -180,7 +180,7 @@ endif() + # Developer package + # + +-ie_developer_export_targets(format_reader) ++#ie_developer_export_targets(format_reader) + ie_developer_export_targets(${NGRAPH_LIBRARIES}) + + # for Template plugin +@@ -188,7 +188,7 @@ if(NGRAPH_INTERPRETER_ENABLE) + ie_developer_export_targets(ngraph_backend interpreter_backend) + endif() + +-ie_developer_export() ++#ie_developer_export() + + configure_file( + "${IE_MAIN_SOURCE_DIR}/cmake/developer_package_config.cmake.in" +diff --git a/inference-engine/src/inference_engine/CMakeLists.txt b/inference-engine/src/inference_engine/CMakeLists.txt +index 9ab88898..8badb591 100644 +--- a/inference-engine/src/inference_engine/CMakeLists.txt ++++ b/inference-engine/src/inference_engine/CMakeLists.txt +@@ -118,7 +118,7 @@ add_cpplint_target(${TARGET_NAME}_plugin_api_cpplint FOR_SOURCES ${plugin_api_sr + + # Create common base object library + +-add_library(${TARGET_NAME}_common_obj OBJECT ++add_library(${TARGET_NAME}_common_obj OBJECT EXCLUDE_FROM_ALL + ${IE_BASE_SOURCE_FILES}) + + target_compile_definitions(${TARGET_NAME}_common_obj PRIVATE IMPLEMENT_INFERENCE_ENGINE_API) +@@ -132,7 +132,7 @@ target_include_directories(${TARGET_NAME}_common_obj SYSTEM PRIVATE + + # Create object library + +-add_library(${TARGET_NAME}_obj OBJECT ++add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL + ${LIBRARY_SRC} + ${LIBRARY_HEADERS} + ${PUBLIC_HEADERS}) +@@ -183,7 +183,7 @@ ie_register_plugins(MAIN_TARGET ${TARGET_NAME} + + # Static library used for unit tests which are always built + +-add_library(${TARGET_NAME}_s STATIC ++add_library(${TARGET_NAME}_s STATIC EXCLUDE_FROM_ALL + $ + $ + $ +diff --git a/inference-engine/src/legacy_api/CMakeLists.txt b/inference-engine/src/legacy_api/CMakeLists.txt +index ed87a073..b30e6671 100644 +--- 
a/inference-engine/src/legacy_api/CMakeLists.txt ++++ b/inference-engine/src/legacy_api/CMakeLists.txt +@@ -26,7 +26,7 @@ endif() + + # Create object library + +-add_library(${TARGET_NAME}_obj OBJECT ++add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL + ${LIBRARY_SRC} + ${PUBLIC_HEADERS}) + +diff --git a/inference-engine/src/mkldnn_plugin/CMakeLists.txt b/inference-engine/src/mkldnn_plugin/CMakeLists.txt +index 166818cd..6c1e8e36 100644 +--- a/inference-engine/src/mkldnn_plugin/CMakeLists.txt ++++ b/inference-engine/src/mkldnn_plugin/CMakeLists.txt +@@ -193,7 +193,7 @@ cross_compiled_file(${TARGET_NAME} + + # add test object library + +-add_library(${TARGET_NAME}_obj OBJECT ${SOURCES} ${HEADERS}) ++add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL ${SOURCES} ${HEADERS}) + + target_include_directories(${TARGET_NAME}_obj PRIVATE $ + $ +diff --git a/inference-engine/src/preprocessing/CMakeLists.txt b/inference-engine/src/preprocessing/CMakeLists.txt +index f4fed72a..9cedd6b5 100644 +--- a/inference-engine/src/preprocessing/CMakeLists.txt ++++ b/inference-engine/src/preprocessing/CMakeLists.txt +@@ -124,7 +124,7 @@ endif() + + # Create object library + +-add_library(${TARGET_NAME}_obj OBJECT ++add_library(${TARGET_NAME}_obj OBJECT EXCLUDE_FROM_ALL + ${LIBRARY_SRC} + ${LIBRARY_HEADERS}) + +@@ -175,7 +175,7 @@ add_cpplint_target(${TARGET_NAME}_cpplint FOR_TARGETS ${TARGET_NAME} + + # Static library used for unit tests which are always built + +-add_library(${TARGET_NAME}_s STATIC ++add_library(${TARGET_NAME}_s STATIC EXCLUDE_FROM_ALL + $) + + set_ie_threading_interface_for(${TARGET_NAME}_s) +diff --git a/inference-engine/src/vpu/common/CMakeLists.txt b/inference-engine/src/vpu/common/CMakeLists.txt +index b291d5b4..74ab8287 100644 +--- a/inference-engine/src/vpu/common/CMakeLists.txt ++++ b/inference-engine/src/vpu/common/CMakeLists.txt +@@ -57,7 +57,7 @@ add_common_target("vpu_common_lib" FALSE) + + # Unit tests support for graph transformer + if(WIN32) +- 
add_common_target("vpu_common_lib_test_static" TRUE) ++ #add_common_target("vpu_common_lib_test_static" TRUE) + else() + add_library("vpu_common_lib_test_static" ALIAS "vpu_common_lib") + endif() +diff --git a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt +index a4543745..807b8e36 100644 +--- a/inference-engine/src/vpu/graph_transformer/CMakeLists.txt ++++ b/inference-engine/src/vpu/graph_transformer/CMakeLists.txt +@@ -65,7 +65,7 @@ add_graph_transformer_target("vpu_graph_transformer" FALSE) + + # Unit tests support for graph transformer + if(WIN32) +- add_graph_transformer_target("vpu_graph_transformer_test_static" TRUE) ++ #add_graph_transformer_target("vpu_graph_transformer_test_static" TRUE) + else() + add_library("vpu_graph_transformer_test_static" ALIAS "vpu_graph_transformer") + endif() +diff --git a/inference-engine/thirdparty/CMakeLists.txt b/inference-engine/thirdparty/CMakeLists.txt +index a2550bfa..10ce316f 100644 +--- a/inference-engine/thirdparty/CMakeLists.txt ++++ b/inference-engine/thirdparty/CMakeLists.txt +@@ -56,13 +56,13 @@ function(build_with_lto) + endfunction() + + ie_build_pugixml() +- add_subdirectory(stb_lib) ++ #add_subdirectory(stb_lib) + add_subdirectory(ade) + add_subdirectory(fluid/modules/gapi) + + target_include_directories(pugixml INTERFACE "$") + +- set_target_properties(pugixml ade fluid stb_image ++ set_target_properties(pugixml ade fluid + PROPERTIES FOLDER thirdparty) + + # developer package +diff --git a/inference-engine/thirdparty/pugixml/CMakeLists.txt b/inference-engine/thirdparty/pugixml/CMakeLists.txt +index 8bcb2801..380fb468 100644 +--- a/inference-engine/thirdparty/pugixml/CMakeLists.txt ++++ b/inference-engine/thirdparty/pugixml/CMakeLists.txt +@@ -41,7 +41,7 @@ if(BUILD_SHARED_LIBS) + else() + add_library(pugixml STATIC ${SOURCES}) + if (MSVC) +- add_library(pugixml_mt STATIC ${SOURCES}) ++ #add_library(pugixml_mt STATIC ${SOURCES}) + #if 
(WIN32) + # set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /MT") + # set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MTd") diff --git a/platforms/winpack_dldt/2021.1/patch.config.py b/platforms/winpack_dldt/2021.1/patch.config.py new file mode 100644 index 0000000000..8c890159e6 --- /dev/null +++ b/platforms/winpack_dldt/2021.1/patch.config.py @@ -0,0 +1,3 @@ +applyPatch('20201005-dldt-disable-unused-targets.patch') +applyPatch('20200413-dldt-pdb.patch') +applyPatch('20200604-dldt-disable-multidevice.patch') diff --git a/platforms/winpack_dldt/2021.1/sysroot.config.py b/platforms/winpack_dldt/2021.1/sysroot.config.py new file mode 100644 index 0000000000..fc8dffd32a --- /dev/null +++ b/platforms/winpack_dldt/2021.1/sysroot.config.py @@ -0,0 +1,56 @@ +sysroot_bin_dir = prepare_dir(self.sysrootdir / 'bin') +copytree(self.build_dir / 'install', self.sysrootdir / 'ngraph') +#rm_one(self.sysrootdir / 'ngraph' / 'lib' / 'ngraph.dll') + +build_config = 'Release' if not self.config.build_debug else 'Debug' +build_bin_dir = self.build_dir / 'bin' / 'intel64' / build_config + +def copy_bin(name): + global build_bin_dir, sysroot_bin_dir + copytree(build_bin_dir / name, sysroot_bin_dir / name) + +dll_suffix = 'd' if self.config.build_debug else '' +def copy_dll(name): + global copy_bin, dll_suffix + copy_bin(name + dll_suffix + '.dll') + copy_bin(name + dll_suffix + '.pdb') + +copy_bin('cache.json') +copy_dll('clDNNPlugin') +copy_dll('HeteroPlugin') +copy_dll('inference_engine') +copy_dll('inference_engine_ir_reader') +copy_dll('inference_engine_legacy') +copy_dll('inference_engine_transformations') # runtime +copy_dll('inference_engine_lp_transformations') # runtime +copy_dll('MKLDNNPlugin') # runtime +copy_dll('myriadPlugin') # runtime +#copy_dll('MultiDevicePlugin') # runtime, not used +copy_dll('ngraph') +copy_bin('plugins.xml') +copytree(self.build_dir / 'bin' / 'intel64' / 'pcie-ma248x.elf', sysroot_bin_dir / 'pcie-ma248x.elf') +copytree(self.build_dir / 
'bin' / 'intel64' / 'usb-ma2x8x.mvcmd', sysroot_bin_dir / 'usb-ma2x8x.mvcmd') +copytree(self.build_dir / 'bin' / 'intel64' / 'usb-ma2450.mvcmd', sysroot_bin_dir / 'usb-ma2450.mvcmd') + +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb' / 'bin', sysroot_bin_dir) +copytree(self.srcdir / 'inference-engine' / 'temp' / 'tbb', self.sysrootdir / 'tbb') + +sysroot_ie_dir = prepare_dir(self.sysrootdir / 'deployment_tools' / 'inference_engine') +sysroot_ie_lib_dir = prepare_dir(sysroot_ie_dir / 'lib' / 'intel64') + +copytree(self.srcdir / 'inference-engine' / 'include', sysroot_ie_dir / 'include') +if not self.config.build_debug: + copytree(self.build_dir / 'install' / 'lib' / 'ngraph.lib', sysroot_ie_lib_dir / 'ngraph.lib') + copytree(build_bin_dir / 'inference_engine.lib', sysroot_ie_lib_dir / 'inference_engine.lib') + copytree(build_bin_dir / 'inference_engine_ir_reader.lib', sysroot_ie_lib_dir / 'inference_engine_ir_reader.lib') + copytree(build_bin_dir / 'inference_engine_legacy.lib', sysroot_ie_lib_dir / 'inference_engine_legacy.lib') +else: + copytree(self.build_dir / 'install' / 'lib' / 'ngraphd.lib', sysroot_ie_lib_dir / 'ngraphd.lib') + copytree(build_bin_dir / 'inference_engined.lib', sysroot_ie_lib_dir / 'inference_engined.lib') + copytree(build_bin_dir / 'inference_engine_ir_readerd.lib', sysroot_ie_lib_dir / 'inference_engine_ir_readerd.lib') + copytree(build_bin_dir / 'inference_engine_legacyd.lib', sysroot_ie_lib_dir / 'inference_engine_legacyd.lib') + +sysroot_license_dir = prepare_dir(self.sysrootdir / 'etc' / 'licenses') +copytree(self.srcdir / 'LICENSE', sysroot_license_dir / 'dldt-LICENSE') +copytree(self.srcdir / 'ngraph/LICENSE', sysroot_license_dir / 'ngraph-LICENSE') +copytree(self.sysrootdir / 'tbb/LICENSE', sysroot_license_dir / 'tbb-LICENSE') diff --git a/platforms/winpack_dldt/build_package.py b/platforms/winpack_dldt/build_package.py index c33e07026b..8618e11cbf 100644 --- a/platforms/winpack_dldt/build_package.py +++ 
b/platforms/winpack_dldt/build_package.py @@ -443,8 +443,8 @@ class Builder: def main(): dldt_src_url = 'https://github.com/openvinotoolkit/openvino' - dldt_src_commit = '2020.4' - dldt_release = '2020040000' + dldt_src_commit = '2021.1' + dldt_release = '2021010000' build_cache_dir_default = os.environ.get('BUILD_CACHE_DIR', '.build_cache') build_subst_drive = os.environ.get('BUILD_SUBST_DRIVE', None) From d9ea9bedb20bb55a06439f7f013046025f7ff2a7 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Wed, 7 Oct 2020 20:16:40 +0000 Subject: [PATCH 005/152] doxygen: backport style changes --- doc/stylesheet.css | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/doc/stylesheet.css b/doc/stylesheet.css index 806e03bfd4..eae05885f7 100644 --- a/doc/stylesheet.css +++ b/doc/stylesheet.css @@ -6,12 +6,11 @@ body, table, div, p, dl { } code { - font: 12px Consolas, "Liberation Mono", Courier, monospace; font-size: 85%; + font-family: "SFMono-Regular",Consolas,"Liberation Mono",Menlo,Courier,monospace; white-space: pre-wrap; padding: 1px 5px; - padding: 0; - background-color: #ddd; + background-color: rgb(223, 229, 241); vertical-align: baseline; } @@ -20,6 +19,16 @@ body { margin: 0 auto; } +div.fragment { + padding: 3px; + padding-bottom: 0px; +} + +div.line { + padding-bottom: 3px; + font-family: "SFMono-Regular",Consolas,"Liberation Mono",Menlo,Courier,monospace; +} + div.contents { width: 980px; margin: 0 auto; @@ -35,3 +44,11 @@ span.arrow { div.image img{ max-width: 900px; } + +#projectlogo +{ + text-align: center; + vertical-align: middle; + border-collapse: separate; + padding-left: 0.5em; +} From af2f8c69f03ae5ee381d60f2f44bad849bd05e06 Mon Sep 17 00:00:00 2001 From: "Anastasiya(Asya) Pronina" Date: Thu, 8 Oct 2020 00:48:49 +0300 Subject: [PATCH 006/152] Merge pull request #18496 from AsyaPronina:comp_args_serialization Serialization && deserialization for compile arguments * Initial stub * Add test on serialization of a 
custom type * Namespaces rework * Fix isSupported in test struct * Fix clang lookup issue * Initial implementation * Drop the isSupported flag * Initial implementation * Removed internal header inclusion * Switched to public API * Implemented serialization * Adding desirialize: WIP * Fixed merge errors * Implemented * Final polishing * Addressed review comments and added debug throw * Added FluidROI test * Polishing * Polishing * Polishing * Polishing * Polishing * Updated CMakeLists.txt * Fixed comments * Addressed review comments * Removed decay from deserialize_arg * Addressed review comments * Removed extra inclusion * Fixed Win64 warning * Update gcommon.hpp * Update serialization.cpp * Update gcommon.hpp * gapi: drop GAPI_EXPORTS_W_SIMPLE from GCompileArg Co-authored-by: Smirnov Alexey Co-authored-by: AsyaPronina <155jj@mail.ru> --- modules/gapi/CMakeLists.txt | 1 + modules/gapi/include/opencv2/gapi/gcommon.hpp | 32 ++++++++- modules/gapi/include/opencv2/gapi/s11n.hpp | 69 +++++++++++++++---- .../gapi/include/opencv2/gapi/s11n/base.hpp | 36 ++++++++++ modules/gapi/misc/python/shadow_gapi.hpp | 2 + modules/gapi/src/api/s11n.cpp | 7 ++ .../src/backends/common/serialization.cpp | 16 ++++- .../src/backends/common/serialization.hpp | 7 ++ .../cpu/gapi_ocv_stateful_kernel_tests.cpp | 8 +-- modules/gapi/test/s11n/gapi_s11n_tests.cpp | 22 ++++++ 10 files changed, 180 insertions(+), 20 deletions(-) create mode 100644 modules/gapi/include/opencv2/gapi/s11n/base.hpp diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt index 0278d9326a..88ddeead16 100644 --- a/modules/gapi/CMakeLists.txt +++ b/modules/gapi/CMakeLists.txt @@ -49,6 +49,7 @@ file(GLOB gapi_ext_hdrs "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/ocl/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/own/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/render/*.hpp" + "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/s11n/*.hpp" 
"${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/streaming/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/plaidml/*.hpp" "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/util/*.hpp" diff --git a/modules/gapi/include/opencv2/gapi/gcommon.hpp b/modules/gapi/include/opencv2/gapi/gcommon.hpp index e008fe4bf1..2b260ed07c 100644 --- a/modules/gapi/include/opencv2/gapi/gcommon.hpp +++ b/modules/gapi/include/opencv2/gapi/gcommon.hpp @@ -19,6 +19,7 @@ #include #include #include +#include namespace cv { @@ -94,6 +95,15 @@ enum class GShape: int GFRAME, }; +namespace gapi { +namespace s11n { +namespace detail { +template struct wrap_serialize; +} // namespace detail +} // namespace s11n +} // namespace gapi + + struct GCompileArg; namespace detail { @@ -139,7 +149,7 @@ namespace detail { * passed in (a variadic template parameter pack) into a vector of * cv::GCompileArg objects. */ -struct GAPI_EXPORTS_W_SIMPLE GCompileArg +struct GCompileArg { public: // NB: Required for pythnon bindings @@ -151,6 +161,7 @@ public: template::value, int>::type = 0> explicit GCompileArg(T &&t) : tag(detail::CompileArgTag::type>::tag()) + , serializeF(&cv::gapi::s11n::detail::wrap_serialize::serialize) , arg(t) { } @@ -165,7 +176,13 @@ public: return util::any_cast(arg); } + void serialize(cv::gapi::s11n::IOStream& os) const + { + serializeF(os, *this); + } + private: + std::function serializeF; util::any arg; }; @@ -198,6 +215,19 @@ inline cv::util::optional getCompileArg(const cv::GCompileArgs &args) } return cv::util::optional(); } + +namespace s11n { +namespace detail { +template struct wrap_serialize +{ + static void serialize(IOStream& os, const GCompileArg& arg) + { + using decayed_type = typename std::decay::type; + S11N::serialize(os, arg.get()); + } +}; +} // namespace detail +} // namespace s11n } // namespace gapi /** diff --git a/modules/gapi/include/opencv2/gapi/s11n.hpp b/modules/gapi/include/opencv2/gapi/s11n.hpp index 0b61304c5c..e8a8dbcab4 100644 --- 
a/modules/gapi/include/opencv2/gapi/s11n.hpp +++ b/modules/gapi/include/opencv2/gapi/s11n.hpp @@ -10,6 +10,7 @@ #include #include #include +#include #include namespace cv { @@ -17,14 +18,13 @@ namespace gapi { namespace detail { GAPI_EXPORTS cv::GComputation getGraph(const std::vector &p); -} // namespace detail -namespace detail { GAPI_EXPORTS cv::GMetaArgs getMetaArgs(const std::vector &p); -} // namespace detail -namespace detail { GAPI_EXPORTS cv::GRunArgs getRunArgs(const std::vector &p); + + template + cv::GCompileArgs getCompileArgs(const std::vector &p); } // namespace detail GAPI_EXPORTS std::vector serialize(const cv::GComputation &c); @@ -35,6 +35,7 @@ T deserialize(const std::vector &p); //} //ananymous namespace +GAPI_EXPORTS std::vector serialize(const cv::GCompileArgs&); GAPI_EXPORTS std::vector serialize(const cv::GMetaArgs&); GAPI_EXPORTS std::vector serialize(const cv::GRunArgs&); @@ -53,6 +54,11 @@ cv::GRunArgs deserialize(const std::vector &p) { return detail::getRunArgs(p); } +template inline +typename std::enable_if::value, GCompileArgs>:: +type deserialize(const std::vector &p) { + return detail::getCompileArgs(p); +} } // namespace gapi } // namespace cv @@ -91,6 +97,10 @@ struct GAPI_EXPORTS IIStream { virtual IIStream& operator>> (std::string &) = 0; }; +namespace detail { +GAPI_EXPORTS std::unique_ptr getInStream(const std::vector &p); +} // namespace detail + //////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////// // S11N operators @@ -174,17 +184,48 @@ IIStream& operator>> (IIStream& is, std::vector &ts) { } return is; } - -namespace detail { - // Will be used along with default types if possible in specific cases (compile args, etc) - // Note: actual implementation is defined by user - template - struct GAPI_EXPORTS S11N { - static void serialize(IOStream &, const T &) {} - static T deserialize(IIStream &) { T t; return t; } - }; 
-} // namespace detail } // namespace s11n + +namespace detail +{ +template struct deserialize_arg; + +template<> struct deserialize_arg> { +static GCompileArg exec(cv::gapi::s11n::IIStream&, const std::string&) { + throw std::logic_error("Passed arg can't be deserialized!"); + } +}; + +template +struct deserialize_arg> { +static GCompileArg exec(cv::gapi::s11n::IIStream& is, const std::string& tag) { + if (tag == cv::detail::CompileArgTag::tag()) { + return GCompileArg { + cv::gapi::s11n::detail::S11N::deserialize(is) + }; + } + + return deserialize_arg>::exec(is, tag); +} +}; + +template +cv::GCompileArgs getCompileArgs(const std::vector &p) { + std::unique_ptr pIs = cv::gapi::s11n::detail::getInStream(p); + cv::gapi::s11n::IIStream& is = *pIs; + cv::GCompileArgs args; + + uint32_t sz = 0; + is >> sz; + for (uint32_t i = 0; i < sz; ++i) { + std::string tag; + is >> tag; + args.push_back(cv::gapi::detail::deserialize_arg>::exec(is, tag)); + } + + return args; +} +} // namespace detail } // namespace gapi } // namespace cv diff --git a/modules/gapi/include/opencv2/gapi/s11n/base.hpp b/modules/gapi/include/opencv2/gapi/s11n/base.hpp new file mode 100644 index 0000000000..6bf5d5fb0f --- /dev/null +++ b/modules/gapi/include/opencv2/gapi/s11n/base.hpp @@ -0,0 +1,36 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_S11N_BASE_HPP +#define OPENCV_GAPI_S11N_BASE_HPP + +#include + +namespace cv { +namespace gapi { +namespace s11n { +struct IOStream; +struct IIStream; + +namespace detail { +// Will be used along with default types if possible in specific cases (compile args, etc) +// Note: actual implementation is defined by user +template +struct S11N { + static void serialize(IOStream &, const T &) { + GAPI_Assert(false && "No serialization routine is provided!"); + } + static T deserialize(IIStream &) { + GAPI_Assert(false && "No deserialization routine is provided!"); + } +}; + +} // namespace detail +} // namespace s11n +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_S11N_BASE_HPP diff --git a/modules/gapi/misc/python/shadow_gapi.hpp b/modules/gapi/misc/python/shadow_gapi.hpp index dab083def7..4f988440e8 100644 --- a/modules/gapi/misc/python/shadow_gapi.hpp +++ b/modules/gapi/misc/python/shadow_gapi.hpp @@ -3,6 +3,8 @@ namespace cv { + struct GAPI_EXPORTS_W_SIMPLE GCompileArg { }; + GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GKernelPackage pkg); class GAPI_EXPORTS_W_SIMPLE GProtoArg { }; diff --git a/modules/gapi/src/api/s11n.cpp b/modules/gapi/src/api/s11n.cpp index 54a0850394..52c276fd5d 100644 --- a/modules/gapi/src/api/s11n.cpp +++ b/modules/gapi/src/api/s11n.cpp @@ -44,6 +44,13 @@ std::vector cv::gapi::serialize(const cv::GRunArgs& ra) return os.data(); } +std::vector cv::gapi::serialize(const cv::GCompileArgs& ca) +{ + cv::gapi::s11n::ByteMemoryOutStream os; + serialize(os, ca); + return os.data(); +} + // FIXME: This function should move from S11N to GRunArg-related entities. 
// it has nothing to do with the S11N as it is cv::GRunArgsP cv::gapi::bind(cv::GRunArgs &results) diff --git a/modules/gapi/src/backends/common/serialization.cpp b/modules/gapi/src/backends/common/serialization.cpp index c0b3281449..ca73d29ffb 100644 --- a/modules/gapi/src/backends/common/serialization.cpp +++ b/modules/gapi/src/backends/common/serialization.cpp @@ -329,6 +329,13 @@ IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Line &l) { // G-API types ///////////////////////////////////////////////////////////////// +IOStream& operator<< (IOStream& os, const cv::GCompileArg& arg) +{ + os << arg.tag; + arg.serialize(os); + return os; +} + // Stubs (empty types) IOStream& operator<< (IOStream& os, cv::util::monostate ) {return os;} @@ -865,6 +872,14 @@ IIStream& ByteMemoryInStream::operator>> (std::string& str) { return *this; } +GAPI_EXPORTS std::unique_ptr detail::getInStream(const std::vector &p) { + return std::unique_ptr(new ByteMemoryInStream(p)); +} + +GAPI_EXPORTS void serialize(IOStream& os, const cv::GCompileArgs &ca) { + os << ca; +} + GAPI_EXPORTS void serialize(IOStream& os, const cv::GMetaArgs &ma) { os << ma; } @@ -882,7 +897,6 @@ GAPI_EXPORTS GRunArgs run_args_deserialize(IIStream& is) { return s; } - } // namespace s11n } // namespace gapi } // namespace cv diff --git a/modules/gapi/src/backends/common/serialization.hpp b/modules/gapi/src/backends/common/serialization.hpp index 4c60e71d87..e2aa56c45b 100644 --- a/modules/gapi/src/backends/common/serialization.hpp +++ b/modules/gapi/src/backends/common/serialization.hpp @@ -40,6 +40,8 @@ struct GSerialized { // G-API types ///////////////////////////////////////////////////////////////// +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GCompileArg& arg); + GAPI_EXPORTS IOStream& operator<< (IOStream& os, cv::util::monostate ); GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::util::monostate &); @@ -268,6 +270,11 @@ public: virtual IIStream& operator>> (std::string &) 
override; }; +namespace detail { +GAPI_EXPORTS std::unique_ptr getInStream(const std::vector &p); +} // namespace detail + +GAPI_EXPORTS void serialize(IOStream& os, const cv::GCompileArgs &ca); GAPI_EXPORTS void serialize(IOStream& os, const cv::GMetaArgs &ma); GAPI_EXPORTS void serialize(IOStream& os, const cv::GRunArgs &ra); GAPI_EXPORTS GMetaArgs meta_args_deserialize(IIStream& is); diff --git a/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp b/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp index 75ca7989e0..fe6a1f94af 100644 --- a/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp +++ b/modules/gapi/test/cpu/gapi_ocv_stateful_kernel_tests.cpp @@ -21,7 +21,7 @@ namespace opencv_test { std::string method; }; -} +} // namespace opencv_test namespace cv { @@ -31,11 +31,11 @@ namespace cv { static const char* tag() { - return "org.opencv.test..background_substractor_state_params"; + return "org.opencv.test.background_substractor_state_params"; } }; - } -} + } // namespace detail +} // namespace cv namespace opencv_test { diff --git a/modules/gapi/test/s11n/gapi_s11n_tests.cpp b/modules/gapi/test/s11n/gapi_s11n_tests.cpp index 10fe586188..1a4faec12c 100644 --- a/modules/gapi/test/s11n/gapi_s11n_tests.cpp +++ b/modules/gapi/test/s11n/gapi_s11n_tests.cpp @@ -34,6 +34,17 @@ namespace detail { } // namespace gapi } // namespace cv + +namespace cv { +namespace detail { +template<> struct CompileArgTag { + static const char* tag() { + return "org.opencv.test.mycustomtype"; + } +}; +} // namespace detail +} // namespace cv + namespace opencv_test { struct S11N_Basic: public ::testing::Test { @@ -511,4 +522,15 @@ TEST_F(S11N_Basic, Test_Custom_Type) { MyCustomType new_var = cv::gapi::s11n::detail::S11N::deserialize(is); EXPECT_EQ(var, new_var); } + +TEST_F(S11N_Basic, Test_Custom_CompileArg) { + MyCustomType customVar{1248, "World", {1280, 720, 640, 480}, {{5, 32434142342}, {7, 34242432}}}; + + std::vector sArgs = 
cv::gapi::serialize(cv::compile_args(customVar)); + + GCompileArgs dArgs = cv::gapi::deserialize(sArgs); + + MyCustomType dCustomVar = cv::gapi::getCompileArg(dArgs).value(); + EXPECT_EQ(customVar, dCustomVar); +} } // namespace opencv_test From ae265a48c75948a2f089c4e3dc4fbc84a9b77ee3 Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Thu, 8 Oct 2020 11:50:07 +0300 Subject: [PATCH 007/152] Doc: fixed warnings when CUDA modules are missing --- .../config_reference/config_reference.markdown | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/doc/tutorials/introduction/config_reference/config_reference.markdown b/doc/tutorials/introduction/config_reference/config_reference.markdown index 5107af6a8a..a6bde80102 100644 --- a/doc/tutorials/introduction/config_reference/config_reference.markdown +++ b/doc/tutorials/introduction/config_reference/config_reference.markdown @@ -247,15 +247,18 @@ When `WITH_` option is enabled: `WITH_CUDA` (default: _OFF_) -Many algorithms have been implemented using CUDA acceleration, these functions are located in separate modules: @ref cuda. CUDA toolkit must be installed from the official NVIDIA site as a prerequisite. For cmake versions older than 3.9 OpenCV uses own `cmake/FindCUDA.cmake` script, for newer versions - the one packaged with CMake. Additional options can be used to control build process, e.g. `CUDA_GENERATION` or `CUDA_ARCH_BIN`. These parameters are not documented yet, please consult with the `cmake/OpenCVDetectCUDA.cmake` script for details. - -Some tutorials can be found in the corresponding section: @ref tutorial_table_of_content_gpu +Many algorithms have been implemented using CUDA acceleration, these functions are located in separate modules. CUDA toolkit must be installed from the official NVIDIA site as a prerequisite. For cmake versions older than 3.9 OpenCV uses own `cmake/FindCUDA.cmake` script, for newer versions - the one packaged with CMake. 
Additional options can be used to control build process, e.g. `CUDA_GENERATION` or `CUDA_ARCH_BIN`. These parameters are not documented yet, please consult with the `cmake/OpenCVDetectCUDA.cmake` script for details. @note Since OpenCV version 4.0 all CUDA-accelerated algorithm implementations have been moved to the _opencv_contrib_ repository. To build _opencv_ and _opencv_contrib_ together check @ref tutorial_config_reference_general_contrib. +@cond CUDA_MODULES +@note Some tutorials can be found in the corresponding section: @ref tutorial_table_of_content_gpu +@see @ref cuda +@endcond + @see https://en.wikipedia.org/wiki/CUDA -TODO: other options: `WITH_CUFFT`, `WITH_CUBLAS`, WITH_NVCUVID`? +TODO: other options: `WITH_CUFFT`, `WITH_CUBLAS`, `WITH_NVCUVID`? ### OpenCL support From 6da05f708639bae5ee9b2b338793bfa3be2d764b Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 6 Oct 2020 23:40:27 +0000 Subject: [PATCH 008/152] dnn(test): update tests for OpenVINO 2021.1 --- modules/dnn/perf/perf_net.cpp | 18 +++++++++++++++++- modules/dnn/test/test_darknet_importer.cpp | 5 +++++ modules/dnn/test/test_torch_importer.cpp | 4 ++++ 3 files changed, 26 insertions(+), 1 deletion(-) diff --git a/modules/dnn/perf/perf_net.cpp b/modules/dnn/perf/perf_net.cpp index 23ece025e7..600193915d 100644 --- a/modules/dnn/perf/perf_net.cpp +++ b/modules/dnn/perf/perf_net.cpp @@ -111,6 +111,10 @@ PERF_TEST_P_(DNNTestNetwork, ENet) if ((backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU) || (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16)) throw SkipTestException(""); +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2021010000) + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + throw SkipTestException(""); +#endif processNet("dnn/Enet-model-best.net", "", "enet.yml", Mat(cv::Size(512, 256), CV_32FC3)); } @@ -202,6 +206,10 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv3) if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && 
target == DNN_TARGET_OPENCL_FP16) throw SkipTestException("Test is disabled in OpenVINO 2020.4"); #endif +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000) // nGraph compilation failure + if (target == DNN_TARGET_MYRIAD) + throw SkipTestException(""); +#endif Mat sample = imread(findDataFile("dnn/dog416.png")); cvtColor(sample, sample, COLOR_BGR2RGB); @@ -214,7 +222,7 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv4) { if (backend == DNN_BACKEND_HALIDE) throw SkipTestException(""); - if (target == DNN_TARGET_MYRIAD) + if (target == DNN_TARGET_MYRIAD) // not enough resources throw SkipTestException(""); #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000) // nGraph compilation failure if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_OPENCL) @@ -233,6 +241,10 @@ PERF_TEST_P_(DNNTestNetwork, YOLOv4_tiny) { if (backend == DNN_BACKEND_HALIDE) throw SkipTestException(""); +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000) // nGraph compilation failure + if (target == DNN_TARGET_MYRIAD) + throw SkipTestException(""); +#endif Mat sample = imread(findDataFile("dnn/dog416.png")); cvtColor(sample, sample, COLOR_BGR2RGB); Mat inp; @@ -263,6 +275,10 @@ PERF_TEST_P_(DNNTestNetwork, Inception_v2_Faster_RCNN) #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2019020000) if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) throw SkipTestException("Test is disabled in OpenVINO 2019R2"); +#endif +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000) + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) + throw SkipTestException("Test is disabled in OpenVINO 2021.1 / MYRIAD"); #endif if (backend == DNN_BACKEND_HALIDE || (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 && target != DNN_TARGET_CPU) || diff --git a/modules/dnn/test/test_darknet_importer.cpp b/modules/dnn/test/test_darknet_importer.cpp index 4986e8e399..a47e771084 100644 --- 
a/modules/dnn/test/test_darknet_importer.cpp +++ b/modules/dnn/test/test_darknet_importer.cpp @@ -625,6 +625,11 @@ TEST_P(Test_Darknet_nets, YOLOv4_tiny) target == DNN_TARGET_CPU ? CV_TEST_TAG_MEMORY_512MB : CV_TEST_TAG_MEMORY_1GB ); +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2021010000) // nGraph compilation failure + if (target == DNN_TARGET_MYRIAD) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_VERSION); +#endif + const double confThreshold = 0.5; // batchId, classId, confidence, left, top, right, bottom const int N0 = 2; diff --git a/modules/dnn/test/test_torch_importer.cpp b/modules/dnn/test/test_torch_importer.cpp index 3be22d6d25..9eb89f3d78 100644 --- a/modules/dnn/test/test_torch_importer.cpp +++ b/modules/dnn/test/test_torch_importer.cpp @@ -359,6 +359,10 @@ TEST_P(Test_Torch_nets, ENet_accuracy) if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); throw SkipTestException(""); } +#endif +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2021010000) + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); #endif if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU) { From e87a0baa4b8e14f0b5534954a191358bea62f650 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Thu, 8 Oct 2020 20:27:03 +0000 Subject: [PATCH 009/152] dnn(test): enable tests from issue 17953 --- modules/dnn/test/test_layers.cpp | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp index 085e5a51b8..e61b754b86 100644 --- a/modules/dnn/test/test_layers.cpp +++ b/modules/dnn/test/test_layers.cpp @@ -2349,13 +2349,6 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy) if ((eltwiseOp != "sum" || weightedEltwise) && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)) 
applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL); - // bug: https://github.com/opencv/opencv/issues/17953 - if (eltwiseOp == "sum" && actType == "ChannelsPReLU" && bias_term == false && - backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)) - { - applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL); - } - // bug: https://github.com/opencv/opencv/issues/17964 if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)) applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL); @@ -2442,13 +2435,6 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy) if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)) applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL); - // bug: https://github.com/opencv/opencv/issues/17953 - if (actType == "ChannelsPReLU" && bias_term == false && - backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)) - { - applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL); - } - Net net; int convId = net.addLayer(convParams.name, convParams.type, convParams); int activId = net.addLayer(activationParams.name, activationParams.type, activationParams); From 76be3529f491aa5835602021c65ef99975837cac Mon Sep 17 00:00:00 2001 From: Anatoliy Talamanov Date: Fri, 9 Oct 2020 01:12:25 +0300 Subject: [PATCH 010/152] Merge pull request #18419 from TolyaTalamanov:at/generic-inference [G-API] Introduce generic version for cv::gapi::infer * Introduce generic infer * Move Generic to infer.hpp * Removew num_outs * Fix windows warnings * Fix comments to review * Fix doxygen * Add comment * Fix comments to review * standoalone ifdef in ginfer.cpp * Fix test --- modules/gapi/include/opencv2/gapi/gcall.hpp | 9 ++- modules/gapi/include/opencv2/gapi/infer.hpp | 78 +++++++++++++++++++ .../gapi/include/opencv2/gapi/infer/ie.hpp | 32 +++++++- modules/gapi/src/api/gcall.cpp | 10 +++ 
modules/gapi/src/api/gcall_priv.hpp | 3 +- modules/gapi/src/api/ginfer.cpp | 30 +++++++ modules/gapi/src/backends/ie/giebackend.cpp | 18 ++++- modules/gapi/src/compiler/gmodel.cpp | 8 +- modules/gapi/src/compiler/gmodel.hpp | 7 +- modules/gapi/src/compiler/gmodelbuilder.cpp | 2 +- .../gapi/test/infer/gapi_infer_ie_test.cpp | 53 +++++++++++++ 11 files changed, 237 insertions(+), 13 deletions(-) diff --git a/modules/gapi/include/opencv2/gapi/gcall.hpp b/modules/gapi/include/opencv2/gapi/gcall.hpp index ed5ba5fde8..511eca1408 100644 --- a/modules/gapi/include/opencv2/gapi/gcall.hpp +++ b/modules/gapi/include/opencv2/gapi/gcall.hpp @@ -56,11 +56,16 @@ public: Priv& priv(); const Priv& priv() const; -protected: - std::shared_ptr m_priv; + // GKernel and params can be modified, it's needed for infer, + // because information about output shapes doesn't exist in compile time + GKernel& kernel(); + cv::util::any& params(); void setArgs(std::vector &&args); +protected: + std::shared_ptr m_priv; + // Public versions return a typed array or opaque, those are implementation details detail::GArrayU yieldArray(int output = 0); detail::GOpaqueU yieldOpaque(int output = 0); diff --git a/modules/gapi/include/opencv2/gapi/infer.hpp b/modules/gapi/include/opencv2/gapi/infer.hpp index 50086dd848..4fdd2df875 100644 --- a/modules/gapi/include/opencv2/gapi/infer.hpp +++ b/modules/gapi/include/opencv2/gapi/infer.hpp @@ -121,6 +121,45 @@ struct GInferBase { } }; +// Struct stores network input/output names. 
+// Used by infer +struct InOutInfo +{ + std::vector in_names; + std::vector out_names; +}; + +/** + * @{ + * @brief G-API object used to collect network inputs + */ +class GAPI_EXPORTS GInferInputs +{ +public: + cv::GMat& operator[](const std::string& name); + const std::unordered_map& getBlobs() const; + +private: + std::unordered_map in_blobs; +}; +/** @} */ + +/** + * @{ + * @brief G-API object used to collect network outputs + */ +struct GAPI_EXPORTS GInferOutputs +{ +public: + GInferOutputs(std::shared_ptr call); + cv::GMat at(const std::string& name); + +private: + std::shared_ptr m_call; + InOutInfo* m_info = nullptr; + std::unordered_map out_blobs; +}; +/** @} */ // Base "Infer list" kernel. // All notes from "Infer" kernel apply here as well. @@ -254,6 +293,45 @@ typename Net::Result infer(Args&&... args) { return GInfer::on(std::forward(args)...); } +/** + * @brief Special network type + */ +struct Generic { }; + +/** + * @brief Calculates response for generic network + * + * @param tag a network tag + * @param inputs networks's inputs + * @return a GInferOutputs + */ +template GInferOutputs +infer(const std::string& tag, const GInferInputs& inputs) +{ + std::vector input_args; + std::vector input_names; + + const auto& blobs = inputs.getBlobs(); + for (auto&& p : blobs) + { + input_names.push_back(p.first); + input_args.emplace_back(p.second); + } + + GKinds kinds(blobs.size(), cv::detail::OpaqueKind::CV_MAT); + auto call = std::make_shared(GKernel{ + GInferBase::id(), + tag, + GInferBase::getOutMeta, + {}, // outShape will be filled later + std::move(kinds) + }); + + call->setArgs(std::move(input_args)); + call->params() = InOutInfo{input_names, {}}; + + return GInferOutputs{std::move(call)}; +} } // namespace gapi } // namespace cv diff --git a/modules/gapi/include/opencv2/gapi/infer/ie.hpp b/modules/gapi/include/opencv2/gapi/infer/ie.hpp index c6d7f272a8..8421d9e2c9 100644 --- a/modules/gapi/include/opencv2/gapi/infer/ie.hpp +++ 
b/modules/gapi/include/opencv2/gapi/infer/ie.hpp @@ -17,6 +17,7 @@ #include // GAPI_EXPORTS #include // GKernelPackage +#include // Generic namespace cv { namespace gapi { @@ -58,6 +59,8 @@ namespace detail { // (e.g. topology's partial execution) std::size_t num_in; // How many inputs are defined in the operation std::size_t num_out; // How many outputs are defined in the operation + + bool is_generic; }; } // namespace detail @@ -80,7 +83,7 @@ public: : desc{ model, weights, device, {}, {}, {} , std::tuple_size::value // num_in , std::tuple_size::value // num_out - } { + , false} { }; Params& cfgInputLayers(const typename PortCfg::In &ll) { @@ -107,15 +110,36 @@ public: } // BEGIN(G-API's network parametrization API) - GBackend backend() const { return cv::gapi::ie::backend(); } - std::string tag() const { return Net::tag(); } - cv::util::any params() const { return { desc }; } + GBackend backend() const { return cv::gapi::ie::backend(); } + std::string tag() const { return Net::tag(); } + cv::util::any params() const { return { desc }; } // END(G-API's network parametrization API) protected: detail::ParamDesc desc; }; +template<> +class Params { +public: + Params(const std::string& tag, + const std::string &model, + const std::string &weights, + const std::string &device) + : desc{ model, weights, device, {}, {}, {}, 0u, 0u, true}, m_tag(tag) { + }; + + // BEGIN(G-API's network parametrization API) + GBackend backend() const { return cv::gapi::ie::backend(); } + std::string tag() const { return m_tag; } + cv::util::any params() const { return { desc }; } + // END(G-API's network parametrization API) + +protected: + detail::ParamDesc desc; + std::string m_tag; +}; + } // namespace ie } // namespace gapi } // namespace cv diff --git a/modules/gapi/src/api/gcall.cpp b/modules/gapi/src/api/gcall.cpp index 6f5f65bbfd..6a2121bd36 100644 --- a/modules/gapi/src/api/gcall.cpp +++ b/modules/gapi/src/api/gcall.cpp @@ -78,3 +78,13 @@ const cv::GCall::Priv& cv::GCall::priv() 
const { return *m_priv; } + +cv::GKernel& cv::GCall::kernel() +{ + return m_priv->m_k; +} + +cv::util::any& cv::GCall::params() +{ + return m_priv->m_params; +} diff --git a/modules/gapi/src/api/gcall_priv.hpp b/modules/gapi/src/api/gcall_priv.hpp index edc2c225dc..b142432c78 100644 --- a/modules/gapi/src/api/gcall_priv.hpp +++ b/modules/gapi/src/api/gcall_priv.hpp @@ -42,10 +42,11 @@ class GCall::Priv { public: std::vector m_args; - const GKernel m_k; + GKernel m_k; // TODO: Rename to "constructionNode" or smt to reflect its lifetime GNode m_node; + cv::util::any m_params; explicit Priv(const GKernel &k); }; diff --git a/modules/gapi/src/api/ginfer.cpp b/modules/gapi/src/api/ginfer.cpp index 98eeef5ab6..31d851b8e6 100644 --- a/modules/gapi/src/api/ginfer.cpp +++ b/modules/gapi/src/api/ginfer.cpp @@ -25,3 +25,33 @@ std::vector cv::gapi::GNetPackage::backends() const { for (const auto &nn : networks) unique_set.insert(nn.backend); return std::vector(unique_set.begin(), unique_set.end()); } + +// FIXME: Inference API is currently only available in full mode +#if !defined(GAPI_STANDALONE) + +cv::GMat& cv::GInferInputs::operator[](const std::string& name) { + return in_blobs[name]; +} + +const std::unordered_map& cv::GInferInputs::getBlobs() const { + return in_blobs; +} + +cv::GInferOutputs::GInferOutputs(std::shared_ptr call) + : m_call(std::move(call)), m_info(cv::util::any_cast(&m_call->params())) +{ +}; + +cv::GMat cv::GInferOutputs::at(const std::string& name) +{ + auto it = out_blobs.find(name); + if (it == out_blobs.end()) { + // FIXME: Avoid modifying GKernel + m_call->kernel().outShapes.push_back(cv::GShape::GMAT); + int out_idx = static_cast(out_blobs.size()); + it = out_blobs.emplace(name, m_call->yield(out_idx)).first; + m_info->out_names.push_back(name); + } + return it->second; +}; +#endif // GAPI_STANDALONE diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp index 1565d03aec..b7bda2fe9f 100644 --- 
a/modules/gapi/src/backends/ie/giebackend.cpp +++ b/modules/gapi/src/backends/ie/giebackend.cpp @@ -721,9 +721,23 @@ namespace { // FIXME: Introduce a DNNBackend interface which'd specify // the framework for this??? GIEModel gm(gr); - const auto &np = gm.metadata(nh).get(); - const auto &pp = cv::util::any_cast(np.opaque); + auto &np = gm.metadata(nh).get(); + auto &pp = cv::util::any_cast(np.opaque); const auto &ki = cv::util::any_cast(ii.opaque); + + GModel::Graph model(gr); + auto& op = model.metadata(nh).get(); + + // NB: In case generic infer, info about in/out names is stored in operation (op.params) + if (pp.is_generic) + { + auto& info = cv::util::any_cast(op.params); + pp.input_names = info.in_names; + pp.output_names = info.out_names; + pp.num_in = info.in_names.size(); + pp.num_out = info.out_names.size(); + } + gm.metadata(nh).set(IEUnit{pp}); gm.metadata(nh).set(IECallable{ki.run}); gm.metadata(nh).set(CustomMetaFunction{ki.customMetaFunc}); diff --git a/modules/gapi/src/compiler/gmodel.cpp b/modules/gapi/src/compiler/gmodel.cpp index 39dc1da33b..b5b76fd1c9 100644 --- a/modules/gapi/src/compiler/gmodel.cpp +++ b/modules/gapi/src/compiler/gmodel.cpp @@ -23,12 +23,16 @@ namespace cv { namespace gimpl { -ade::NodeHandle GModel::mkOpNode(GModel::Graph &g, const GKernel &k, const std::vector &args, const std::string &island) +ade::NodeHandle GModel::mkOpNode(GModel::Graph &g, + const GKernel &k, + const std::vector &args, + const cv::util::any ¶ms, + const std::string &island) { ade::NodeHandle op_h = g.createNode(); g.metadata(op_h).set(NodeType{NodeType::OP}); //These extra empty {} are to please GCC (-Wmissing-field-initializers) - g.metadata(op_h).set(Op{k, args, {}, {}}); + g.metadata(op_h).set(Op{k, args, {}, {}, params}); if (!island.empty()) g.metadata(op_h).set(Island{island}); return op_h; diff --git a/modules/gapi/src/compiler/gmodel.hpp b/modules/gapi/src/compiler/gmodel.hpp index 8f78ba49b7..5f02e58354 100644 --- 
a/modules/gapi/src/compiler/gmodel.hpp +++ b/modules/gapi/src/compiler/gmodel.hpp @@ -61,6 +61,7 @@ struct Op std::vector outs; // TODO: Introduce a new type for resource references cv::gapi::GBackend backend; + cv::util::any params; // Operation specific information }; struct Data @@ -262,7 +263,11 @@ namespace GModel // GAPI_EXPORTS for tests GAPI_EXPORTS void init (Graph& g); - GAPI_EXPORTS ade::NodeHandle mkOpNode(Graph &g, const GKernel &k, const std::vector& args, const std::string &island); + GAPI_EXPORTS ade::NodeHandle mkOpNode(Graph &g, + const GKernel &k, + const std::vector& args, + const cv::util::any& params, + const std::string &island); // Isn't used by the framework or default backends, required for external backend development GAPI_EXPORTS ade::NodeHandle mkDataNode(Graph &g, const GShape shape); diff --git a/modules/gapi/src/compiler/gmodelbuilder.cpp b/modules/gapi/src/compiler/gmodelbuilder.cpp index 87e9ab55b8..80abadd9c6 100644 --- a/modules/gapi/src/compiler/gmodelbuilder.cpp +++ b/modules/gapi/src/compiler/gmodelbuilder.cpp @@ -286,7 +286,7 @@ ade::NodeHandle cv::gimpl::GModelBuilder::put_OpNode(const cv::GNode &node) { GAPI_Assert(node.shape() == GNode::NodeShape::CALL); const auto &call_p = node.call().priv(); - auto nh = cv::gimpl::GModel::mkOpNode(m_gm, call_p.m_k, call_p.m_args, node_p.m_island); + auto nh = cv::gimpl::GModel::mkOpNode(m_gm, call_p.m_k, call_p.m_args, call_p.m_params, node_p.m_island); m_graph_ops[&node_p] = nh; return nh; } diff --git a/modules/gapi/test/infer/gapi_infer_ie_test.cpp b/modules/gapi/test/infer/gapi_infer_ie_test.cpp index 74d8558909..3125705365 100644 --- a/modules/gapi/test/infer/gapi_infer_ie_test.cpp +++ b/modules/gapi/test/infer/gapi_infer_ie_test.cpp @@ -350,6 +350,59 @@ TEST(DISABLED_TestTwoIENNPipeline, InferBasicImage) normAssert(cv::gapi::ie::util::to_ocv(ie_gender2), gapi_gender2, "Test gender output 2"); } +TEST(TestAgeGenderIE, GenericInfer) +{ + initDLDTDataPath(); + + 
cv::gapi::ie::detail::ParamDesc params; + params.model_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml"); + params.weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin"); + params.device_id = "CPU"; + + cv::Mat in_mat(cv::Size(320, 240), CV_8UC3); + cv::randu(in_mat, 0, 255); + + cv::Mat gapi_age, gapi_gender; + + // Load & run IE network + IE::Blob::Ptr ie_age, ie_gender; + { + auto plugin = cv::gimpl::ie::wrap::getPlugin(params); + auto net = cv::gimpl::ie::wrap::readNetwork(params); + setNetParameters(net); + auto this_network = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params); + auto infer_request = this_network.CreateInferRequest(); + infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_mat)); + infer_request.Infer(); + ie_age = infer_request.GetBlob("age_conv3"); + ie_gender = infer_request.GetBlob("prob"); + } + + // Configure & run G-API + cv::GMat in; + GInferInputs inputs; + inputs["data"] = in; + + auto outputs = cv::gapi::infer("age-gender-generic", inputs); + + auto age = outputs.at("age_conv3"); + auto gender = outputs.at("prob"); + + cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender)); + + cv::gapi::ie::Params pp{"age-gender-generic", + params.model_path, + params.weights_path, + params.device_id}; + + comp.apply(cv::gin(in_mat), cv::gout(gapi_age, gapi_gender), + cv::compile_args(cv::gapi::networks(pp))); + + // Validate with IE itself (avoid DNN module dependency here) + normAssert(cv::gapi::ie::util::to_ocv(ie_age), gapi_age, "Test age output" ); + normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output"); +} + } // namespace opencv_test #endif // HAVE_INF_ENGINE From 718dd9f170041995fe84ec01a614e9465b9f063f Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 9 Oct 2020 11:57:49 +0000 Subject: [PATCH 011/152] dnn(opencl): bypass unsupported fusion cases --- modules/dnn/src/dnn.cpp | 4 +++- modules/dnn/src/layers/convolution_layer.cpp | 10 ++++++++++ 
modules/dnn/test/test_common.impl.hpp | 4 ++-- modules/dnn/test/test_layers.cpp | 18 +++--------------- 4 files changed, 18 insertions(+), 18 deletions(-) diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp index c50dae7967..9ee688f497 100644 --- a/modules/dnn/src/dnn.cpp +++ b/modules/dnn/src/dnn.cpp @@ -2460,10 +2460,12 @@ struct Net::Impl : public detail::NetImplBase if( nextData ) nextActivLayer = nextData->layerInstance.dynamicCast(); + Ptr activ_power; if( !nextActivLayer.empty() && (!nextData->type.compare("ReLU") || !nextData->type.compare("ChannelsPReLU") || - !nextData->type.compare("Power")) && + (!nextData->type.compare("Power") && (activ_power = nextActivLayer.dynamicCast()) && activ_power->scale == 1.0f) + ) && currLayer->setActivation(nextActivLayer) ) { CV_Assert_N(biasLayerData->outputBlobsWrappers.size() == 1, ld.inputBlobsWrappers.size() == 1); diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp index ddc318def2..206ce72fa0 100644 --- a/modules/dnn/src/layers/convolution_layer.cpp +++ b/modules/dnn/src/layers/convolution_layer.cpp @@ -46,6 +46,8 @@ #include "../op_inf_engine.hpp" #include "../ie_ngraph.hpp" +#include + #include "opencv2/core/hal/hal.hpp" #include "opencv2/core/hal/intrin.hpp" #include @@ -371,6 +373,14 @@ public: Ptr activ_power = activ.dynamicCast(); if (!activ_power.empty()) { + if (activ_power->scale != 1.0f) // not supported well by implementation, #17964 + { + // FIXIT no way to check number of blobs (like, eltwise input) + CV_LOG_INFO(NULL, "DNN/OpenCL: can't configure Power activation (scale != 1.0f)"); + activ.release(); + newActiv = false; + return false; + } if (activ_power->scale != 1.f || activ_power->shift != 0.f) { const int outCh = blobs[0].size[0]; diff --git a/modules/dnn/test/test_common.impl.hpp b/modules/dnn/test/test_common.impl.hpp index 559b74f126..e55e6cb7b3 100644 --- a/modules/dnn/test/test_common.impl.hpp +++ 
b/modules/dnn/test/test_common.impl.hpp @@ -63,10 +63,10 @@ void normAssert( double l1 /*= 0.00001*/, double lInf /*= 0.0001*/) { double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total(); - EXPECT_LE(normL1, l1) << comment; + EXPECT_LE(normL1, l1) << comment << " |ref| = " << cvtest::norm(ref, cv::NORM_INF); double normInf = cvtest::norm(ref, test, cv::NORM_INF); - EXPECT_LE(normInf, lInf) << comment; + EXPECT_LE(normInf, lInf) << comment << " |ref| = " << cvtest::norm(ref, cv::NORM_INF); } std::vector matToBoxes(const cv::Mat& m) diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp index e61b754b86..3872f562ef 100644 --- a/modules/dnn/test/test_layers.cpp +++ b/modules/dnn/test/test_layers.cpp @@ -2219,10 +2219,6 @@ TEST_P(ConvolutionActivationFusion, Accuracy) Backend backendId = get<0>(get<2>(GetParam())); Target targetId = get<1>(get<2>(GetParam())); - // bug: https://github.com/opencv/opencv/issues/17964 - if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)) - applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL); - Net net; int convId = net.addLayer(convParams.name, convParams.type, convParams); int activId = net.addLayerToPrev(activationParams.name, activationParams.type, activationParams); @@ -2235,7 +2231,7 @@ TEST_P(ConvolutionActivationFusion, Accuracy) expectedFusedLayers.push_back(activId); // all activations are fused else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16) { - if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Power") + if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" /*|| actType == "Power"*/) expectedFusedLayers.push_back(activId); } } @@ -2349,10 +2345,6 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy) if ((eltwiseOp != "sum" || weightedEltwise) && backendId == DNN_BACKEND_OPENCV && 
(targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)) applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL); - // bug: https://github.com/opencv/opencv/issues/17964 - if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)) - applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL); - Net net; int convId = net.addLayer(convParams.name, convParams.type, convParams); int eltwiseId = net.addLayer(eltwiseParams.name, eltwiseParams.type, eltwiseParams); @@ -2369,7 +2361,7 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy) expectedFusedLayers.push_back(activId); // activation is fused with eltwise layer else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16) { - if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "Power") + if (actType == "ReLU" || actType == "ChannelsPReLU" /*|| actType == "Power"*/) { expectedFusedLayers.push_back(eltwiseId); expectedFusedLayers.push_back(activId); @@ -2431,10 +2423,6 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy) Backend backendId = get<0>(get<4>(GetParam())); Target targetId = get<1>(get<4>(GetParam())); - // bug: https://github.com/opencv/opencv/issues/17964 - if (actType == "Power" && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)) - applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL); - Net net; int convId = net.addLayer(convParams.name, convParams.type, convParams); int activId = net.addLayer(activationParams.name, activationParams.type, activationParams); @@ -2451,7 +2439,7 @@ TEST_P(ConvolutionActivationEltwiseFusion, Accuracy) expectedFusedLayers.push_back(activId); // activation fused with convolution else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16) { - if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == "ReLU6" || actType == "TanH" || actType == "Power") + if (actType == "ReLU" || actType == "ChannelsPReLU" || actType == 
"ReLU6" || actType == "TanH" /*|| actType == "Power"*/) expectedFusedLayers.push_back(activId); // activation fused with convolution } } From cdcf7e62f37f8476eb209439fe94b51e8a93846c Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 9 Oct 2020 16:33:48 +0000 Subject: [PATCH 012/152] dnn(opencl): bypass unsupported fusion cases 2 --- modules/dnn/src/dnn.cpp | 59 +++++++++++++++----- modules/dnn/src/layers/convolution_layer.cpp | 2 +- modules/dnn/test/test_layers.cpp | 8 +-- 3 files changed, 50 insertions(+), 19 deletions(-) diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp index 9ee688f497..c789638793 100644 --- a/modules/dnn/src/dnn.cpp +++ b/modules/dnn/src/dnn.cpp @@ -2413,14 +2413,42 @@ struct Net::Impl : public detail::NetImplBase } // fuse convolution layer followed by eltwise + relu - if ( IS_DNN_OPENCL_TARGET(preferableTarget) && ld.layerInstance->type == "Convolution" ) + while (nextData && IS_DNN_OPENCL_TARGET(preferableTarget) && ld.layerInstance->type == "Convolution") // semantic of 'if' { - Ptr nextEltwiseLayer; - if( nextData ) - nextEltwiseLayer = nextData->layerInstance.dynamicCast(); + Ptr nextEltwiseLayer = nextData->layerInstance.dynamicCast(); + if (nextEltwiseLayer.empty()) + break; + + if (pinsToKeep.count(lpNext) != 0) + break; + if (nextData->inputBlobsId.size() != 2) + break; + + if (!nextData->params.has("operation") || nextData->params.get("operation").toLowerCase() == "sum") + { + if (nextData->params.has("coeff")) + { + DictValue paramCoeff = nextData->params.get("coeff"); + int n = paramCoeff.size(); + bool isCoeffOneOne = (n == 2); + for (int i = 0; isCoeffOneOne && i < n; i++) + { + float c = paramCoeff.get(i); + isCoeffOneOne &= (c == 1.0f); + } + if (!isCoeffOneOne) + { + CV_LOG_DEBUG(NULL, "DNN/OpenCL: fusion of 'Sum' without coeffs (or {1.0, 1.0}) is supported only"); + break; + } + } + } + else + { + CV_LOG_DEBUG(NULL, "DNN/OpenCL: fusion with eltwise operation is not supported: " << 
nextData->params.get("operation")); + break; + } - if( !nextEltwiseLayer.empty() && pinsToKeep.count(lpNext) == 0 && - nextData && nextData->inputBlobsId.size() == 2 ) { LayerData *eltwiseData = nextData; @@ -2517,6 +2545,8 @@ struct Net::Impl : public detail::NetImplBase } } } + + break; } } @@ -2698,11 +2728,11 @@ struct Net::Impl : public detail::NetImplBase Ptr layer = ld.layerInstance; - TickMeter tm; - tm.start(); - if( !ld.skip ) { + TickMeter tm; + tm.start(); + std::map >::iterator it = ld.backendNodes.find(preferableBackend); if (preferableBackend == DNN_BACKEND_OPENCV || it == ld.backendNodes.end() || it->second.empty()) { @@ -2881,12 +2911,15 @@ struct Net::Impl : public detail::NetImplBase CV_Error(Error::StsNotImplemented, "Unknown backend identifier"); } } + + tm.stop(); + int64 t = tm.getTimeTicks(); + layersTimings[ld.id] = (t > 0) ? t : t + 1; // zero for skipped layers only } else - tm.reset(); - - tm.stop(); - layersTimings[ld.id] = tm.getTimeTicks(); + { + layersTimings[ld.id] = 0; + } ld.flag = 1; } diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp index 206ce72fa0..473c07b755 100644 --- a/modules/dnn/src/layers/convolution_layer.cpp +++ b/modules/dnn/src/layers/convolution_layer.cpp @@ -376,7 +376,7 @@ public: if (activ_power->scale != 1.0f) // not supported well by implementation, #17964 { // FIXIT no way to check number of blobs (like, eltwise input) - CV_LOG_INFO(NULL, "DNN/OpenCL: can't configure Power activation (scale != 1.0f)"); + CV_LOG_DEBUG(NULL, "DNN/OpenCL: can't configure Power activation (scale != 1.0f)"); activ.release(); newActiv = false; return false; diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp index 3872f562ef..327f3e9abd 100644 --- a/modules/dnn/test/test_layers.cpp +++ b/modules/dnn/test/test_layers.cpp @@ -2341,10 +2341,6 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy) Backend backendId = get<0>(get<4>(GetParam())); Target 
targetId = get<1>(get<4>(GetParam())); - // bug: https://github.com/opencv/opencv/issues/17945 - if ((eltwiseOp != "sum" || weightedEltwise) && backendId == DNN_BACKEND_OPENCV && (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16)) - applyTestTag(CV_TEST_TAG_DNN_SKIP_OPENCL); - Net net; int convId = net.addLayer(convParams.name, convParams.type, convParams); int eltwiseId = net.addLayer(eltwiseParams.name, eltwiseParams.type, eltwiseParams); @@ -2361,7 +2357,9 @@ TEST_P(ConvolutionEltwiseActivationFusion, Accuracy) expectedFusedLayers.push_back(activId); // activation is fused with eltwise layer else if (targetId == DNN_TARGET_OPENCL || targetId == DNN_TARGET_OPENCL_FP16) { - if (actType == "ReLU" || actType == "ChannelsPReLU" /*|| actType == "Power"*/) + if (eltwiseOp == "sum" && !weightedEltwise && + (actType == "ReLU" || actType == "ChannelsPReLU" /*|| actType == "Power"*/) + ) { expectedFusedLayers.push_back(eltwiseId); expectedFusedLayers.push_back(activId); From 171fbf879fa5b2a991192cae72eb5fed61cec522 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 9 Oct 2020 22:00:02 +0000 Subject: [PATCH 013/152] cmake: fix typo in CUDA_GENERATION=Auto cache --- cmake/OpenCVDetectCUDA.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/OpenCVDetectCUDA.cmake b/cmake/OpenCVDetectCUDA.cmake index c55f9f1903..c7cfebe50f 100644 --- a/cmake/OpenCVDetectCUDA.cmake +++ b/cmake/OpenCVDetectCUDA.cmake @@ -198,7 +198,7 @@ if(CUDA_FOUND) if(${status} EQUAL 0) # cache detected values - set(OPENCV_CACHE_CUDA_ACTIVE_CC ${${result_list}} CACHE INTERNAL "") + set(OPENCV_CACHE_CUDA_ACTIVE_CC ${${output}} CACHE INTERNAL "") set(OPENCV_CACHE_CUDA_ACTIVE_CC_check "${__cache_key_check}" CACHE INTERNAL "") endif() endif() From 2dd2d6095584b957b243bb86948b13223ecb39b3 Mon Sep 17 00:00:00 2001 From: maxint Date: Sat, 10 Oct 2020 07:13:23 +0000 Subject: [PATCH 014/152] Fix warnings: "-Wrange-loop-construct" in gapi --- 
modules/gapi/src/backends/fluid/gfluidbackend.cpp | 4 ++-- modules/gapi/src/compiler/passes/exec.cpp | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/modules/gapi/src/backends/fluid/gfluidbackend.cpp b/modules/gapi/src/backends/fluid/gfluidbackend.cpp index 9b95dff036..030bb10198 100644 --- a/modules/gapi/src/backends/fluid/gfluidbackend.cpp +++ b/modules/gapi/src/backends/fluid/gfluidbackend.cpp @@ -952,7 +952,7 @@ namespace using namespace cv::gimpl; GModel::Graph g(graph); GFluidModel fg(graph); - for (const auto node : g.nodes()) + for (const auto& node : g.nodes()) { if (g.metadata(node).get().t == NodeType::DATA) { @@ -1440,7 +1440,7 @@ void GFluidBackendImpl::addMetaSensitiveBackendPasses(ade::ExecutionEngineSetupC { // Add FluidData to all data nodes inside island, // set internal = true if node is not a slot in terms of higher-level GIslandModel - for (const auto node : isl->contents()) + for (const auto& node : isl->contents()) { if (g.metadata(node).get().t == NodeType::DATA && !fg.metadata(node).contains()) diff --git a/modules/gapi/src/compiler/passes/exec.cpp b/modules/gapi/src/compiler/passes/exec.cpp index 755538bb46..0eb8352b76 100644 --- a/modules/gapi/src/compiler/passes/exec.cpp +++ b/modules/gapi/src/compiler/passes/exec.cpp @@ -71,12 +71,12 @@ namespace all.insert(src_g.nodes().begin(), src_g.nodes().end()); - for (const auto nh : proto.in_nhs) + for (const auto& nh : proto.in_nhs) { all.erase(nh); in_ops.insert(nh->outNodes().begin(), nh->outNodes().end()); } - for (const auto nh : proto.out_nhs) + for (const auto& nh : proto.out_nhs) { all.erase(nh); out_ops.insert(nh->inNodes().begin(), nh->inNodes().end()); @@ -90,12 +90,12 @@ namespace auto ih = GIslandModel::mkIslandNode(g, std::move(isl)); - for (const auto nh : proto.in_nhs) + for (const auto& nh : proto.in_nhs) { auto slot = GIslandModel::mkSlotNode(g, nh); g.link(slot, ih); } - for (const auto nh : proto.out_nhs) + for (const auto& nh : proto.out_nhs) { auto 
slot = GIslandModel::mkSlotNode(g, nh); g.link(ih, slot); From dc15187f1b6784ef2ece30dae223570811eaddff Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sat, 10 Oct 2020 20:14:29 +0000 Subject: [PATCH 015/152] release: OpenCV 3.4.12 --- modules/core/include/opencv2/core/version.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/core/include/opencv2/core/version.hpp b/modules/core/include/opencv2/core/version.hpp index 00172f809e..c5d015afe0 100644 --- a/modules/core/include/opencv2/core/version.hpp +++ b/modules/core/include/opencv2/core/version.hpp @@ -8,7 +8,7 @@ #define CV_VERSION_MAJOR 3 #define CV_VERSION_MINOR 4 #define CV_VERSION_REVISION 12 -#define CV_VERSION_STATUS "-pre" +#define CV_VERSION_STATUS "" #define CVAUX_STR_EXP(__A) #__A #define CVAUX_STR(__A) CVAUX_STR_EXP(__A) From e58da86efc38484b1c660263405326bcb22594c9 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sat, 10 Oct 2020 21:34:15 +0000 Subject: [PATCH 016/152] dnn(test): update tests for OpenVINO 2021.1 (OpenCV 4.x) --- modules/dnn/test/test_model.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/dnn/test/test_model.cpp b/modules/dnn/test/test_model.cpp index 215cc1c743..ddec6e79e4 100644 --- a/modules/dnn/test/test_model.cpp +++ b/modules/dnn/test/test_model.cpp @@ -363,7 +363,7 @@ TEST_P(Test_Model, Detection_normalized) scoreDiff = 5e-3; iouDiff = 0.09; } -#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2020040000) +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_GE(2020040000) if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target == DNN_TARGET_MYRIAD) { iouDiff = 0.095f; From 16561ed71e49b11d9790d3c161c65bdd3c0a2992 Mon Sep 17 00:00:00 2001 From: mtfrctl Date: Sun, 11 Oct 2020 19:16:03 +0900 Subject: [PATCH 017/152] Add data pointer bridge method to Mat for Objective-C/Swift --- modules/core/misc/objc/common/Mat.h | 1 + modules/core/misc/objc/common/Mat.mm | 4 ++++ 2 files changed, 5 insertions(+) 
diff --git a/modules/core/misc/objc/common/Mat.h b/modules/core/misc/objc/common/Mat.h index 72f81dd9b7..229337f524 100644 --- a/modules/core/misc/objc/common/Mat.h +++ b/modules/core/misc/objc/common/Mat.h @@ -97,6 +97,7 @@ CV_EXPORTS @interface Mat : NSObject - (void)createEx:(NSArray*)sizes type:(int)type NS_SWIFT_NAME(create(sizes:type:)); - (void)copySize:(Mat*)mat; - (Mat*)cross:(Mat*)mat; +- (unsigned char*)dataPtr NS_SWIFT_NAME(dataPointer()); - (int)depth; - (Mat*)diag:(int)diagonal; - (Mat*)diag; diff --git a/modules/core/misc/objc/common/Mat.mm b/modules/core/misc/objc/common/Mat.mm index c075e26046..5d41a3622e 100644 --- a/modules/core/misc/objc/common/Mat.mm +++ b/modules/core/misc/objc/common/Mat.mm @@ -286,6 +286,10 @@ static bool updateIdx(cv::Mat* mat, std::vector& indices, int inc) { return [[Mat alloc] initWithNativeMat:new cv::Mat(_nativePtr->cross(*(cv::Mat*)mat.nativePtr))]; } +- (unsigned char*)dataPtr { + return _nativePtr->data; +} + - (int)depth { return _nativePtr->depth(); } From de93782fab107ffc2193fb23d98916aba1787f37 Mon Sep 17 00:00:00 2001 From: catree Date: Sun, 11 Oct 2020 17:18:05 +0200 Subject: [PATCH 018/152] Move colorscale_deepgreen.jpg to the correct folder. 
--- .../pics/{ => colormaps}/colorscale_deepgreen.jpg | Bin 1 file changed, 0 insertions(+), 0 deletions(-) rename modules/imgproc/doc/pics/{ => colormaps}/colorscale_deepgreen.jpg (100%) diff --git a/modules/imgproc/doc/pics/colorscale_deepgreen.jpg b/modules/imgproc/doc/pics/colormaps/colorscale_deepgreen.jpg similarity index 100% rename from modules/imgproc/doc/pics/colorscale_deepgreen.jpg rename to modules/imgproc/doc/pics/colormaps/colorscale_deepgreen.jpg From d5fd2f0155ffad366f9ac912dfd6d189a7a6a98e Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sun, 11 Oct 2020 21:26:07 +0000 Subject: [PATCH 019/152] release: OpenCV 4.5.0 --- modules/core/include/opencv2/core/version.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/core/include/opencv2/core/version.hpp b/modules/core/include/opencv2/core/version.hpp index 7026fad88f..9e8b1b06ed 100644 --- a/modules/core/include/opencv2/core/version.hpp +++ b/modules/core/include/opencv2/core/version.hpp @@ -8,7 +8,7 @@ #define CV_VERSION_MAJOR 4 #define CV_VERSION_MINOR 5 #define CV_VERSION_REVISION 0 -#define CV_VERSION_STATUS "-pre" +#define CV_VERSION_STATUS "" #define CVAUX_STR_EXP(__A) #__A #define CVAUX_STR(__A) CVAUX_STR_EXP(__A) From 1048feac3b1252884b0ab91a526aa58f3621f348 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 13 Oct 2020 13:41:07 +0000 Subject: [PATCH 020/152] build: winpack_dldt with videoio plugins --- platforms/winpack_dldt/build_package.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/platforms/winpack_dldt/build_package.py b/platforms/winpack_dldt/build_package.py index 8618e11cbf..05991da6b4 100644 --- a/platforms/winpack_dldt/build_package.py +++ b/platforms/winpack_dldt/build_package.py @@ -350,6 +350,8 @@ class Builder: INSTALL_PDB='ON', INSTALL_PDB_COMPONENT_EXCLUDE_FROM_ALL='OFF', + VIDEOIO_PLUGIN_LIST='all', + OPENCV_SKIP_CMAKE_ROOT_CONFIG='ON', OPENCV_BIN_INSTALL_PATH='bin', OPENCV_INCLUDE_INSTALL_PATH='include', From 
25163eb008910f6579c5582300994ce0763069fe Mon Sep 17 00:00:00 2001 From: Welton Rodrigo Torres Nascimento Date: Tue, 13 Oct 2020 11:51:23 -0300 Subject: [PATCH 021/152] Doc: INTER_LINEAR_EXACT unsupported in remap Update documentation to reflect INTER_LINEAR_EXACT being unsupported in remap --- modules/imgproc/include/opencv2/imgproc.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index c73a382ddf..d75f3bcffc 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -2320,8 +2320,8 @@ CV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating poin representation to fixed-point for speed. @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map if map1 is (x,y) points), respectively. -@param interpolation Interpolation method (see #InterpolationFlags). The method #INTER_AREA is -not supported by this function. +@param interpolation Interpolation method (see #InterpolationFlags). The methods #INTER_AREA +and #INTER_LINEAR_EXACT are not supported by this function. @param borderMode Pixel extrapolation method (see #BorderTypes). When borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that corresponds to the "outliers" in the source image are not modified by the function. From 09a62012b2e429d4e4ee8f27fdf5cdffd107c148 Mon Sep 17 00:00:00 2001 From: Welton Rodrigo Torres Nascimento Date: Tue, 13 Oct 2020 11:07:11 -0300 Subject: [PATCH 022/152] Doc update. 
INTER_LINEAR_EXACT unsupported in remap --- modules/gapi/include/opencv2/gapi/core.hpp | 4 ++-- modules/imgproc/include/opencv2/imgproc.hpp | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/gapi/include/opencv2/gapi/core.hpp b/modules/gapi/include/opencv2/gapi/core.hpp index c4ddaf6bd3..2c01328f09 100644 --- a/modules/gapi/include/opencv2/gapi/core.hpp +++ b/modules/gapi/include/opencv2/gapi/core.hpp @@ -1491,8 +1491,8 @@ Output image must be of the same size and depth as input one. CV_32FC1, or CV_32FC2. @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map if map1 is (x,y) points), respectively. -@param interpolation Interpolation method (see cv::InterpolationFlags). The method INTER_AREA is -not supported by this function. +@param interpolation Interpolation method (see cv::InterpolationFlags). The methods #INTER_AREA +and #INTER_LINEAR_EXACT are not supported by this function. @param borderMode Pixel extrapolation method (see cv::BorderTypes). When borderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image that corresponds to the "outliers" in the source image are not modified by the function. diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 4a41ba6b50..8309315ef4 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -2367,8 +2367,8 @@ CV_32FC1, or CV_32FC2. See convertMaps for details on converting a floating poin representation to fixed-point for speed. @param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map if map1 is (x,y) points), respectively. -@param interpolation Interpolation method (see #InterpolationFlags). The method #INTER_AREA is -not supported by this function. +@param interpolation Interpolation method (see #InterpolationFlags). 
The methods #INTER_AREA +and #INTER_LINEAR_EXACT are not supported by this function. @param borderMode Pixel extrapolation method (see #BorderTypes). When borderMode=#BORDER_TRANSPARENT, it means that the pixels in the destination image that corresponds to the "outliers" in the source image are not modified by the function. From 4c048a487e932cd5bf8b560294009bebbb8b8fe5 Mon Sep 17 00:00:00 2001 From: Anna Khakimova Date: Wed, 14 Oct 2020 19:51:35 +0300 Subject: [PATCH 023/152] Merge pull request #18516 from anna-khakimova:ak/bgr2rgb GAPI: Addition new Color conversion kernels to CPU backend. * Add BGR2RGB kernel to CPU backend * Add BGR2I420 and RGB2I420 kernels to CPU backend * Add I4202BGR and I4202RGB kernels to CPU backend --- modules/gapi/include/opencv2/gapi/imgproc.hpp | 120 ++++++++++++++++++ modules/gapi/src/api/kernels_imgproc.cpp | 25 ++++ modules/gapi/src/backends/cpu/gcpuimgproc.cpp | 45 +++++++ .../gapi/test/common/gapi_imgproc_tests.hpp | 5 + .../test/common/gapi_imgproc_tests_inl.hpp | 95 ++++++++++++++ .../gapi/test/cpu/gapi_imgproc_tests_cpu.cpp | 39 ++++++ 6 files changed, 329 insertions(+) diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp b/modules/gapi/include/opencv2/gapi/imgproc.hpp index b4905e932b..23ad41eb25 100644 --- a/modules/gapi/include/opencv2/gapi/imgproc.hpp +++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp @@ -124,6 +124,12 @@ namespace imgproc { } }; + G_TYPED_KERNEL(GBGR2RGB, , "org.opencv.imgproc.colorconvert.bgr2rgb") { + static GMatDesc outMeta(GMatDesc in) { + return in; // type still remains CV_8UC3; + } + }; + G_TYPED_KERNEL(GRGB2YUV, , "org.opencv.imgproc.colorconvert.rgb2yuv") { static GMatDesc outMeta(GMatDesc in) { return in; // type still remains CV_8UC3; @@ -136,6 +142,42 @@ namespace imgproc { } }; + G_TYPED_KERNEL(GBGR2I420, , "org.opencv.imgproc.colorconvert.bgr2i420") { + static GMatDesc outMeta(GMatDesc in) { + GAPI_Assert(in.depth == CV_8U); + GAPI_Assert(in.chan == 3); + GAPI_Assert(in.size.height % 
2 == 0); + return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2)); + } + }; + + G_TYPED_KERNEL(GRGB2I420, , "org.opencv.imgproc.colorconvert.rgb2i420") { + static GMatDesc outMeta(GMatDesc in) { + GAPI_Assert(in.depth == CV_8U); + GAPI_Assert(in.chan == 3); + GAPI_Assert(in.size.height % 2 == 0); + return in.withType(in.depth, 1).withSize(Size(in.size.width, in.size.height * 3 / 2)); + } + }; + + G_TYPED_KERNEL(GI4202BGR, , "org.opencv.imgproc.colorconvert.i4202bgr") { + static GMatDesc outMeta(GMatDesc in) { + GAPI_Assert(in.depth == CV_8U); + GAPI_Assert(in.chan == 1); + GAPI_Assert(in.size.height % 3 == 0); + return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3)); + } + }; + + G_TYPED_KERNEL(GI4202RGB, , "org.opencv.imgproc.colorconvert.i4202rgb") { + static GMatDesc outMeta(GMatDesc in) { + GAPI_Assert(in.depth == CV_8U); + GAPI_Assert(in.chan == 1); + GAPI_Assert(in.size.height % 3 == 0); + return in.withType(in.depth, 3).withSize(Size(in.size.width, in.size.height * 2 / 3)); + } + }; + G_TYPED_KERNEL(GNV12toRGB, , "org.opencv.imgproc.colorconvert.nv12torgb") { static GMatDesc outMeta(GMatDesc in_y, GMatDesc in_uv) { GAPI_Assert(in_y.chan == 1); @@ -812,6 +854,20 @@ The algorithm normalizes the brightness and increases the contrast of the image. */ GAPI_EXPORTS GMat equalizeHist(const GMat& src); +/** @brief Converts an image from BGR color space to RGB color space. + +The function converts an input image from BGR color space to RGB. +The conventional ranges for B, G, and R channel values are 0 to 255. + +Output image is 8-bit unsigned 3-channel image @ref CV_8UC3. + +@note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2rgb" + +@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3. +@sa RGB2BGR +*/ +GAPI_EXPORTS GMat BGR2RGB(const GMat& src); + //! @} gapi_filters //! 
@addtogroup gapi_colorconvert @@ -871,6 +927,70 @@ Output image must be 8-bit unsigned 3-channel image @ref CV_8UC3. */ GAPI_EXPORTS GMat RGB2YUV(const GMat& src); +/** @brief Converts an image from BGR color space to I420 color space. + +The function converts an input image from BGR color space to I420. +The conventional ranges for R, G, and B channel values are 0 to 255. + +Output image must be 8-bit unsigned 1-channel image. @ref CV_8UC1. +Width of I420 output image must be the same as width of input image. +Height of I420 output image must be equal 3/2 from height of input image. + +@note Function textual ID is "org.opencv.imgproc.colorconvert.bgr2i420" + +@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3. +@sa I4202BGR +*/ +GAPI_EXPORTS GMat BGR2I420(const GMat& src); + +/** @brief Converts an image from RGB color space to I420 color space. + +The function converts an input image from RGB color space to I420. +The conventional ranges for R, G, and B channel values are 0 to 255. + +Output image must be 8-bit unsigned 1-channel image. @ref CV_8UC1. +Width of I420 output image must be the same as width of input image. +Height of I420 output image must be equal 3/2 from height of input image. + +@note Function textual ID is "org.opencv.imgproc.colorconvert.rgb2i420" + +@param src input image: 8-bit unsigned 3-channel image @ref CV_8UC3. +@sa I4202RGB +*/ +GAPI_EXPORTS GMat RGB2I420(const GMat& src); + +/** @brief Converts an image from I420 color space to BGR color space. + +The function converts an input image from I420 color space to BGR. +The conventional ranges for B, G, and R channel values are 0 to 255. + +Output image must be 8-bit unsigned 3-channel image. @ref CV_8UC3. +Width of BGR output image must be the same as width of input image. +Height of BGR output image must be equal 2/3 from height of input image. 
+ +@note Function textual ID is "org.opencv.imgproc.colorconvert.i4202bgr" + +@param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1. +@sa BGR2I420 +*/ +GAPI_EXPORTS GMat I4202BGR(const GMat& src); + +/** @brief Converts an image from I420 color space to RGB color space. + +The function converts an input image from I420 color space to RGB. +The conventional ranges for B, G, and R channel values are 0 to 255. + +Output image must be 8-bit unsigned 3-channel image. @ref CV_8UC3. +Width of RGB output image must be the same as width of input image. +Height of RGB output image must be equal 2/3 from height of input image. + +@note Function textual ID is "org.opencv.imgproc.colorconvert.i4202rgb" + +@param src input image: 8-bit unsigned 1-channel image @ref CV_8UC1. +@sa RGB2I420 +*/ +GAPI_EXPORTS GMat I4202RGB(const GMat& src); + /** @brief Converts an image from BGR color space to LUV color space. The function converts an input image from BGR color space to LUV. diff --git a/modules/gapi/src/api/kernels_imgproc.cpp b/modules/gapi/src/api/kernels_imgproc.cpp index 108eefcb81..652f83935f 100644 --- a/modules/gapi/src/api/kernels_imgproc.cpp +++ b/modules/gapi/src/api/kernels_imgproc.cpp @@ -115,6 +115,11 @@ cv::GArray goodFeaturesToTrack(const GMat& image, int maxCorners, d useHarrisDetector, k); } +GMat BGR2RGB(const GMat& src) +{ + return imgproc::GBGR2RGB::on(src); +} + GMat RGB2Gray(const GMat& src) { return imgproc::GRGB2Gray::on(src); @@ -160,6 +165,26 @@ GMat YUV2RGB(const GMat& src) return imgproc::GYUV2RGB::on(src); } +GMat BGR2I420(const GMat& src) +{ + return imgproc::GBGR2I420::on(src); +} + +GMat RGB2I420(const GMat& src) +{ + return imgproc::GRGB2I420::on(src); +} + +GMat I4202BGR(const GMat& src) +{ + return imgproc::GI4202BGR::on(src); +} + +GMat I4202RGB(const GMat& src) +{ + return imgproc::GI4202RGB::on(src); +} + GMat NV12toRGB(const GMat& src_y, const GMat& src_uv) { return imgproc::GNV12toRGB::on(src_y, src_uv); diff --git 
a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp index 8104565f03..c07ed6785c 100644 --- a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp +++ b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp @@ -211,6 +211,46 @@ GAPI_OCV_KERNEL(GCPUGoodFeatures, cv::gapi::imgproc::GGoodFeatures) } }; +GAPI_OCV_KERNEL(GCPUBGR2RGB, cv::gapi::imgproc::GBGR2RGB) +{ + static void run(const cv::Mat& in, cv::Mat &out) + { + cv::cvtColor(in, out, cv::COLOR_BGR2RGB); + } +}; + +GAPI_OCV_KERNEL(GCPUBGR2I420, cv::gapi::imgproc::GBGR2I420) +{ + static void run(const cv::Mat& in, cv::Mat &out) + { + cv::cvtColor(in, out, cv::COLOR_BGR2YUV_I420); + } +}; + +GAPI_OCV_KERNEL(GCPURGB2I420, cv::gapi::imgproc::GRGB2I420) +{ + static void run(const cv::Mat& in, cv::Mat &out) + { + cv::cvtColor(in, out, cv::COLOR_RGB2YUV_I420); + } +}; + +GAPI_OCV_KERNEL(GCPUI4202BGR, cv::gapi::imgproc::GI4202BGR) +{ + static void run(const cv::Mat& in, cv::Mat &out) + { + cv::cvtColor(in, out, cv::COLOR_YUV2BGR_I420); + } +}; + +GAPI_OCV_KERNEL(GCPUI4202RGB, cv::gapi::imgproc::GI4202RGB) +{ + static void run(const cv::Mat& in, cv::Mat &out) + { + cv::cvtColor(in, out, cv::COLOR_YUV2RGB_I420); + } +}; + GAPI_OCV_KERNEL(GCPURGB2YUV, cv::gapi::imgproc::GRGB2YUV) { static void run(const cv::Mat& in, cv::Mat &out) @@ -445,8 +485,13 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels() , GCPUCanny , GCPUGoodFeatures , GCPUEqualizeHist + , GCPUBGR2RGB , GCPURGB2YUV , GCPUYUV2RGB + , GCPUBGR2I420 + , GCPURGB2I420 + , GCPUI4202BGR + , GCPUI4202RGB , GCPUNV12toRGB , GCPUNV12toBGR , GCPURGB2Lab diff --git a/modules/gapi/test/common/gapi_imgproc_tests.hpp b/modules/gapi/test/common/gapi_imgproc_tests.hpp index cd074efda0..38a02985e7 100644 --- a/modules/gapi/test/common/gapi_imgproc_tests.hpp +++ b/modules/gapi/test/common/gapi_imgproc_tests.hpp @@ -64,9 +64,14 @@ GAPI_TEST_FIXTURE_SPEC_PARAMS(GoodFeaturesTest, double,int,bool), 8, cmpF, fileName, type, maxCorners, 
qualityLevel, minDistance, blockSize, useHarrisDetector) +GAPI_TEST_FIXTURE(BGR2RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(RGB2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(BGR2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(RGB2YUVTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) +GAPI_TEST_FIXTURE(BGR2I420Test, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) +GAPI_TEST_FIXTURE(RGB2I420Test, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) +GAPI_TEST_FIXTURE(I4202BGRTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) +GAPI_TEST_FIXTURE(I4202RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(YUV2RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(YUV2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(NV12toRGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) diff --git a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp index 4aadc17d5d..95728e87b7 100644 --- a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp +++ b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp @@ -447,6 +447,25 @@ TEST_P(GoodFeaturesTest, AccuracyTest) } } +TEST_P(BGR2RGBTest, AccuracyTest) +{ + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::BGR2RGB(in); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_BGR2RGB); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), sz); + } +} + TEST_P(RGB2GrayTest, AccuracyTest) { // G-API code 
////////////////////////////////////////////////////////////// @@ -523,6 +542,82 @@ TEST_P(YUV2RGBTest, AccuracyTest) } } +TEST_P(BGR2I420Test, AccuracyTest) +{ + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::BGR2I420(in); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_BGR2YUV_I420); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 3 / 2)); + } +} + +TEST_P(RGB2I420Test, AccuracyTest) +{ + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::RGB2I420(in); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_RGB2YUV_I420); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 3 / 2)); + } +} + +TEST_P(I4202BGRTest, AccuracyTest) +{ + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::I4202BGR(in); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_YUV2BGR_I420); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 2 / 3)); + } +} + +TEST_P(I4202RGBTest, AccuracyTest) +{ 
+ // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::I4202RGB(in); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::cvtColor(in_mat1, out_mat_ocv, cv::COLOR_YUV2RGB_I420); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), Size(sz.width, sz.height * 2 / 3)); + } +} + TEST_P(NV12toRGBTest, AccuracyTest) { // G-API code ////////////////////////////////////////////////////////////// diff --git a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp index 8a94583fcc..e7f9667096 100644 --- a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp +++ b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp @@ -241,6 +241,13 @@ INSTANTIATE_TEST_CASE_P(GoodFeaturesInternalTestCPU, GoodFeaturesTest, Values(3), Values(true))); +INSTANTIATE_TEST_CASE_P(BGR2RGBTestCPU, BGR2RGBTest, + Combine(Values(CV_8UC3), + Values(cv::Size(1280, 720), + cv::Size(640, 480)), + Values(CV_8UC3), + Values(IMGPROC_CPU), + Values(AbsExact().to_compare_obj()))); INSTANTIATE_TEST_CASE_P(RGB2GrayTestCPU, RGB2GrayTest, Combine(Values(CV_8UC3), @@ -274,6 +281,38 @@ INSTANTIATE_TEST_CASE_P(YUV2RGBTestCPU, YUV2RGBTest, Values(IMGPROC_CPU), Values(AbsExact().to_compare_obj()))); +INSTANTIATE_TEST_CASE_P(BGR2I420TestCPU, BGR2I420Test, + Combine(Values(CV_8UC3), + Values(cv::Size(1280, 720), + cv::Size(640, 480)), + Values(CV_8UC1), + Values(IMGPROC_CPU), + Values(AbsExact().to_compare_obj()))); + +INSTANTIATE_TEST_CASE_P(RGB2I420TestCPU, RGB2I420Test, + Combine(Values(CV_8UC3), + Values(cv::Size(1280, 720), + cv::Size(640, 480)), + Values(CV_8UC1), + Values(IMGPROC_CPU), + Values(AbsExact().to_compare_obj()))); + +INSTANTIATE_TEST_CASE_P(I4202BGRTestCPU, I4202BGRTest, + 
Combine(Values(CV_8UC1), + Values(cv::Size(1280, 720), + cv::Size(640, 480)), + Values(CV_8UC3), + Values(IMGPROC_CPU), + Values(AbsExact().to_compare_obj()))); + +INSTANTIATE_TEST_CASE_P(I4202RGBTestCPU, I4202RGBTest, + Combine(Values(CV_8UC1), + Values(cv::Size(1280, 720), + cv::Size(640, 480)), + Values(CV_8UC3), + Values(IMGPROC_CPU), + Values(AbsExact().to_compare_obj()))); + INSTANTIATE_TEST_CASE_P(NV12toRGBTestCPU, NV12toRGBTest, Combine(Values(CV_8UC1), Values(cv::Size(1280, 720), From 7de189114b5ab9b73144833440f58183b759fbe5 Mon Sep 17 00:00:00 2001 From: mtfrctl Date: Thu, 15 Oct 2020 01:58:06 +0900 Subject: [PATCH 024/152] Merge pull request #18547 from mtfrctl:objc-conversions-macosx Mat conversions for macOS/AppKit * Extract CoreGraphics conversion logics from ios_conversions.mm to apple_conversions.h, apple_conversions. Add macosx_conversions.mm * Add macosx.h * Add Mat+Conversions.h and Mat+Conversions.mm * Delete duplicated declaration from apple_conversion.mm * Use short license header * Add compile guard * Delete unused imports * Move precomp.hpp import from header to implementation * Add macosx.h to skip headers * Fix compile guard condition * Use short license header * Remove commented out unused code --- cmake/templates/opencv_abi.xml.in | 1 + modules/imgcodecs/CMakeLists.txt | 8 ++ .../imgcodecs/include/opencv2/imgcodecs.hpp | 1 + .../include/opencv2/imgcodecs/macosx.h | 20 ++++ .../misc/objc/macosx/Mat+Converters.h | 32 +++++++ .../misc/objc/macosx/Mat+Converters.mm | 44 +++++++++ modules/imgcodecs/src/apple_conversions.h | 11 +++ modules/imgcodecs/src/apple_conversions.mm | 94 ++++++++++++++++++ modules/imgcodecs/src/ios_conversions.mm | 96 ++----------------- modules/imgcodecs/src/macosx_conversions.mm | 25 +++++ modules/objc/generator/gen_objc.py | 5 + 11 files changed, 248 insertions(+), 89 deletions(-) create mode 100644 modules/imgcodecs/include/opencv2/imgcodecs/macosx.h create mode 100644 
modules/imgcodecs/misc/objc/macosx/Mat+Converters.h create mode 100644 modules/imgcodecs/misc/objc/macosx/Mat+Converters.mm create mode 100644 modules/imgcodecs/src/apple_conversions.h create mode 100644 modules/imgcodecs/src/apple_conversions.mm create mode 100644 modules/imgcodecs/src/macosx_conversions.mm diff --git a/cmake/templates/opencv_abi.xml.in b/cmake/templates/opencv_abi.xml.in index 212b6d67d4..711c4e99ee 100644 --- a/cmake/templates/opencv_abi.xml.in +++ b/cmake/templates/opencv_abi.xml.in @@ -32,6 +32,7 @@ opencv2/flann/hdf5.h opencv2/imgcodecs/imgcodecs_c.h opencv2/imgcodecs/ios.h + opencv2/imgcodecs/macosx.h opencv2/videoio/videoio_c.h opencv2/videoio/cap_ios.h opencv2/xobjdetect/private.hpp diff --git a/modules/imgcodecs/CMakeLists.txt b/modules/imgcodecs/CMakeLists.txt index f8bfd18e1f..80f7e1c248 100644 --- a/modules/imgcodecs/CMakeLists.txt +++ b/modules/imgcodecs/CMakeLists.txt @@ -113,10 +113,18 @@ file(GLOB imgcodecs_ext_hdrs "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/legacy/*.h" ) +if(APPLE) + list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/apple_conversions.h) + list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/apple_conversions.mm) +endif() if(IOS) list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/ios_conversions.mm) list(APPEND IMGCODECS_LIBRARIES "-framework UIKit" "-framework AssetsLibrary") endif() +if(APPLE AND (NOT IOS)) + list(APPEND imgcodecs_srcs ${CMAKE_CURRENT_LIST_DIR}/src/macosx_conversions.mm) + list(APPEND IMGCODECS_LIBRARIES "-framework AppKit") +endif() if(APPLE_FRAMEWORK) list(APPEND IMGCODECS_LIBRARIES "-framework Accelerate" "-framework CoreGraphics" "-framework QuartzCore") endif() diff --git a/modules/imgcodecs/include/opencv2/imgcodecs.hpp b/modules/imgcodecs/include/opencv2/imgcodecs.hpp index 97ca866e1b..c07a905914 100644 --- a/modules/imgcodecs/include/opencv2/imgcodecs.hpp +++ b/modules/imgcodecs/include/opencv2/imgcodecs.hpp @@ -50,6 +50,7 @@ @{ @defgroup imgcodecs_c C API 
@defgroup imgcodecs_ios iOS glue + @defgroup imgcodecs_macosx MacOS(OSX) glue @} */ diff --git a/modules/imgcodecs/include/opencv2/imgcodecs/macosx.h b/modules/imgcodecs/include/opencv2/imgcodecs/macosx.h new file mode 100644 index 0000000000..f5d9c082c4 --- /dev/null +++ b/modules/imgcodecs/include/opencv2/imgcodecs/macosx.h @@ -0,0 +1,20 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#if !defined(__APPLE__) || !defined(__MACH__) +#error This header should be used in macOS ObjC/Swift projects. +#endif + +#import +#include "opencv2/core.hpp" + +//! @addtogroup imgcodecs_macosx +//! @{ + +CV_EXPORTS CGImageRef MatToCGImage(const cv::Mat& image); +CV_EXPORTS void CGImageToMat(const CGImageRef image, cv::Mat& m, bool alphaExist = false); +CV_EXPORTS NSImage* MatToNSImage(const cv::Mat& image); +CV_EXPORTS void NSImageToMat(const NSImage* image, cv::Mat& m, bool alphaExist = false); + +//! @} diff --git a/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h new file mode 100644 index 0000000000..4abf806d1e --- /dev/null +++ b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h @@ -0,0 +1,32 @@ +// +// Mat+Converters.h +// +// Created by Masaya Tsuruta on 2020/10/08. 
+// + +#pragma once + +#ifdef __cplusplus +#import "opencv.hpp" +#else +#define CV_EXPORTS +#endif + +#import +#import +#import "Mat.h" + +NS_ASSUME_NONNULL_BEGIN + +CV_EXPORTS @interface Mat (Converters) + +-(CGImageRef)toCGImage; +-(instancetype)initWithCGImage:(CGImageRef)image; +-(instancetype)initWithCGImage:(CGImageRef)image alphaExist:(BOOL)alphaExist; +-(NSImage*)toNSImage; +-(instancetype)initWithNSImage:(NSImage*)image; +-(instancetype)initWithNSImage:(NSImage*)image alphaExist:(BOOL)alphaExist; + +@end + +NS_ASSUME_NONNULL_END diff --git a/modules/imgcodecs/misc/objc/macosx/Mat+Converters.mm b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.mm new file mode 100644 index 0000000000..725569784a --- /dev/null +++ b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.mm @@ -0,0 +1,44 @@ +// +// Mat+Converters.mm +// +// Created by Masaya Tsuruta on 2020/10/08. +// + +#import "Mat+Converters.h" +#import + +@implementation Mat (Converters) + +-(CGImageRef)toCGImage { + return MatToCGImage(self.nativeRef); +} + +-(instancetype)initWithCGImage:(CGImageRef)image { + return [self initWithCGImage:image alphaExist:NO]; +} + +-(instancetype)initWithCGImage:(CGImageRef)image alphaExist:(BOOL)alphaExist { + self = [self init]; + if (self) { + CGImageToMat(image, self.nativeRef, (bool)alphaExist); + } + return self; +} + +-(NSImage*)toNSImage { + return MatToNSImage(self.nativeRef); +} + +-(instancetype)initWithNSImage:(NSImage*)image { + return [self initWithNSImage:image alphaExist:NO]; +} + +-(instancetype)initWithNSImage:(NSImage*)image alphaExist:(BOOL)alphaExist { + self = [self init]; + if (self) { + NSImageToMat(image, self.nativeRef, (bool)alphaExist); + } + return self; +} + +@end diff --git a/modules/imgcodecs/src/apple_conversions.h b/modules/imgcodecs/src/apple_conversions.h new file mode 100644 index 0000000000..2762424379 --- /dev/null +++ b/modules/imgcodecs/src/apple_conversions.h @@ -0,0 +1,11 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#import +#import +#import +#include "opencv2/core.hpp" + +CV_EXPORTS CGImageRef MatToCGImage(const cv::Mat& image); +CV_EXPORTS void CGImageToMat(const CGImageRef image, cv::Mat& m, bool alphaExist); diff --git a/modules/imgcodecs/src/apple_conversions.mm b/modules/imgcodecs/src/apple_conversions.mm new file mode 100644 index 0000000000..6126039ce0 --- /dev/null +++ b/modules/imgcodecs/src/apple_conversions.mm @@ -0,0 +1,94 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "apple_conversions.h" +#include "precomp.hpp" + +CGImageRef MatToCGImage(const cv::Mat& image) { + NSData *data = [NSData dataWithBytes:image.data + length:image.step.p[0] * image.rows]; + + CGColorSpaceRef colorSpace; + + if (image.elemSize() == 1) { + colorSpace = CGColorSpaceCreateDeviceGray(); + } else { + colorSpace = CGColorSpaceCreateDeviceRGB(); + } + + CGDataProviderRef provider = + CGDataProviderCreateWithCFData((__bridge CFDataRef)data); + + // Preserve alpha transparency, if exists + bool alpha = image.channels() == 4; + CGBitmapInfo bitmapInfo = (alpha ? 
kCGImageAlphaLast : kCGImageAlphaNone) | kCGBitmapByteOrderDefault; + + // Creating CGImage from cv::Mat + CGImageRef imageRef = CGImageCreate(image.cols, + image.rows, + 8 * image.elemSize1(), + 8 * image.elemSize(), + image.step.p[0], + colorSpace, + bitmapInfo, + provider, + NULL, + false, + kCGRenderingIntentDefault + ); + + CGDataProviderRelease(provider); + CGColorSpaceRelease(colorSpace); + + return imageRef; +} + +void CGImageToMat(const CGImageRef image, cv::Mat& m, bool alphaExist) { + CGColorSpaceRef colorSpace = CGImageGetColorSpace(image); + CGFloat cols = CGImageGetWidth(image), rows = CGImageGetHeight(image); + CGContextRef contextRef; + CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedLast; + if (CGColorSpaceGetModel(colorSpace) == kCGColorSpaceModelMonochrome) + { + m.create(rows, cols, CV_8UC1); // 8 bits per component, 1 channel + bitmapInfo = kCGImageAlphaNone; + if (!alphaExist) + bitmapInfo = kCGImageAlphaNone; + else + m = cv::Scalar(0); + contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8, + m.step[0], colorSpace, + bitmapInfo); + } + else if (CGColorSpaceGetModel(colorSpace) == kCGColorSpaceModelIndexed) + { + // CGBitmapContextCreate() does not support indexed color spaces. 
+ colorSpace = CGColorSpaceCreateDeviceRGB(); + m.create(rows, cols, CV_8UC4); // 8 bits per component, 4 channels + if (!alphaExist) + bitmapInfo = kCGImageAlphaNoneSkipLast | + kCGBitmapByteOrderDefault; + else + m = cv::Scalar(0); + contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8, + m.step[0], colorSpace, + bitmapInfo); + CGColorSpaceRelease(colorSpace); + } + else + { + m.create(rows, cols, CV_8UC4); // 8 bits per component, 4 channels + if (!alphaExist) + bitmapInfo = kCGImageAlphaNoneSkipLast | + kCGBitmapByteOrderDefault; + else + m = cv::Scalar(0); + contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8, + m.step[0], colorSpace, + bitmapInfo); + } + CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), + image); + CGContextRelease(contextRef); +} diff --git a/modules/imgcodecs/src/ios_conversions.mm b/modules/imgcodecs/src/ios_conversions.mm index 53fb788d65..2aba323a2d 100644 --- a/modules/imgcodecs/src/ios_conversions.mm +++ b/modules/imgcodecs/src/ios_conversions.mm @@ -41,105 +41,23 @@ //M*/ #import -#import -#import -#import -#include "opencv2/core.hpp" -#include "precomp.hpp" +#include "apple_conversions.h" CV_EXPORTS UIImage* MatToUIImage(const cv::Mat& image); CV_EXPORTS void UIImageToMat(const UIImage* image, cv::Mat& m, bool alphaExist); UIImage* MatToUIImage(const cv::Mat& image) { - - NSData *data = [NSData dataWithBytes:image.data - length:image.step.p[0] * image.rows]; - - CGColorSpaceRef colorSpace; - - if (image.elemSize() == 1) { - colorSpace = CGColorSpaceCreateDeviceGray(); - } else { - colorSpace = CGColorSpaceCreateDeviceRGB(); - } - - CGDataProviderRef provider = - CGDataProviderCreateWithCFData((__bridge CFDataRef)data); - - // Preserve alpha transparency, if exists - bool alpha = image.channels() == 4; - CGBitmapInfo bitmapInfo = (alpha ? 
kCGImageAlphaLast : kCGImageAlphaNone) | kCGBitmapByteOrderDefault; - // Creating CGImage from cv::Mat - CGImageRef imageRef = CGImageCreate(image.cols, - image.rows, - 8 * image.elemSize1(), - 8 * image.elemSize(), - image.step.p[0], - colorSpace, - bitmapInfo, - provider, - NULL, - false, - kCGRenderingIntentDefault - ); - + CGImageRef imageRef = MatToCGImage(image); // Getting UIImage from CGImage - UIImage *finalImage = [UIImage imageWithCGImage:imageRef]; + UIImage *uiImage = [UIImage imageWithCGImage:imageRef]; CGImageRelease(imageRef); - CGDataProviderRelease(provider); - CGColorSpaceRelease(colorSpace); - return finalImage; + return uiImage; } -void UIImageToMat(const UIImage* image, - cv::Mat& m, bool alphaExist) { - CGColorSpaceRef colorSpace = CGImageGetColorSpace(image.CGImage); - CGFloat cols = CGImageGetWidth(image.CGImage), rows = CGImageGetHeight(image.CGImage); - CGContextRef contextRef; - CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedLast; - if (CGColorSpaceGetModel(colorSpace) == kCGColorSpaceModelMonochrome) - { - m.create(rows, cols, CV_8UC1); // 8 bits per component, 1 channel - bitmapInfo = kCGImageAlphaNone; - if (!alphaExist) - bitmapInfo = kCGImageAlphaNone; - else - m = cv::Scalar(0); - contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8, - m.step[0], colorSpace, - bitmapInfo); - } - else if (CGColorSpaceGetModel(colorSpace) == kCGColorSpaceModelIndexed) - { - // CGBitmapContextCreate() does not support indexed color spaces. 
- colorSpace = CGColorSpaceCreateDeviceRGB(); - m.create(rows, cols, CV_8UC4); // 8 bits per component, 4 channels - if (!alphaExist) - bitmapInfo = kCGImageAlphaNoneSkipLast | - kCGBitmapByteOrderDefault; - else - m = cv::Scalar(0); - contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8, - m.step[0], colorSpace, - bitmapInfo); - CGColorSpaceRelease(colorSpace); - } - else - { - m.create(rows, cols, CV_8UC4); // 8 bits per component, 4 channels - if (!alphaExist) - bitmapInfo = kCGImageAlphaNoneSkipLast | - kCGBitmapByteOrderDefault; - else - m = cv::Scalar(0); - contextRef = CGBitmapContextCreate(m.data, m.cols, m.rows, 8, - m.step[0], colorSpace, - bitmapInfo); - } - CGContextDrawImage(contextRef, CGRectMake(0, 0, cols, rows), - image.CGImage); - CGContextRelease(contextRef); +void UIImageToMat(const UIImage* image, cv::Mat& m, bool alphaExist) { + CGImageRef imageRef = image.CGImage; + CGImageToMat(imageRef, m, alphaExist); } diff --git a/modules/imgcodecs/src/macosx_conversions.mm b/modules/imgcodecs/src/macosx_conversions.mm new file mode 100644 index 0000000000..c1827e71f1 --- /dev/null +++ b/modules/imgcodecs/src/macosx_conversions.mm @@ -0,0 +1,25 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+ +#import +#include "apple_conversions.h" + +CV_EXPORTS NSImage* MatToNSImage(const cv::Mat& image); +CV_EXPORTS void NSImageToMat(const NSImage* image, cv::Mat& m, bool alphaExist); + +NSImage* MatToNSImage(const cv::Mat& image) { + // Creating CGImage from cv::Mat + CGImageRef imageRef = MatToCGImage(image); + + // Getting NSImage from CGImage + NSImage *nsImage = [[NSImage alloc] initWithCGImage:imageRef size:CGSizeMake(CGImageGetWidth(imageRef), CGImageGetHeight(imageRef))]; + CGImageRelease(imageRef); + + return nsImage; +} + +void NSImageToMat(const NSImage* image, cv::Mat& m, bool alphaExist) { + CGImageRef imageRef = [image CGImageForProposedRect:NULL context:NULL hints:NULL]; + CGImageToMat(imageRef, m, alphaExist); +} diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py index 1ae00ab5f1..8c4a5b2cac 100755 --- a/modules/objc/generator/gen_objc.py +++ b/modules/objc/generator/gen_objc.py @@ -1584,6 +1584,11 @@ if __name__ == "__main__": if os.path.exists(ios_files_dir): copied_files += copy_objc_files(ios_files_dir, objc_base_path, module, True) + if args.target == 'osx': + osx_files_dir = os.path.join(misc_location, 'macosx') + if os.path.exists(osx_files_dir): + copied_files += copy_objc_files(osx_files_dir, objc_base_path, module, True) + objc_test_files_dir = os.path.join(misc_location, 'test') if os.path.exists(objc_test_files_dir): copy_objc_files(objc_test_files_dir, objc_test_base_path, 'test', False) From 0d3e05f9b3a3c8223d9e80d89d8176fdbe3158a4 Mon Sep 17 00:00:00 2001 From: Anatoliy Talamanov Date: Thu, 15 Oct 2020 01:21:09 +0300 Subject: [PATCH 025/152] Merge pull request #18493 from TolyaTalamanov:at/wrap-streaming [G-API Wrap streaming * Wrap streaming * Fix build * Add comments * Remove comment * Fix comments to review * Add test for python pull overload --- .../include/opencv2/gapi/gcomputation.hpp | 4 +- modules/gapi/include/opencv2/gapi/gproto.hpp | 2 +- .../gapi/include/opencv2/gapi/gstreaming.hpp | 17 ++- 
modules/gapi/include/opencv2/gapi/imgproc.hpp | 2 +- .../include/opencv2/gapi/streaming/cap.hpp | 6 + modules/gapi/misc/python/pyopencv_gapi.hpp | 32 +++++ modules/gapi/misc/python/shadow_gapi.hpp | 11 ++ .../misc/python/test/test_gapi_streaming.py | 129 ++++++++++++++++++ modules/gapi/src/compiler/gcompiler.cpp | 8 ++ modules/gapi/src/compiler/gstreaming.cpp | 33 +++++ modules/gapi/src/compiler/gstreaming_priv.hpp | 6 + .../test/streaming/gapi_streaming_tests.cpp | 35 +++++ 12 files changed, 274 insertions(+), 11 deletions(-) create mode 100644 modules/gapi/misc/python/test/test_gapi_streaming.py diff --git a/modules/gapi/include/opencv2/gapi/gcomputation.hpp b/modules/gapi/include/opencv2/gapi/gcomputation.hpp index 1172c0f5d6..8732ada0d6 100644 --- a/modules/gapi/include/opencv2/gapi/gcomputation.hpp +++ b/modules/gapi/include/opencv2/gapi/gcomputation.hpp @@ -436,7 +436,7 @@ public: * * @sa @ref gapi_compile_args */ - GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {}); + GAPI_WRAP GStreamingCompiled compileStreaming(GMetaArgs &&in_metas, GCompileArgs &&args = {}); /** * @brief Compile the computation for streaming mode. @@ -457,7 +457,7 @@ public: * * @sa @ref gapi_compile_args */ - GStreamingCompiled compileStreaming(GCompileArgs &&args = {}); + GAPI_WRAP GStreamingCompiled compileStreaming(GCompileArgs &&args = {}); // 2. 
Direct metadata version /** diff --git a/modules/gapi/include/opencv2/gapi/gproto.hpp b/modules/gapi/include/opencv2/gapi/gproto.hpp index fbcccb38ea..f91fcdb2c8 100644 --- a/modules/gapi/include/opencv2/gapi/gproto.hpp +++ b/modules/gapi/include/opencv2/gapi/gproto.hpp @@ -135,7 +135,7 @@ GRunArg value_of(const GOrigin &origin); // Transform run-time computation arguments into a collection of metadata // extracted from that arguments GMetaArg GAPI_EXPORTS descr_of(const GRunArg &arg ); -GMetaArgs GAPI_EXPORTS descr_of(const GRunArgs &args); +GMetaArgs GAPI_EXPORTS_W descr_of(const GRunArgs &args); // Transform run-time operation result argument into metadata extracted from that argument // Used to compare the metadata, which generated at compile time with the metadata result operation in run time diff --git a/modules/gapi/include/opencv2/gapi/gstreaming.hpp b/modules/gapi/include/opencv2/gapi/gstreaming.hpp index 7079042069..f45c30bdae 100644 --- a/modules/gapi/include/opencv2/gapi/gstreaming.hpp +++ b/modules/gapi/include/opencv2/gapi/gstreaming.hpp @@ -49,11 +49,11 @@ namespace cv { * * @sa GCompiled */ -class GAPI_EXPORTS GStreamingCompiled +class GAPI_EXPORTS_W_SIMPLE GStreamingCompiled { public: class GAPI_EXPORTS Priv; - GStreamingCompiled(); + GAPI_WRAP GStreamingCompiled(); // FIXME: More overloads? /** @@ -96,7 +96,7 @@ public: * @param ins vector of inputs to process. * @sa gin */ - void setSource(GRunArgs &&ins); + GAPI_WRAP void setSource(GRunArgs &&ins); /** * @brief Specify an input video stream for a single-input @@ -109,7 +109,7 @@ public: * @param s a shared pointer to IStreamSource representing the * input video stream. */ - void setSource(const gapi::wip::IStreamSource::Ptr& s); + GAPI_WRAP void setSource(const gapi::wip::IStreamSource::Ptr& s); /** * @brief Start the pipeline execution. @@ -126,7 +126,7 @@ public: * start()/stop()/setSource() may be called on the same object in * multiple threads in your application. 
*/ - void start(); + GAPI_WRAP void start(); /** * @brief Get the next processed frame from the pipeline. @@ -150,6 +150,9 @@ public: */ bool pull(cv::GRunArgsP &&outs); + // NB: Used from python + GAPI_WRAP std::tuple pull(); + /** * @brief Try to get the next processed frame from the pipeline. * @@ -172,7 +175,7 @@ public: * * Throws if the pipeline is not running. */ - void stop(); + GAPI_WRAP void stop(); /** * @brief Test if the pipeline is running. @@ -184,7 +187,7 @@ public: * * @return true if the current stream is not over yet. */ - bool running() const; + GAPI_WRAP bool running() const; /// @private Priv& priv(); diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp b/modules/gapi/include/opencv2/gapi/imgproc.hpp index 23ad41eb25..294b3b7842 100644 --- a/modules/gapi/include/opencv2/gapi/imgproc.hpp +++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp @@ -497,7 +497,7 @@ The median filter uses cv::BORDER_REPLICATE internally to cope with border pixel @param ksize aperture linear size; it must be odd and greater than 1, for example: 3, 5, 7 ... @sa boxFilter, gaussianBlur */ -GAPI_EXPORTS GMat medianBlur(const GMat& src, int ksize); +GAPI_EXPORTS_W GMat medianBlur(const GMat& src, int ksize); /** @brief Erodes an image by using a specific structuring element. 
diff --git a/modules/gapi/include/opencv2/gapi/streaming/cap.hpp b/modules/gapi/include/opencv2/gapi/streaming/cap.hpp index faa555063a..9781ef1ffb 100644 --- a/modules/gapi/include/opencv2/gapi/streaming/cap.hpp +++ b/modules/gapi/include/opencv2/gapi/streaming/cap.hpp @@ -103,6 +103,12 @@ protected: } }; +// NB: Overload for using from python +GAPI_EXPORTS_W cv::Ptr inline make_capture_src(const std::string& path) +{ + return make_src(path); +} + } // namespace wip } // namespace gapi } // namespace cv diff --git a/modules/gapi/misc/python/pyopencv_gapi.hpp b/modules/gapi/misc/python/pyopencv_gapi.hpp index 702e8c4032..0e862a4010 100644 --- a/modules/gapi/misc/python/pyopencv_gapi.hpp +++ b/modules/gapi/misc/python/pyopencv_gapi.hpp @@ -3,7 +3,14 @@ #ifdef HAVE_OPENCV_GAPI +// NB: Python wrapper replaces :: with _ for classes using gapi_GKernelPackage = cv::gapi::GKernelPackage; +using gapi_wip_IStreamSource_Ptr = cv::Ptr; + +// FIXME: Python wrapper generate code without namespace std, +// so it cause error: "string wasn't declared" +// WA: Create using +using std::string; template<> bool pyopencv_to(PyObject* obj, std::vector& value, const ArgInfo& info) @@ -78,6 +85,18 @@ PyObject* pyopencv_from(const GRunArgs& value) return list; } +template<> +bool pyopencv_to(PyObject* obj, GMetaArgs& value, const ArgInfo& info) +{ + return pyopencv_to_generic_vec(obj, value, info); +} + +template<> +PyObject* pyopencv_from(const GMetaArgs& value) +{ + return pyopencv_from_generic_vec(value); +} + template static PyObject* extract_proto_args(PyObject* py_args, PyObject* kw) { @@ -151,6 +170,19 @@ static PyObject* pyopencv_cv_gin(PyObject* , PyObject* py_args, PyObject* kw) return NULL; } } + else if (PyObject_TypeCheck(item, + reinterpret_cast(pyopencv_gapi_wip_IStreamSource_TypePtr))) + { + cv::gapi::wip::IStreamSource::Ptr source = + reinterpret_cast(item)->v; + args.emplace_back(source); + } + else + { + PyErr_SetString(PyExc_TypeError, "cv.gin can works only with 
cv::Mat," + "cv::Scalar, cv::gapi::wip::IStreamSource::Ptr"); + return NULL; + } } return pyopencv_from_generic_vec(args); diff --git a/modules/gapi/misc/python/shadow_gapi.hpp b/modules/gapi/misc/python/shadow_gapi.hpp index 4f988440e8..72d7686eeb 100644 --- a/modules/gapi/misc/python/shadow_gapi.hpp +++ b/modules/gapi/misc/python/shadow_gapi.hpp @@ -7,11 +7,22 @@ namespace cv GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GKernelPackage pkg); + // NB: This classes doesn't exist in *.so + // HACK: Mark them as a class to force python wrapper generate code for this entities class GAPI_EXPORTS_W_SIMPLE GProtoArg { }; class GAPI_EXPORTS_W_SIMPLE GProtoInputArgs { }; class GAPI_EXPORTS_W_SIMPLE GProtoOutputArgs { }; class GAPI_EXPORTS_W_SIMPLE GRunArg { }; + class GAPI_EXPORTS_W_SIMPLE GMetaArg { }; using GProtoInputArgs = GIOProtoArgs; using GProtoOutputArgs = GIOProtoArgs; + + namespace gapi + { + namespace wip + { + class GAPI_EXPORTS_W IStreamSource { }; + } + } } // namespace cv diff --git a/modules/gapi/misc/python/test/test_gapi_streaming.py b/modules/gapi/misc/python/test/test_gapi_streaming.py new file mode 100644 index 0000000000..bf182d9c91 --- /dev/null +++ b/modules/gapi/misc/python/test/test_gapi_streaming.py @@ -0,0 +1,129 @@ +#!/usr/bin/env python + +import numpy as np +import cv2 as cv +import os + +from tests_common import NewOpenCVTests + +class test_gapi_streaming(NewOpenCVTests): + + def test_image_input(self): + sz = (1280, 720) + in_mat = np.random.randint(0, 100, sz).astype(np.uint8) + + # OpenCV + expected = cv.medianBlur(in_mat, 3) + + # G-API + g_in = cv.GMat() + g_out = cv.gapi.medianBlur(g_in, 3) + c = cv.GComputation(g_in, g_out) + ccomp = c.compileStreaming(cv.descr_of(cv.gin(in_mat))) + ccomp.setSource(cv.gin(in_mat)) + ccomp.start() + + _, actual = ccomp.pull() + + # Assert + self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF)) + + + def test_video_input(self): + ksize = 3 + path = self.find_file('cv/video/768x576.avi', 
[os.environ['OPENCV_TEST_DATA_PATH']]) + + # OpenCV + cap = cv.VideoCapture(path) + + # G-API + g_in = cv.GMat() + g_out = cv.gapi.medianBlur(g_in, ksize) + c = cv.GComputation(g_in, g_out) + + ccomp = c.compileStreaming() + source = cv.gapi.wip.make_capture_src(path) + ccomp.setSource(source) + ccomp.start() + + # Assert + while cap.isOpened(): + has_expected, expected = cap.read() + has_actual, actual = ccomp.pull() + + self.assertEqual(has_expected, has_actual) + + if not has_actual: + break + + self.assertEqual(0.0, cv.norm(cv.medianBlur(expected, ksize), actual, cv.NORM_INF)) + + + def test_video_split3(self): + path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']]) + + # OpenCV + cap = cv.VideoCapture(path) + + # G-API + g_in = cv.GMat() + b, g, r = cv.gapi.split3(g_in) + c = cv.GComputation(cv.GIn(g_in), cv.GOut(b, g, r)) + + ccomp = c.compileStreaming() + source = cv.gapi.wip.make_capture_src(path) + ccomp.setSource(source) + ccomp.start() + + # Assert + while cap.isOpened(): + has_expected, frame = cap.read() + has_actual, actual = ccomp.pull() + + self.assertEqual(has_expected, has_actual) + + if not has_actual: + break + + expected = cv.split(frame) + for e, a in zip(expected, actual): + self.assertEqual(0.0, cv.norm(e, a, cv.NORM_INF)) + + + def test_video_add(self): + sz = (576, 768, 3) + in_mat = np.random.randint(0, 100, sz).astype(np.uint8) + + path = self.find_file('cv/video/768x576.avi', [os.environ['OPENCV_TEST_DATA_PATH']]) + + # OpenCV + cap = cv.VideoCapture(path) + + # G-API + g_in1 = cv.GMat() + g_in2 = cv.GMat() + out = cv.gapi.add(g_in1, g_in2) + c = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(out)) + + ccomp = c.compileStreaming() + source = cv.gapi.wip.make_capture_src(path) + ccomp.setSource(cv.gin(source, in_mat)) + ccomp.start() + + # Assert + while cap.isOpened(): + has_expected, frame = cap.read() + has_actual, actual = ccomp.pull() + + self.assertEqual(has_expected, has_actual) + + if not has_actual: 
+ break + + expected = cv.add(frame, in_mat) + self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF)) + + + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/modules/gapi/src/compiler/gcompiler.cpp b/modules/gapi/src/compiler/gcompiler.cpp index 2f46ea873b..76c40ddca0 100644 --- a/modules/gapi/src/compiler/gcompiler.cpp +++ b/modules/gapi/src/compiler/gcompiler.cpp @@ -448,6 +448,14 @@ cv::GStreamingCompiled cv::gimpl::GCompiler::produceStreamingCompiled(GPtr &&pg) outMetas = GModel::ConstGraph(*pg).metadata().get().outMeta; } + auto out_desc = GModel::ConstGraph(*pg).metadata().get().outputs; + GShapes out_shapes; + for (auto&& desc : out_desc) + { + out_shapes.push_back(desc.shape); + } + compiled.priv().setOutShapes(std::move(out_shapes)); + std::unique_ptr pE(new GStreamingExecutor(std::move(pg), m_args)); if (!m_metas.empty() && !outMetas.empty()) diff --git a/modules/gapi/src/compiler/gstreaming.cpp b/modules/gapi/src/compiler/gstreaming.cpp index 2e9c016ceb..29c98ddfd4 100644 --- a/modules/gapi/src/compiler/gstreaming.cpp +++ b/modules/gapi/src/compiler/gstreaming.cpp @@ -111,6 +111,39 @@ bool cv::GStreamingCompiled::pull(cv::GRunArgsP &&outs) return m_priv->pull(std::move(outs)); } +std::tuple cv::GStreamingCompiled::pull() +{ + GRunArgs run_args; + GRunArgsP outs; + const auto& out_shapes = m_priv->outShapes(); + run_args.reserve(out_shapes.size()); + outs.reserve(out_shapes.size()); + + for (auto&& shape : out_shapes) + { + switch (shape) + { + case cv::GShape::GMAT: + { + run_args.emplace_back(cv::Mat{}); + outs.emplace_back(&cv::util::get(run_args.back())); + break; + } + case cv::GShape::GSCALAR: + { + run_args.emplace_back(cv::Scalar{}); + outs.emplace_back(&cv::util::get(run_args.back())); + break; + } + default: + util::throw_error(std::logic_error("Only cv::GMat and cv::GScalar are supported for python output")); + } + } + + bool is_over = m_priv->pull(std::move(outs)); + return std::make_tuple(is_over, run_args); +} + 
bool cv::GStreamingCompiled::try_pull(cv::GRunArgsP &&outs) { return m_priv->try_pull(std::move(outs)); diff --git a/modules/gapi/src/compiler/gstreaming_priv.hpp b/modules/gapi/src/compiler/gstreaming_priv.hpp index 447bcda76e..73ca002f85 100644 --- a/modules/gapi/src/compiler/gstreaming_priv.hpp +++ b/modules/gapi/src/compiler/gstreaming_priv.hpp @@ -27,6 +27,7 @@ class GAPI_EXPORTS GStreamingCompiled::Priv GMetaArgs m_metas; // passed by user GMetaArgs m_outMetas; // inferred by compiler std::unique_ptr m_exec; + GShapes m_out_shapes; public: void setup(const GMetaArgs &metaArgs, @@ -45,6 +46,11 @@ public: void stop(); bool running() const; + + // NB: std::tuple pull() creates GRunArgs for outputs, + // so need to know out shapes to create corresponding GRunArg + void setOutShapes(GShapes shapes) { m_out_shapes = std::move(shapes); } + const GShapes& outShapes() const { return m_out_shapes; } }; } // namespace cv diff --git a/modules/gapi/test/streaming/gapi_streaming_tests.cpp b/modules/gapi/test/streaming/gapi_streaming_tests.cpp index 1150e6a862..dfd2331bfd 100644 --- a/modules/gapi/test/streaming/gapi_streaming_tests.cpp +++ b/modules/gapi/test/streaming/gapi_streaming_tests.cpp @@ -983,4 +983,39 @@ TEST_F(GAPI_Streaming_Unit, SetSource_After_Completion) EXPECT_EQ(0., cv::norm(out, out_ref, cv::NORM_INF)); } +// NB: Check pull overload for python +TEST(Streaming, Python_Pull_Overload) +{ + cv::GMat in; + auto out = cv::gapi::copy(in); + cv::GComputation c(in, out); + + cv::Size sz(3,3); + cv::Mat in_mat(sz, CV_8UC3); + cv::randu(in_mat, cv::Scalar::all(0), cv::Scalar(255)); + + auto ccomp = c.compileStreaming(cv::descr_of(in_mat)); + + EXPECT_TRUE(ccomp); + EXPECT_FALSE(ccomp.running()); + + ccomp.setSource(cv::gin(in_mat)); + + ccomp.start(); + EXPECT_TRUE(ccomp.running()); + + bool has_output; + cv::GRunArgs outputs; + std::tie(has_output, outputs) = ccomp.pull(); + + EXPECT_TRUE(has_output); + EXPECT_EQ(1u, outputs.size()); + + auto out_mat = 
cv::util::get(outputs[0]); + EXPECT_EQ(0., cv::norm(in_mat, out_mat, cv::NORM_INF)); + + ccomp.stop(); + EXPECT_FALSE(ccomp.running()); +} + } // namespace opencv_test From 8bf451a3e0c6af796691ee336abd157b762fd6af Mon Sep 17 00:00:00 2001 From: Anatoliy Talamanov Date: Thu, 15 Oct 2020 16:59:02 +0300 Subject: [PATCH 026/152] Merge pull request #18542 from TolyaTalamanov:at/import-network [G-API] Support ImportNetwork for cv::gapi::infer * wip * Refactoring * Fix comments to review * Fix warning Co-authored-by: Ruslan Garnov --- .../gapi/include/opencv2/gapi/infer/ie.hpp | 22 +++++++++- modules/gapi/src/backends/common/gbackend.hpp | 1 + modules/gapi/src/backends/ie/giebackend.cpp | 43 ++++++++++++++++--- .../src/backends/ie/giebackend/giewrapper.cpp | 18 ++++++++ .../src/backends/ie/giebackend/giewrapper.hpp | 13 ++++++ 5 files changed, 88 insertions(+), 9 deletions(-) diff --git a/modules/gapi/include/opencv2/gapi/infer/ie.hpp b/modules/gapi/include/opencv2/gapi/infer/ie.hpp index 8421d9e2c9..dd2459da08 100644 --- a/modules/gapi/include/opencv2/gapi/infer/ie.hpp +++ b/modules/gapi/include/opencv2/gapi/infer/ie.hpp @@ -60,6 +60,8 @@ namespace detail { std::size_t num_in; // How many inputs are defined in the operation std::size_t num_out; // How many outputs are defined in the operation + enum class Kind { Load, Import }; + Kind kind; bool is_generic; }; } // namespace detail @@ -83,6 +85,16 @@ public: : desc{ model, weights, device, {}, {}, {} , std::tuple_size::value // num_in , std::tuple_size::value // num_out + , detail::ParamDesc::Kind::Load + , false} { + }; + + Params(const std::string &model, + const std::string &device) + : desc{ model, {}, device, {}, {}, {} + , std::tuple_size::value // num_in + , std::tuple_size::value // num_out + , detail::ParamDesc::Kind::Import , false} { }; @@ -122,11 +134,17 @@ protected: template<> class Params { public: - Params(const std::string& tag, + Params(const std::string &tag, const std::string &model, const std::string 
&weights, const std::string &device) - : desc{ model, weights, device, {}, {}, {}, 0u, 0u, true}, m_tag(tag) { + : desc{ model, weights, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Load, true}, m_tag(tag) { + }; + + Params(const std::string &tag, + const std::string &model, + const std::string &device) + : desc{ model, {}, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Import, true}, m_tag(tag) { }; // BEGIN(G-API's network parametrization API) diff --git a/modules/gapi/src/backends/common/gbackend.hpp b/modules/gapi/src/backends/common/gbackend.hpp index f747a0dd1c..e96d2b0776 100644 --- a/modules/gapi/src/backends/common/gbackend.hpp +++ b/modules/gapi/src/backends/common/gbackend.hpp @@ -27,6 +27,7 @@ namespace gimpl { : cv::Mat(v.dims(), v.type(), v.ptr()); } inline RMat::View asView(const Mat& m, RMat::View::DestroyCallback&& cb = nullptr) { + // FIXME: View doesn't support multidimensional cv::Mat's return RMat::View(cv::descr_of(m), m.data, m.step, std::move(cb)); } diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp index b7bda2fe9f..08836163a7 100644 --- a/modules/gapi/src/backends/ie/giebackend.cpp +++ b/modules/gapi/src/backends/ie/giebackend.cpp @@ -175,11 +175,26 @@ struct IEUnit { IE::InputsDataMap inputs; IE::OutputsDataMap outputs; + IE::ExecutableNetwork this_network; + cv::gimpl::ie::wrap::Plugin this_plugin; + explicit IEUnit(const cv::gapi::ie::detail::ParamDesc &pp) : params(pp) { - net = cv::gimpl::ie::wrap::readNetwork(params); - inputs = net.getInputsInfo(); - outputs = net.getOutputsInfo(); + if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) { + net = cv::gimpl::ie::wrap::readNetwork(params); + inputs = net.getInputsInfo(); + outputs = net.getOutputsInfo(); + } else if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import) { + this_plugin = cv::gimpl::ie::wrap::getPlugin(params); + this_network = cv::gimpl::ie::wrap::importNetwork(this_plugin, params); + 
// FIXME: ICNNetwork returns InputsDataMap/OutputsDataMap, + // but ExecutableNetwork returns ConstInputsDataMap/ConstOutputsDataMap + inputs = cv::gimpl::ie::wrap::toInputsDataMap(this_network.GetInputsInfo()); + outputs = cv::gimpl::ie::wrap::toOutputsDataMap(this_network.GetOutputsInfo()); + } else { + cv::util::throw_error(std::logic_error("Unsupported ParamDesc::Kind")); + } + // The practice shows that not all inputs and not all outputs // are mandatory to specify in IE model. // So what we're concerned here about is: @@ -205,10 +220,15 @@ struct IEUnit { // This method is [supposed to be] called at Island compilation stage cv::gimpl::ie::IECompiled compile() const { - auto plugin = cv::gimpl::ie::wrap::getPlugin(params); - auto this_network = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params); - auto this_request = this_network.CreateInferRequest(); + IEUnit* non_const_this = const_cast(this); + if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Load) { + // FIXME: In case importNetwork for fill inputs/outputs need to obtain ExecutableNetwork, but + // for loadNetwork they can be obtained by using readNetwork + non_const_this->this_plugin = cv::gimpl::ie::wrap::getPlugin(params); + non_const_this->this_network = cv::gimpl::ie::wrap::loadNetwork(non_const_this->this_plugin, net, params); + } + auto this_request = non_const_this->this_network.CreateInferRequest(); // Bind const data to infer request for (auto &&p : params.const_inputs) { // FIXME: SetBlob is known to be inefficient, @@ -217,7 +237,16 @@ struct IEUnit { // Still, constant data is to set only once. 
this_request.SetBlob(p.first, wrapIE(p.second.first, p.second.second)); } - return {plugin, this_network, this_request}; + // Bind const data to infer request + for (auto &&p : params.const_inputs) { + // FIXME: SetBlob is known to be inefficient, + // it is worth to make a customizable "initializer" and pass the + // cv::Mat-wrapped blob there to support IE's optimal "GetBlob idiom" + // Still, constant data is to set only once. + this_request.SetBlob(p.first, wrapIE(p.second.first, p.second.second)); + } + + return {this_plugin, this_network, this_request}; } }; diff --git a/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp b/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp index 444d9553e7..8f5a7eca11 100644 --- a/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp +++ b/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp @@ -22,6 +22,24 @@ namespace IE = InferenceEngine; namespace giewrap = cv::gimpl::ie::wrap; using GIEParam = cv::gapi::ie::detail::ParamDesc; +IE::InputsDataMap giewrap::toInputsDataMap (const IE::ConstInputsDataMap& inputs) { + IE::InputsDataMap transformed; + auto convert = [](const std::pair& p) { + return std::make_pair(p.first, std::make_shared(*p.second)); + }; + std::transform(inputs.begin(), inputs.end(), std::inserter(transformed, transformed.end()), convert); + return transformed; +} + +IE::OutputsDataMap giewrap::toOutputsDataMap (const IE::ConstOutputsDataMap& outputs) { + IE::OutputsDataMap transformed; + auto convert = [](const std::pair& p) { + return std::make_pair(p.first, std::make_shared(*p.second)); + }; + std::transform(outputs.begin(), outputs.end(), std::inserter(transformed, transformed.end()), convert); + return transformed; +} + #if INF_ENGINE_RELEASE < 2020000000 // < 2020.1 // Load extensions (taken from DNN module) std::vector giewrap::getExtensions(const GIEParam& params) { diff --git a/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp b/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp 
index 7871942d26..3927c802b7 100644 --- a/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp +++ b/modules/gapi/src/backends/ie/giebackend/giewrapper.hpp @@ -28,7 +28,11 @@ namespace wrap { GAPI_EXPORTS std::vector getExtensions(const GIEParam& params); GAPI_EXPORTS IE::CNNNetwork readNetwork(const GIEParam& params); +IE::InputsDataMap toInputsDataMap (const IE::ConstInputsDataMap& inputs); +IE::OutputsDataMap toOutputsDataMap(const IE::ConstOutputsDataMap& outputs); + #if INF_ENGINE_RELEASE < 2019020000 // < 2019.R2 +using Plugin = IE::InferencePlugin; GAPI_EXPORTS IE::InferencePlugin getPlugin(const GIEParam& params); GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork( IE::InferencePlugin& plugin, const IE::CNNNetwork& net, @@ -36,7 +40,12 @@ GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork( IE::InferencePlugin& return plugin.LoadNetwork(net, {}); // FIXME: 2nd parameter to be // configurable via the API } +GAPI_EXPORTS inline IE::ExecutableNetwork importNetwork( IE::CNNNetwork& plugin, + const GIEParam& param) { + return plugin.ImportNetwork(param.model_path, param.device_id, {}); +} #else // >= 2019.R2 +using Plugin = IE::Core; GAPI_EXPORTS IE::Core getCore(); GAPI_EXPORTS IE::Core getPlugin(const GIEParam& params); GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork( IE::Core& core, @@ -44,6 +53,10 @@ GAPI_EXPORTS inline IE::ExecutableNetwork loadNetwork( IE::Core& core const GIEParam& params) { return core.LoadNetwork(net, params.device_id); } +GAPI_EXPORTS inline IE::ExecutableNetwork importNetwork( IE::Core& core, + const GIEParam& param) { + return core.ImportNetwork(param.model_path, param.device_id, {}); +} #endif // INF_ENGINE_RELEASE < 2019020000 }}}} From 1fb6c6e6e570b06ddeb6116b01052f06193fdc06 Mon Sep 17 00:00:00 2001 From: Krushnal Patel Date: Wed, 14 Oct 2020 23:22:46 +0530 Subject: [PATCH 027/152] Update demosaicing.cpp --- modules/imgproc/src/demosaicing.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/modules/imgproc/src/demosaicing.cpp b/modules/imgproc/src/demosaicing.cpp index e02104d222..03bc781046 100644 --- a/modules/imgproc/src/demosaicing.cpp +++ b/modules/imgproc/src/demosaicing.cpp @@ -1566,9 +1566,9 @@ public: int x = 1; if (start_with_green) { - D[blue<<1] = (S[-sstep] + S[sstep]) >> 1; + D[blue<<1] = (S[-sstep] + S[sstep] + 1) >> 1; D[1] = S[0]; - D[2-(blue<<1)] = (S[-1] + S[1]) >> 1; + D[2-(blue<<1)] = (S[-1] + S[1] + 1) >> 1; D += dcn; ++S; ++x; @@ -1584,7 +1584,7 @@ public: { D[0] = S[0]; D[1] = (std::abs(S[-1] - S[1]) > std::abs(S[sstep] - S[-sstep]) ? (S[sstep] + S[-sstep] + 1) : (S[-1] + S[1] + 1)) >> 1; - D[2] = (S[-sstep-1] + S[-sstep+1] + S[sstep-1] + S[sstep+1]) >> 2; + D[2] = (S[-sstep-1] + S[-sstep+1] + S[sstep-1] + S[sstep+1] + 2) >> 2; D[3] = (S[0] + S[2] + 1) >> 1; D[4] = S[1]; From bc6a70c689745e40a84ff70b67cf29106449799f Mon Sep 17 00:00:00 2001 From: Pavel Rojtberg Date: Wed, 7 Oct 2020 12:19:17 +0200 Subject: [PATCH 028/152] imwrite: multi-image overload for bindings --- modules/imgcodecs/include/opencv2/imgcodecs.hpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/imgcodecs/include/opencv2/imgcodecs.hpp b/modules/imgcodecs/include/opencv2/imgcodecs.hpp index e1f8208e0b..e2636e19f7 100644 --- a/modules/imgcodecs/include/opencv2/imgcodecs.hpp +++ b/modules/imgcodecs/include/opencv2/imgcodecs.hpp @@ -220,6 +220,14 @@ It also demonstrates how to save multiple images in a TIFF file: CV_EXPORTS_W bool imwrite( const String& filename, InputArray img, const std::vector& params = std::vector()); +/// @overload multi-image overload for bindings +CV_WRAP static inline +bool imwritemulti(const String& filename, InputArrayOfArrays img, + const std::vector& params = std::vector()) +{ + return imwrite(filename, img, params); +} + /** @brief Reads an image from a buffer in memory. The function imdecode reads an image from the specified buffer in the memory. 
If the buffer is too short or From aa51382dbc6526df2e3b0aa1c438d6c3cc8854cc Mon Sep 17 00:00:00 2001 From: arodrigu Date: Fri, 16 Oct 2020 14:41:41 +0200 Subject: [PATCH 029/152] Fix: UsacParams Python bindings --- modules/calib3d/include/opencv2/calib3d.hpp | 23 +++++++++++---------- modules/calib3d/src/solvepnp.cpp | 15 ++++++++++++++ 2 files changed, 27 insertions(+), 11 deletions(-) diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp index cc5fdbfe1c..a1a4c57d8a 100644 --- a/modules/calib3d/include/opencv2/calib3d.hpp +++ b/modules/calib3d/include/opencv2/calib3d.hpp @@ -550,17 +550,18 @@ enum NeighborSearchMethod { NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS }; struct CV_EXPORTS_W_SIMPLE UsacParams { // in alphabetical order - double confidence = 0.99; - bool isParallel = false; - int loIterations = 5; - LocalOptimMethod loMethod = LocalOptimMethod::LOCAL_OPTIM_INNER_LO; - int loSampleSize = 14; - int maxIterations = 5000; - NeighborSearchMethod neighborsSearch = NeighborSearchMethod::NEIGH_GRID; - int randomGeneratorState = 0; - SamplingMethod sampler = SamplingMethod::SAMPLING_UNIFORM; - ScoreMethod score = ScoreMethod::SCORE_METHOD_MSAC; - double threshold = 1.5; + CV_WRAP UsacParams(); + CV_PROP_RW double confidence; + CV_PROP_RW bool isParallel; + CV_PROP_RW int loIterations; + CV_PROP_RW LocalOptimMethod loMethod; + CV_PROP_RW int loSampleSize; + CV_PROP_RW int maxIterations; + CV_PROP_RW NeighborSearchMethod neighborsSearch; + CV_PROP_RW int randomGeneratorState; + CV_PROP_RW SamplingMethod sampler; + CV_PROP_RW ScoreMethod score; + CV_PROP_RW double threshold; }; /** @brief Converts a rotation matrix to a rotation vector or vice versa. 
diff --git a/modules/calib3d/src/solvepnp.cpp b/modules/calib3d/src/solvepnp.cpp index 5c04662489..bb595085fa 100644 --- a/modules/calib3d/src/solvepnp.cpp +++ b/modules/calib3d/src/solvepnp.cpp @@ -197,6 +197,21 @@ public: Mat tvec; }; +UsacParams::UsacParams() +{ + confidence = 0.99; + isParallel = false; + loIterations = 5; + loMethod = LocalOptimMethod::LOCAL_OPTIM_INNER_LO; + loSampleSize = 14; + maxIterations = 5000; + neighborsSearch = NeighborSearchMethod::NEIGH_GRID; + randomGeneratorState = 0; + sampler = SamplingMethod::SAMPLING_UNIFORM; + score = ScoreMethod::SCORE_METHOD_MSAC; + threshold = 1.5; +} + bool solvePnPRansac(InputArray _opoints, InputArray _ipoints, InputArray _cameraMatrix, InputArray _distCoeffs, OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess, From b5717f82a04019fa2fa8ac7e671d5403cee324ce Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 16 Oct 2020 15:35:51 +0000 Subject: [PATCH 030/152] core: fix __clang_major__ typo regression --- modules/core/include/opencv2/core/cvdef.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index 08db1c820f..5bd3af33a4 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -90,7 +90,7 @@ namespace cv { namespace debug_build_guard { } using namespace debug_build_guard // keep current value (through OpenCV port file) #elif defined __GNUC__ || (defined (__cpluscplus) && (__cpluscplus >= 201103)) #define CV_Func __func__ -#elif defined __clang__ && (__clang_minor__ * 100 + __clang_major >= 305) +#elif defined __clang__ && (__clang_minor__ * 100 + __clang_major__ >= 305) #define CV_Func __func__ #elif defined(__STDC_VERSION__) && (__STDC_VERSION >= 199901) #define CV_Func __func__ From ef21fd3cf8496e5c03da4920dee767c2b7df1615 Mon Sep 17 00:00:00 2001 From: Giles Payne Date: Sat, 17 Oct 2020 14:31:24 +0900 Subject: [PATCH 031/152] Fix 
handling of properties with enum type --- modules/objc/generator/gen_objc.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py index 1ae00ab5f1..9ea245eef0 100755 --- a/modules/objc/generator/gen_objc.py +++ b/modules/objc/generator/gen_objc.py @@ -274,8 +274,9 @@ class ClassInfo(GeneralInfo): def getForwardDeclarations(self, module): enum_decl = filter(lambda x:self.isEnum(x) and type_dict[x]["import_module"] != module, self.imports) + enum_imports = list(set(map(lambda m: type_dict[m]["import_module"], enum_decl))) class_decl = filter(lambda x: not self.isEnum(x), self.imports) - return ["#import \"%s.h\"" % type_dict[c]["import_module"] for c in enum_decl] + [""] + ["@class %s;" % c for c in sorted(class_decl)] + return ["#import \"%s.h\"" % c for c in enum_imports] + [""] + ["@class %s;" % c for c in sorted(class_decl)] def addImports(self, ctype, is_out_type): if ctype == self.cname: @@ -721,10 +722,7 @@ class ObjectiveCWrapperGenerator(object): # class props for p in decl[3]: - if True: #"vector" not in p[0]: - classinfo.props.append( ClassPropInfo(p) ) - else: - logging.warning("Skipped property: [%s]" % name, p) + classinfo.props.append( ClassPropInfo(p) ) if name != self.Module: type_dict.setdefault("Ptr_"+name, {}).update( @@ -786,7 +784,8 @@ class ObjectiveCWrapperGenerator(object): type_dict[objc_type] = { "cast_to" : get_cname(enumType), "objc_type": objc_type, "is_enum": True, - "import_module": import_module} + "import_module": import_module, + "from_cpp": "(" + objc_type + ")%(n)s"} self.classes[self.Module].member_enums.append(objc_type) const_decls = decl[3] @@ -1301,7 +1300,7 @@ typedef NS_ENUM(int, {2}) {{ ci.method_implementations.write("\t" + ("\n\t".join(prologue)) + "\n") ci.method_implementations.write("\t" + ptr_ref + pi.name + " = valVector;\n}\n\n") else: - to_cpp = type_data.get("to_cpp", "%(n)s") + to_cpp = type_data.get("to_cpp", ("(" 
+ type_data.get("cast_to") + ")%(n)s") if type_data.has_key("cast_to") else "%(n)s") val = to_cpp % {"n": pi.name} ci.method_implementations.write("-(void)set" + pi.name[0].upper() + pi.name[1:] + ":(" + objc_type + ")" + pi.name + " {\n\t" + ptr_ref + pi.name + " = " + val + ";\n}\n\n") From c82417697a35c1b265603328d9b292cc07d3d8b3 Mon Sep 17 00:00:00 2001 From: Kun Liang Date: Mon, 19 Oct 2020 04:30:36 +0800 Subject: [PATCH 032/152] Merge pull request #18068 from lionkunonly:gsoc_2020_simd [GSoC] OpenCV.js: WASM SIMD optimization 2.0 * gsoc_2020_simd Add perf test for filter2d * add perf test for kernel scharr and kernel gaussianBlur * add perf test for blur, medianBlur, erode, dilate * fix the errors for the opencv PR robot fix the trailing whitespace. * add perf tests for kernel remap, warpAffine, warpPersepective, pyrDown * fix a bug in modules/js/perf/perf_imgproc/perf_remap.js * add function smoothBorder in helpfun.js and remove replicated function in perf test of warpAffine and warpPrespective * fix the trailing white space issues * add OpenCV.js loader * Implement the Loader with help of WebAssembly Feature Detection, remove trailing whitespaces * modify the explantion for loader in js_setup.markdown and fix bug in loader.js --- .../js_setup/js_setup/js_setup.markdown | 34 ++ .../include/opencv2/core/hal/intrin_wasm.hpp | 313 +++------------ modules/js/CMakeLists.txt | 19 + modules/js/perf/base.js | 31 +- modules/js/perf/perf_64bits.html | 67 ++++ modules/js/perf/perf_64bits.js | 180 +++++++++ modules/js/perf/perf_helpfunc.js | 244 ++++++++++++ modules/js/perf/perf_imgproc/perf_blur.html | 73 ++++ modules/js/perf/perf_imgproc/perf_blur.js | 130 ++++++ modules/js/perf/perf_imgproc/perf_cvtcolor.js | 372 +++++------------- modules/js/perf/perf_imgproc/perf_dilate.html | 73 ++++ modules/js/perf/perf_imgproc/perf_dilate.js | 117 ++++++ modules/js/perf/perf_imgproc/perf_erode.html | 73 ++++ modules/js/perf/perf_imgproc/perf_erode.js | 117 ++++++ 
.../js/perf/perf_imgproc/perf_filter2D.html | 73 ++++ modules/js/perf/perf_imgproc/perf_filter2D.js | 127 ++++++ .../perf/perf_imgproc/perf_gaussianBlur.html | 73 ++++ .../js/perf/perf_imgproc/perf_gaussianBlur.js | 126 ++++++ .../js/perf/perf_imgproc/perf_medianBlur.html | 73 ++++ .../js/perf/perf_imgproc/perf_medianBlur.js | 118 ++++++ .../js/perf/perf_imgproc/perf_pyrDown.html | 73 ++++ modules/js/perf/perf_imgproc/perf_pyrDown.js | 116 ++++++ modules/js/perf/perf_imgproc/perf_remap.html | 73 ++++ modules/js/perf/perf_imgproc/perf_remap.js | 182 +++++++++ modules/js/perf/perf_imgproc/perf_resize.js | 241 ++++-------- modules/js/perf/perf_imgproc/perf_scharr.html | 73 ++++ modules/js/perf/perf_imgproc/perf_scharr.js | 156 ++++++++ modules/js/perf/perf_imgproc/perf_sobel.html | 73 ++++ modules/js/perf/perf_imgproc/perf_sobel.js | 170 ++++++++ .../js/perf/perf_imgproc/perf_threshold.js | 241 +++++------- .../js/perf/perf_imgproc/perf_warpAffine.html | 73 ++++ .../js/perf/perf_imgproc/perf_warpAffine.js | 130 ++++++ .../perf_imgproc/perf_warpPerspective.html | 73 ++++ .../perf/perf_imgproc/perf_warpPerspective.js | 143 +++++++ modules/js/src/loader.js | 96 +++++ platforms/js/build_js.py | 14 + 36 files changed, 3513 insertions(+), 847 deletions(-) create mode 100644 modules/js/perf/perf_64bits.html create mode 100644 modules/js/perf/perf_64bits.js create mode 100644 modules/js/perf/perf_imgproc/perf_blur.html create mode 100644 modules/js/perf/perf_imgproc/perf_blur.js create mode 100644 modules/js/perf/perf_imgproc/perf_dilate.html create mode 100644 modules/js/perf/perf_imgproc/perf_dilate.js create mode 100644 modules/js/perf/perf_imgproc/perf_erode.html create mode 100644 modules/js/perf/perf_imgproc/perf_erode.js create mode 100644 modules/js/perf/perf_imgproc/perf_filter2D.html create mode 100644 modules/js/perf/perf_imgproc/perf_filter2D.js create mode 100644 modules/js/perf/perf_imgproc/perf_gaussianBlur.html create mode 100644 
modules/js/perf/perf_imgproc/perf_gaussianBlur.js create mode 100644 modules/js/perf/perf_imgproc/perf_medianBlur.html create mode 100644 modules/js/perf/perf_imgproc/perf_medianBlur.js create mode 100644 modules/js/perf/perf_imgproc/perf_pyrDown.html create mode 100644 modules/js/perf/perf_imgproc/perf_pyrDown.js create mode 100644 modules/js/perf/perf_imgproc/perf_remap.html create mode 100644 modules/js/perf/perf_imgproc/perf_remap.js create mode 100644 modules/js/perf/perf_imgproc/perf_scharr.html create mode 100644 modules/js/perf/perf_imgproc/perf_scharr.js create mode 100644 modules/js/perf/perf_imgproc/perf_sobel.html create mode 100644 modules/js/perf/perf_imgproc/perf_sobel.js create mode 100644 modules/js/perf/perf_imgproc/perf_warpAffine.html create mode 100644 modules/js/perf/perf_imgproc/perf_warpAffine.js create mode 100644 modules/js/perf/perf_imgproc/perf_warpPerspective.html create mode 100644 modules/js/perf/perf_imgproc/perf_warpPerspective.js create mode 100644 modules/js/src/loader.js diff --git a/doc/js_tutorials/js_setup/js_setup/js_setup.markdown b/doc/js_tutorials/js_setup/js_setup/js_setup.markdown index 87167cd219..435f06fe02 100644 --- a/doc/js_tutorials/js_setup/js_setup/js_setup.markdown +++ b/doc/js_tutorials/js_setup/js_setup/js_setup.markdown @@ -32,6 +32,15 @@ source ./emsdk_env.sh echo ${EMSCRIPTEN} @endcode +The version 1.39.16 of emscripten is verified for latest WebAssembly. Please check the version of emscripten to use the newest features of WebAssembly. + +For example: +@code{.bash} +./emsdk update +./emsdk install 1.39.16 +./emsdk activate 1.39.16 +@endcode + Obtaining OpenCV Source Code -------------------------- @@ -76,6 +85,31 @@ Building OpenCV.js from Source python ./platforms/js/build_js.py build_wasm --build_wasm @endcode +-# [Optional] To build the OpenCV.js loader, append `--build_loader`. 
+ + For example: + @code{.bash} + python ./platforms/js/build_js.py build_js --build_loader + @endcode + + @note + The loader is implemented as a js file in the path `/bin/loader.js`. The loader utilizes the [WebAssembly Feature Detection](https://github.com/GoogleChromeLabs/wasm-feature-detect) to detect the features of the broswer and load corresponding OpenCV.js automatically. To use it, you need to use the UMD version of [WebAssembly Feature Detection](https://github.com/GoogleChromeLabs/wasm-feature-detect) and introduce the `loader.js` in your Web application. + + Example Code: + @code{.javascipt} + // Set paths configuration + let pathsConfig = { + wasm: "../../build_wasm/opencv.js", + threads: "../../build_mt/opencv.js", + simd: "../../build_simd/opencv.js", + threadsSimd: "../../build_mtSIMD/opencv.js", + } + + // Load OpenCV.js and use the pathsConfiguration and main function as the params. + loadOpenCV(pathsConfig, main); + @endcode + + -# [optional] To build documents, append `--build_doc` option. 
For example: diff --git a/modules/core/include/opencv2/core/hal/intrin_wasm.hpp b/modules/core/include/opencv2/core/hal/intrin_wasm.hpp index d1bfb6da6d..ef928f6a5c 100644 --- a/modules/core/include/opencv2/core/hal/intrin_wasm.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_wasm.hpp @@ -207,13 +207,7 @@ struct v_uint64x2 uint64 get0() const { -#ifdef __wasm_unimplemented_simd128__ return (uint64)wasm_i64x2_extract_lane(val, 0); -#else - uint64 des[2]; - wasm_v128_store(des, val); - return des[0]; -#endif } v128_t val; @@ -235,13 +229,7 @@ struct v_int64x2 int64 get0() const { -#ifdef __wasm_unimplemented_simd128__ return wasm_i64x2_extract_lane(val, 0); -#else - int64 des[2]; - wasm_v128_store(des, val); - return des[0]; -#endif } v128_t val; @@ -263,13 +251,7 @@ struct v_float64x2 double get0() const { -#ifdef __wasm_unimplemented_simd128__ return wasm_f64x2_extract_lane(val, 0); -#else - double des[2]; - wasm_v128_store(des, val); - return des[0]; -#endif } v128_t val; @@ -1797,22 +1779,9 @@ OPENCV_HAL_IMPL_WASM_INITVEC(v_int16x8, short, s16, i16x8, short) OPENCV_HAL_IMPL_WASM_INITVEC(v_uint32x4, unsigned, u32, i32x4, int) OPENCV_HAL_IMPL_WASM_INITVEC(v_int32x4, int, s32, i32x4, int) OPENCV_HAL_IMPL_WASM_INITVEC(v_float32x4, float, f32, f32x4, float) - -#ifdef __wasm_unimplemented_simd128__ OPENCV_HAL_IMPL_WASM_INITVEC(v_uint64x2, uint64, u64, i64x2, int64) OPENCV_HAL_IMPL_WASM_INITVEC(v_int64x2, int64, s64, i64x2, int64) OPENCV_HAL_IMPL_WASM_INITVEC(v_float64x2, double, f64, f64x2, double) -#else -#define OPENCV_HAL_IMPL_FALLBACK_INITVEC(_Tpvec, _Tp, suffix, _Tps) \ -inline _Tpvec v_setzero_##suffix() { return _Tpvec((_Tps)0, (_Tps)0); } \ -inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec((_Tps)v, (_Tps)v); } \ -template inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0& a) \ -{ return _Tpvec(a.val); } - -OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_uint64x2, uint64, u64, int64) -OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_int64x2, int64, s64, int64) 
-OPENCV_HAL_IMPL_FALLBACK_INITVEC(v_float64x2, double, f64, double) -#endif //////////////// PACK /////////////// inline v_uint8x16 v_pack(const v_uint16x8& a, const v_uint16x8& b) @@ -1931,28 +1900,18 @@ inline v_int16x8 v_rshr_pack(const v_int32x4& a, const v_int32x4& b) template inline v_uint32x4 v_rshr_pack(const v_uint64x2& a, const v_uint64x2& b) { -#ifdef __wasm_unimplemented_simd128__ v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1))); v128_t a1 = wasm_u64x2_shr(wasm_i64x2_add(a.val, delta), n); v128_t b1 = wasm_u64x2_shr(wasm_i64x2_add(b.val, delta), n); return v_uint32x4(wasm_v8x16_shuffle(a1, b1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27)); -#else - fallback::v_uint64x2 a_(a), b_(b); - return fallback::v_rshr_pack(a_, b_); -#endif } template inline v_int32x4 v_rshr_pack(const v_int64x2& a, const v_int64x2& b) { -#ifdef __wasm_unimplemented_simd128__ v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1))); v128_t a1 = wasm_i64x2_shr(wasm_i64x2_add(a.val, delta), n); v128_t b1 = wasm_i64x2_shr(wasm_i64x2_add(b.val, delta), n); return v_int32x4(wasm_v8x16_shuffle(a1, b1, 0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27)); -#else - fallback::v_int64x2 a_(a), b_(b); - return fallback::v_rshr_pack(a_, b_); -#endif } template inline v_uint8x16 v_rshr_pack_u(const v_int16x8& a, const v_int16x8& b) @@ -2139,7 +2098,6 @@ inline void v_rshr_pack_store(short* ptr, const v_int32x4& a) template inline void v_rshr_pack_store(unsigned* ptr, const v_uint64x2& a) { -#ifdef __wasm_unimplemented_simd128__ v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1))); v128_t a1 = wasm_u64x2_shr(wasm_i64x2_add(a.val, delta), n); v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11); @@ -2148,15 +2106,10 @@ inline void v_rshr_pack_store(unsigned* ptr, const v_uint64x2& a) for (int i=0; i<2; ++i) { ptr[i] = t_ptr[i]; } -#else - fallback::v_uint64x2 _a(a); - fallback::v_rshr_pack_store(ptr, _a); -#endif } template inline void v_rshr_pack_store(int* ptr, const v_int64x2& a) { 
-#ifdef __wasm_unimplemented_simd128__ v128_t delta = wasm_i64x2_splat(((int64)1 << (n-1))); v128_t a1 = wasm_i64x2_shr(wasm_i64x2_add(a.val, delta), n); v128_t r = wasm_v8x16_shuffle(a1, a1, 0,1,2,3,8,9,10,11,0,1,2,3,8,9,10,11); @@ -2165,10 +2118,6 @@ inline void v_rshr_pack_store(int* ptr, const v_int64x2& a) for (int i=0; i<2; ++i) { ptr[i] = t_ptr[i]; } -#else - fallback::v_int64x2 _a(a); - fallback::v_rshr_pack_store(ptr, _a); -#endif } template inline void v_rshr_pack_u_store(uchar* ptr, const v_int16x8& a) @@ -2228,7 +2177,6 @@ inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uin const v_uint64x2& d, const v_uint64x2& e, const v_uint64x2& f, const v_uint64x2& g, const v_uint64x2& h) { -#ifdef __wasm_unimplemented_simd128__ v128_t maxval = wasm_i32x4_splat(255); v128_t a1 = wasm_v128_bitselect(maxval, a.val, ((__u64x2)(a.val) > (__u64x2)maxval)); v128_t b1 = wasm_v128_bitselect(maxval, b.val, ((__u64x2)(b.val) > (__u64x2)maxval)); @@ -2245,10 +2193,6 @@ inline v_uint8x16 v_pack_b(const v_uint64x2& a, const v_uint64x2& b, const v_uin v128_t abcd = wasm_v8x16_shuffle(ab, cd, 0,1,2,3,16,17,18,19,0,1,2,3,16,17,18,19); v128_t efgh = wasm_v8x16_shuffle(ef, gh, 0,1,2,3,16,17,18,19,0,1,2,3,16,17,18,19); return v_uint8x16(wasm_v8x16_shuffle(abcd, efgh, 0,1,2,3,4,5,6,7,16,17,18,19,20,21,22,23)); -#else - fallback::v_uint64x2 a_(a), b_(b), c_(c), d_(d), e_(e), f_(f), g_(g), h_(h); - return fallback::v_pack_b(a_, b_, c_, d_, e_, f_, g_, h_); -#endif } inline v_float32x4 v_matmul(const v_float32x4& v, const v_float32x4& m0, @@ -2310,8 +2254,6 @@ OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_float32x4, wasm_f32x4_add) OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_float32x4, wasm_f32x4_sub) OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_float32x4, wasm_f32x4_mul) OPENCV_HAL_IMPL_WASM_BIN_OP(/, v_float32x4, wasm_f32x4_div) - -#ifdef __wasm_unimplemented_simd128__ OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_uint64x2, wasm_i64x2_add) OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_uint64x2, wasm_i64x2_sub) 
OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_int64x2, wasm_i64x2_add) @@ -2320,30 +2262,6 @@ OPENCV_HAL_IMPL_WASM_BIN_OP(+, v_float64x2, wasm_f64x2_add) OPENCV_HAL_IMPL_WASM_BIN_OP(-, v_float64x2, wasm_f64x2_sub) OPENCV_HAL_IMPL_WASM_BIN_OP(*, v_float64x2, wasm_f64x2_mul) OPENCV_HAL_IMPL_WASM_BIN_OP(/, v_float64x2, wasm_f64x2_div) -#else -#define OPENCV_HAL_IMPL_FALLBACK_BIN_OP(bin_op, _Tpvec) \ -inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ -{ \ - fallback::_Tpvec a_(a), b_(b); \ - return _Tpvec((a_) bin_op (b_)); \ -} \ -inline _Tpvec& operator bin_op##= (_Tpvec& a, const _Tpvec& b) \ -{ \ - fallback::_Tpvec a_(a), b_(b); \ - a_ bin_op##= b_; \ - a = _Tpvec(a_); \ - return a; \ -} - -OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_uint64x2) -OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_uint64x2) -OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_int64x2) -OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_int64x2) -OPENCV_HAL_IMPL_FALLBACK_BIN_OP(+, v_float64x2) -OPENCV_HAL_IMPL_FALLBACK_BIN_OP(-, v_float64x2) -OPENCV_HAL_IMPL_FALLBACK_BIN_OP(*, v_float64x2) -OPENCV_HAL_IMPL_FALLBACK_BIN_OP(/, v_float64x2) -#endif // saturating multiply 8-bit, 16-bit #define OPENCV_HAL_IMPL_WASM_MUL_SAT(_Tpvec, _Tpwvec) \ @@ -2405,19 +2323,11 @@ inline void v_mul_expand(const v_uint16x8& a, const v_uint16x8& b, inline void v_mul_expand(const v_uint32x4& a, const v_uint32x4& b, v_uint64x2& c, v_uint64x2& d) { -#ifdef __wasm_unimplemented_simd128__ v_uint64x2 a0, a1, b0, b1; v_expand(a, a0, a1); v_expand(b, b0, b1); c.val = ((__u64x2)(a0.val) * (__u64x2)(b0.val)); d.val = ((__u64x2)(a1.val) * (__u64x2)(b1.val)); -#else - fallback::v_uint32x4 a_(a), b_(b); - fallback::v_uint64x2 c_, d_; - fallback::v_mul_expand(a_, b_, c_, d_); - c = v_uint64x2(c_); - d = v_uint64x2(d_); -#endif } inline v_int16x8 v_mul_hi(const v_int16x8& a, const v_int16x8& b) @@ -2457,7 +2367,6 @@ inline v_int32x4 v_dotprod(const v_int16x8& a, const v_int16x8& b, const v_int32 inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b) { 
-#ifdef __wasm_unimplemented_simd128__ v128_t a0 = wasm_i64x2_shr(wasm_i64x2_shl(a.val, 32), 32); v128_t a1 = wasm_i64x2_shr(a.val, 32); v128_t b0 = wasm_i64x2_shr(wasm_i64x2_shl(b.val, 32), 32); @@ -2465,22 +2374,10 @@ inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b) v128_t c = (v128_t)((__i64x2)a0 * (__i64x2)b0); v128_t d = (v128_t)((__i64x2)a1 * (__i64x2)b1); return v_int64x2(wasm_i64x2_add(c, d)); -#else - fallback::v_int32x4 a_(a); - fallback::v_int32x4 b_(b); - return fallback::v_dotprod(a_, b_); -#endif } inline v_int64x2 v_dotprod(const v_int32x4& a, const v_int32x4& b, const v_int64x2& c) { -#ifdef __wasm_unimplemented_simd128__ return v_dotprod(a, b) + c; -#else - fallback::v_int32x4 a_(a); - fallback::v_int32x4 b_(b); - fallback::v_int64x2 c_(c); - return fallback::v_dotprod(a_, b_, c_); -#endif } // 8 >> 32 @@ -2515,32 +2412,32 @@ inline v_int32x4 v_dotprod_expand(const v_int8x16& a, const v_int8x16& b, const // 16 >> 64 inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b) { - fallback::v_uint16x8 a_(a); - fallback::v_uint16x8 b_(b); - return fallback::v_dotprod_expand(a_, b_); + v128_t a0 = wasm_u32x4_shr(wasm_i32x4_shl(a.val, 16), 16); + v128_t a1 = wasm_u32x4_shr(a.val, 16); + v128_t b0 = wasm_u32x4_shr(wasm_i32x4_shl(b.val, 16), 16); + v128_t b1 = wasm_u32x4_shr(b.val, 16); + return v_uint64x2(( + v_dotprod(v_int32x4(a0), v_int32x4(b0)) + + v_dotprod(v_int32x4(a1), v_int32x4(b1))).val + ); } inline v_uint64x2 v_dotprod_expand(const v_uint16x8& a, const v_uint16x8& b, const v_uint64x2& c) -{ - fallback::v_uint16x8 a_(a); - fallback::v_uint16x8 b_(b); - fallback::v_uint64x2 c_(c); - return fallback::v_dotprod_expand(a_, b_, c_); -} +{ return v_dotprod_expand(a, b) + c; } inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b) { - fallback::v_int16x8 a_(a); - fallback::v_int16x8 b_(b); - return fallback::v_dotprod_expand(a_, b_); + v128_t a0 = wasm_i32x4_shr(wasm_i32x4_shl(a.val, 16), 16); + 
v128_t a1 = wasm_i32x4_shr(a.val, 16); + v128_t b0 = wasm_i32x4_shr(wasm_i32x4_shl(b.val, 16), 16); + v128_t b1 = wasm_i32x4_shr(b.val, 16); + return v_int64x2(( + v_dotprod(v_int32x4(a0), v_int32x4(b0)) + + v_dotprod(v_int32x4(a1), v_int32x4(b1))) + ); } inline v_int64x2 v_dotprod_expand(const v_int16x8& a, const v_int16x8& b, const v_int64x2& c) -{ - fallback::v_int16x8 a_(a); - fallback::v_int16x8 b_(b); - fallback::v_int64x2 c_(c); - return fallback::v_dotprod_expand(a_, b_, c_); -} +{ return v_dotprod_expand(a, b) + c; } // 32 >> 64f inline v_float64x2 v_dotprod_expand(const v_int32x4& a, const v_int32x4& b) @@ -2610,44 +2507,24 @@ OPENCV_HAL_IMPL_WASM_LOGIC_OP(v_float64x2) inline v_float32x4 v_sqrt(const v_float32x4& x) { -#ifdef __wasm_unimplemented_simd128__ return v_float32x4(wasm_f32x4_sqrt(x.val)); -#else - fallback::v_float32x4 x_(x); - return fallback::v_sqrt(x_); -#endif } inline v_float32x4 v_invsqrt(const v_float32x4& x) { -#ifdef __wasm_unimplemented_simd128__ const v128_t _1_0 = wasm_f32x4_splat(1.0); return v_float32x4(wasm_f32x4_div(_1_0, wasm_f32x4_sqrt(x.val))); -#else - fallback::v_float32x4 x_(x); - return fallback::v_invsqrt(x_); -#endif } inline v_float64x2 v_sqrt(const v_float64x2& x) { -#ifdef __wasm_unimplemented_simd128__ return v_float64x2(wasm_f64x2_sqrt(x.val)); -#else - fallback::v_float64x2 x_(x); - return fallback::v_sqrt(x_); -#endif } inline v_float64x2 v_invsqrt(const v_float64x2& x) { -#ifdef __wasm_unimplemented_simd128__ const v128_t _1_0 = wasm_f64x2_splat(1.0); return v_float64x2(wasm_f64x2_div(_1_0, wasm_f64x2_sqrt(x.val))); -#else - fallback::v_float64x2 x_(x); - return fallback::v_invsqrt(x_); -#endif } #define OPENCV_HAL_IMPL_WASM_ABS_INT_FUNC(_Tpuvec, _Tpsvec, suffix, zsuffix, shiftWidth) \ @@ -2666,12 +2543,7 @@ inline v_float32x4 v_abs(const v_float32x4& x) { return v_float32x4(wasm_f32x4_abs(x.val)); } inline v_float64x2 v_abs(const v_float64x2& x) { -#ifdef __wasm_unimplemented_simd128__ return 
v_float64x2(wasm_f64x2_abs(x.val)); -#else - fallback::v_float64x2 x_(x); - return fallback::v_abs(x_); -#endif } // TODO: exp, log, sin, cos @@ -2684,21 +2556,8 @@ inline _Tpvec func(const _Tpvec& a, const _Tpvec& b) \ OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_min, wasm_f32x4_min) OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float32x4, v_max, wasm_f32x4_max) - -#ifdef __wasm_unimplemented_simd128__ OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float64x2, v_min, wasm_f64x2_min) OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_float64x2, v_max, wasm_f64x2_max) -#else -#define OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(func) \ -inline v_float64x2 func(const v_float64x2& a, const v_float64x2& b) \ -{ \ - fallback::v_float64x2 a_(a), b_(b); \ - return fallback::func(a_, b_); \ -} - -OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(v_min) -OPENCV_HAL_IMPL_WASM_MINMAX_64f_FUNC(v_max) -#endif #define OPENCV_HAL_IMPL_WASM_MINMAX_S_INIT_FUNC(_Tpvec, suffix) \ inline _Tpvec v_min(const _Tpvec& a, const _Tpvec& b) \ @@ -2753,24 +2612,7 @@ OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int16x8, i16x8, i16x8) OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_uint32x4, u32x4, i32x4) OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_int32x4, i32x4, i32x4) OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_float32x4, f32x4, f32x4) - -#ifdef __wasm_unimplemented_simd128__ OPENCV_HAL_IMPL_WASM_INIT_CMP_OP(v_float64x2, f64x2, f64x2) -#else -#define OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(_Tpvec, bin_op) \ -inline _Tpvec operator bin_op (const _Tpvec& a, const _Tpvec& b) \ -{ \ - fallback::_Tpvec a_(a), b_(b); \ - return _Tpvec((a_) bin_op (b_));\ -} \ - -OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, ==) -OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, !=) -OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, <) -OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, >) -OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, <=) -OPENCV_HAL_IMPL_INIT_FALLBACK_CMP_OP(v_float64x2, >=) -#endif #define OPENCV_HAL_IMPL_WASM_64BIT_CMP_OP(_Tpvec, cast) \ inline _Tpvec operator == (const _Tpvec& a, const _Tpvec& b) \ 
@@ -2789,14 +2631,9 @@ inline v_float32x4 v_not_nan(const v_float32x4& a) } inline v_float64x2 v_not_nan(const v_float64x2& a) { -#ifdef __wasm_unimplemented_simd128__ v128_t z = wasm_i64x2_splat(0x7fffffffffffffff); v128_t t = wasm_i64x2_splat(0x7ff0000000000000); return v_float64x2((__u64x2)(wasm_v128_and(a.val, z)) < (__u64x2)t); -#else - fallback::v_float64x2 a_(a); - return fallback::v_not_nan(a_); -#endif } OPENCV_HAL_IMPL_WASM_BIN_FUNC(v_uint8x16, v_add_wrap, wasm_i8x16_add) @@ -2877,32 +2714,30 @@ inline v_float32x4 v_absdiff(const v_float32x4& a, const v_float32x4& b) } inline v_float64x2 v_absdiff(const v_float64x2& a, const v_float64x2& b) { -#ifdef __wasm_unimplemented_simd128__ v128_t absmask_vec = wasm_u64x2_shr(wasm_i32x4_splat(-1), 1); return v_float64x2(wasm_v128_and(wasm_f64x2_sub(a.val, b.val), absmask_vec)); -#else - fallback::v_float64x2 a_(a), b_(b); - return fallback::v_absdiff(a_, b_); -#endif } -#define OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(_Tpvec) \ +#define OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(_Tpvec, suffix) \ inline _Tpvec v_magnitude(const _Tpvec& a, const _Tpvec& b) \ { \ - fallback::_Tpvec a_(a), b_(b); \ - return fallback::v_magnitude(a_, b_); \ + v128_t a_Square = wasm_##suffix##_mul(a.val, a.val); \ + v128_t b_Square = wasm_##suffix##_mul(b.val, b.val); \ + return _Tpvec(wasm_##suffix##_sqrt(wasm_##suffix##_add(a_Square, b_Square))); \ } \ inline _Tpvec v_sqr_magnitude(const _Tpvec& a, const _Tpvec& b) \ { \ - return v_fma(a, a, b*b); \ + v128_t a_Square = wasm_##suffix##_mul(a.val, a.val); \ + v128_t b_Square = wasm_##suffix##_mul(b.val, b.val); \ + return _Tpvec(wasm_##suffix##_add(a_Square, b_Square)); \ } \ inline _Tpvec v_muladd(const _Tpvec& a, const _Tpvec& b, const _Tpvec& c) \ { \ - return v_fma(a, b, c); \ + return _Tpvec(wasm_##suffix##_add(wasm_##suffix##_mul(a.val, b.val), c.val)); \ } -OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float32x4) -OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float64x2) +OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float32x4, 
f32x4) +OPENCV_HAL_IMPL_WASM_MISC_FLT_OP(v_float64x2, f64x2) #define OPENCV_HAL_IMPL_WASM_SHIFT_OP(_Tpuvec, _Tpsvec, suffix, ssuffix) \ inline _Tpuvec operator << (const _Tpuvec& a, int imm) \ @@ -2945,37 +2780,7 @@ inline _Tpsvec v_shr(const _Tpsvec& a) \ OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint8x16, v_int8x16, i8x16, u8x16) OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint16x8, v_int16x8, i16x8, u16x8) OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint32x4, v_int32x4, i32x4, u32x4) - -#ifdef __wasm_unimplemented_simd128__ OPENCV_HAL_IMPL_WASM_SHIFT_OP(v_uint64x2, v_int64x2, i64x2, u64x2) -#else -#define OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(_Tpvec) \ -inline _Tpvec operator << (const _Tpvec& a, int imm) \ -{ \ - fallback::_Tpvec a_(a); \ - return a_ << imm; \ -} \ -inline _Tpvec operator >> (const _Tpvec& a, int imm) \ -{ \ - fallback::_Tpvec a_(a); \ - return a_ >> imm; \ -} \ -template \ -inline _Tpvec v_shl(const _Tpvec& a) \ -{ \ - fallback::_Tpvec a_(a); \ - return fallback::v_shl(a_); \ -} \ -template \ -inline _Tpvec v_shr(const _Tpvec& a) \ -{ \ - fallback::_Tpvec a_(a); \ - return fallback::v_shr(a_); \ -} \ - -OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(v_uint64x2) -OPENCV_HAL_IMPL_FALLBACK_SHIFT_OP(v_int64x2) -#endif namespace hal_wasm_internal { @@ -3180,9 +2985,18 @@ OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint8x16, unsigned) OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int8x16, int) OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint16x8, unsigned) OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int16x8, int) -OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_uint64x2, uint64) -OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_int64x2, int64) -OPENCV_HAL_IMPL_FALLBACK_REDUCE_OP_SUM(v_float64x2, double) + + +#define OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(_Tpvec, scalartype, regtype, suffix, esuffix) \ +inline scalartype v_reduce_sum(const _Tpvec& a) \ +{ \ + regtype val = a.val; \ + val = wasm_##suffix##_add(val, wasm_v8x16_shuffle(val, val, 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7)); \ + return 
(scalartype)wasm_##esuffix##_extract_lane(val, 0); \ +} +OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_uint64x2, uint64, v128_t, i64x2, i64x2) +OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_int64x2, int64, v128_t, i64x2, i64x2) +OPENCV_HAL_IMPL_WASM_REDUCE_OP_2_SUM(v_float64x2, double, v128_t, f64x2,f64x2) inline v_float32x4 v_reduce_sum4(const v_float32x4& a, const v_float32x4& b, const v_float32x4& c, const v_float32x4& d) @@ -3318,30 +3132,27 @@ OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int16x8, i16x8, short) OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_uint32x4, i32x4, int) OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_int32x4, i32x4, int) OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_float32x4, i32x4, float) +OPENCV_HAL_IMPL_WASM_CHECK_SIGNS(v_float64x2, f64x2, double) + +#define OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(_Tpvec, suffix, esuffix) \ +inline bool v_check_all(const _Tpvec& a) \ +{ \ + v128_t masked = v_reinterpret_as_##esuffix(a).val; \ + masked = wasm_i32x4_replace_lane(masked, 0, 0xffffffff); \ + masked = wasm_i32x4_replace_lane(masked, 2, 0xffffffff); \ + return wasm_i8x16_all_true(wasm_##suffix##_lt(masked, wasm_##suffix##_splat(0))); \ +} \ +inline bool v_check_any(const _Tpvec& a) \ +{ \ + v128_t masked = v_reinterpret_as_##esuffix(a).val; \ + masked = wasm_i32x4_replace_lane(masked, 0, 0x0); \ + masked = wasm_i32x4_replace_lane(masked, 2, 0x0); \ + return wasm_i8x16_any_true(wasm_##suffix##_lt(masked, wasm_##suffix##_splat(0))); \ +} \ + +OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(v_int64x2, i32x4, s32) +OPENCV_HAL_IMPL_WASM_CHECK_ALL_ANY(v_uint64x2, i32x4, u32) -inline int v_signmask(const v_float64x2& a) -{ - fallback::v_float64x2 a_(a); - return fallback::v_signmask(a_); -} -inline bool v_check_all(const v_float64x2& a) -{ -#ifdef __wasm_unimplemented_simd128__ - return wasm_i8x16_all_true((__i64x2)(a.val) < (__i64x2)(wasm_i64x2_splat(0))); -#else - fallback::v_float64x2 a_(a); - return fallback::v_check_all(a_); -#endif -} -inline bool v_check_any(const v_float64x2& a) -{ -#ifdef 
__wasm_unimplemented_simd128__ - return wasm_i8x16_any_true((__i64x2)(a.val) < (__i64x2)(wasm_i64x2_splat(0)));; -#else - fallback::v_float64x2 a_(a); - return fallback::v_check_any(a_); -#endif -} inline int v_scan_forward(const v_int8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); } inline int v_scan_forward(const v_uint8x16& a) { return trailingZeros32(v_signmask(v_reinterpret_as_s8(a))); } @@ -3366,8 +3177,8 @@ OPENCV_HAL_IMPL_WASM_SELECT(v_uint16x8) OPENCV_HAL_IMPL_WASM_SELECT(v_int16x8) OPENCV_HAL_IMPL_WASM_SELECT(v_uint32x4) OPENCV_HAL_IMPL_WASM_SELECT(v_int32x4) -// OPENCV_HAL_IMPL_WASM_SELECT(v_uint64x2) -// OPENCV_HAL_IMPL_WASM_SELECT(v_int64x2) +OPENCV_HAL_IMPL_WASM_SELECT(v_uint64x2) +OPENCV_HAL_IMPL_WASM_SELECT(v_int64x2) OPENCV_HAL_IMPL_WASM_SELECT(v_float32x4) OPENCV_HAL_IMPL_WASM_SELECT(v_float64x2) diff --git a/modules/js/CMakeLists.txt b/modules/js/CMakeLists.txt index 62fd1bac9f..f3a625b37e 100644 --- a/modules/js/CMakeLists.txt +++ b/modules/js/CMakeLists.txt @@ -175,3 +175,22 @@ endforeach() add_custom_target(${PROJECT_NAME}_perf ALL DEPENDS ${OCV_JS_PATH} ${opencv_perf_js_file_deps}) + +#loader +set(opencv_loader_js_bin_dir "${EXECUTABLE_OUTPUT_PATH}") +set(loader_dir ${CMAKE_CURRENT_SOURCE_DIR}/src) + +set(opencv_loader_js_file_deps "") + +# make sure the build directory exists +file(MAKE_DIRECTORY "${opencv_loader_js_bin_dir}") + +add_custom_command( + TARGET ${PROJECT_NAME} POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy + ${loader_dir}/loader.js + ${opencv_loader_js_bin_dir}/loader.js) +list(APPEND opencv_loader_js_file_deps "${loader_dir}/loader.js" "${opencv_loader_js_bin_dir}/loader.js") + +add_custom_target(${PROJECT_NAME}_loader ALL + DEPENDS ${OCV_JS_PATH} ${opencv_loader_js_file_deps}) \ No newline at end of file diff --git a/modules/js/perf/base.js b/modules/js/perf/base.js index 6c2e772e30..3948f21254 100644 --- a/modules/js/perf/base.js +++ b/modules/js/perf/base.js @@ -2,17 +2,28 @@ if (typeof window === 
'undefined') { var cv = require("../opencv"); } -const cvSize = { - szODD: new cv.Size(127, 61), - szQVGA: new cv.Size(320, 240), - szVGA: new cv.Size(640, 480), - szqHD: new cv.Size(960, 540), - sz720p: new cv.Size(1280, 720), - sz1080p: new cv.Size(1920, 1080), - sz130x60: new cv.Size(130, 60), - sz213x120: new cv.Size(120 * 1280 / 720, 120), +let gCvSize; + +function getCvSize() { + if (gCvSize === undefined) { + gCvSize = { + szODD: new cv.Size(127, 61), + szQVGA: new cv.Size(320, 240), + szVGA: new cv.Size(640, 480), + szSVGA: new cv.Size(800, 600), + szqHD: new cv.Size(960, 540), + szXGA: new cv.Size(1024, 768), + sz720p: new cv.Size(1280, 720), + szSXGA: new cv.Size(1280, 1024), + sz1080p: new cv.Size(1920, 1080), + sz130x60: new cv.Size(130, 60), + sz213x120: new cv.Size(120 * 1280 / 720, 120), + }; + } + + return gCvSize; } if (typeof window === 'undefined') { - exports.cvSize = cvSize; + exports.getCvSize = getCvSize; } \ No newline at end of file diff --git a/modules/js/perf/perf_64bits.html b/modules/js/perf/perf_64bits.html new file mode 100644 index 0000000000..efbe808fbd --- /dev/null +++ b/modules/js/perf/perf_64bits.html @@ -0,0 +1,67 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Functions for 64-bit Perf

+ CountnonZero, Mat::dot, Split, Merge +
+
+

Mat Shape

+ for example: (1000x1000) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_64bits.js b/modules/js/perf/perf_64bits.js new file mode 100644 index 0000000000..dc4e234d4c --- /dev/null +++ b/modules/js/perf/perf_64bits.js @@ -0,0 +1,180 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + } else { + runButton.removeAttribute('disabled'); + runButton.setAttribute('class', 'btn btn-primary'); + runButton.innerHTML = 'Run'; + } + let totalCaseNum, currentCaseId; + + + function addCountNonZeroCase(suite) { + suite.add('countNonZero', function() { + cv.countNonZero(mat); + }, { + 'setup': function() { + let size = this.params.size; + let mat = cv.Mat.eye(size[0], size[1], cv.CV_64F); + }, 'teardown': function() { + mat.delete(); + } + }); + } + + function addMatDotCase(suite) { + suite.add('Mat::dot', function() { + mat.dot(matT); + }, { + 'setup': function() { + let size = this.params.size; + let mat = cv.Mat.ones(size[0], size[1], cv.CV_64FC1); + let matT = mat.t(); + }, 'teardown': function() { + mat.delete(); + matT.delete(); + } + }); + } + + function addSplitCase(suite) { + suite.add('Split', function() { + cv.split(mat, planes); + }, { + 'setup': function() { + let size = this.params.size; + let mat = cv.Mat.ones(size[0], size[1], cv.CV_64FC3); + let planes = new cv.MatVector(); + }, 'teardown': function() { + mat.delete(); + planes.delete(); + } + }); + } + + function addMergeCase(suite) { + suite.add('Merge', function() { + cv.merge(planes, mat); + }, { + 'setup': function() { + let size = this.params.size; + let mat = new cv.Mat(); + let mat1 = cv.Mat.ones(size[0], size[1], 
cv.CV_64FC3); + let planes = new cv.MatVector(); + cv.split(mat1, planes); + }, 'teardown': function() { + mat.delete(); + mat1.delete(); + planes.delete(); + } + }); + } + + function setInitParams(suite, sizeArray) { + for( let i =0; i < suite.length; i++) { + suite[i].params = { + size: sizeArray + }; + } + } + + function log(message) { + console.log(message); + if (!isNodeJs) { + logElement.innerHTML += `\n${'\t' + message}`; + } + } + + function setBenchmarkSuite(suite) { + suite + // add listeners + .on('cycle', function(event) { + ++currentCaseId; + let size = event.target.params.size; + log(`=== ${event.target.name} ${currentCaseId} ===`); + log(`params: (${parseInt(size[0])}x${parseInt(size[1])})`); + log('elapsed time:' +String(event.target.times.elapsed*1000)+' ms'); + log('mean time:' +String(event.target.stats.mean*1000)+' ms'); + log('stddev time:' +String(event.target.stats.deviation*1000)+' ms'); + log(String(event.target)); + }) + .on('error', function(event) { log(`test case ${event.target.name} failed`); }) + .on('complete', function(event) { + log(`\n ###################################`) + log(`Finished testing ${event.currentTarget.length} cases \n`); + if (!isNodeJs) { + runButton.removeAttribute('disabled'); + runButton.setAttribute('class', 'btn btn-primary'); + runButton.innerHTML = 'Run'; + } + }); + } + + function genBenchmarkCase(paramsContent) { + let suite = new Benchmark.Suite; + var sizeArray; + totalCaseNum = 4; + currentCaseId = 0; + if (/\([0-9]+x[0-9]+\)/g.test(paramsContent.toString())) { + let params = paramsContent.toString().match(/\([0-9]+x[0-9]+\)/g)[0]; + let sizeStrs = (params.match(/[0-9]+/g) || []).slice(0, 2).toString().split(","); + sizeArray = sizeStrs.map(Number); + } else { + log("no getting invalid params, run all the cases with Mat of shape (1000 x 1000)"); + sizeArray = [1000, 1000]; + } + addCountNonZeroCase(suite); + addMatDotCase(suite); + addSplitCase(suite); + addMergeCase(suite); + setInitParams(suite, 
sizeArray) + setBenchmarkSuite(suite); + log(`Running ${totalCaseNum} tests from 64-bit intrinsics`); + suite.run({ 'async': true }); // run the benchmark + } + + + // set test filter params + if (isNodeJs) { + const args = process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + runButton.setAttribute("disabled", "disabled"); + runButton.setAttribute('class', 'btn btn-primary disabled'); + runButton.innerHTML = "Running"; + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_helpfunc.js b/modules/js/perf/perf_helpfunc.js index e07e3a297e..e42f4ad807 100644 --- a/modules/js/perf/perf_helpfunc.js +++ b/modules/js/perf/perf_helpfunc.js @@ -16,14 +16,57 @@ var fillGradient = function(cv, img, delta=5) { } } +var smoothBorder = function(cv, img, color, delta=5) { + let ch = img.channels(); + console.assert(!img.empty() && img.depth() == cv.CV_8U && ch <= 4); + + let n = 100/delta; + let nR = Math.min(n, (img.rows+1)/2); + let nC = Math.min(n, (img.cols+1)/2); + let s = new cv.Scalar(); + + for (let r = 0; r < nR; r++) { + let k1 = r*delta/100.0, k2 = 1-k1; + for(let c = 0; c < img.cols; c++) { + let view = img.ptr(r, c); + for(let i = 0; i < ch; i++) s[i] = view[i]; + for(let i = 0; i < ch; i++) view[i] = s[i]*k1 + color[i] * k2; + } + for(let c=0; c < img.cols; c++) { + let view = img.ptr(img.rows-r-1, c); + for(let i = 0; i < ch; i++) s[i] = view[i]; + for(let i = 0; i < ch; i++) view[i] = s[i]*k1 + color[i] * k2; + } + } + for (let r = 0; r < img.rows; r++) { + 
for(let c = 0; c < nC; c++) { + let k1 = c*delta/100.0, k2 = 1-k1; + let view = img.ptr(r, c); + for(let i = 0; i < ch; i++) s[i] = view[i]; + for(let i = 0; i < ch; i++) view[i] = s[i]*k1 + color[i] * k2; + } + for(let c = 0; c < n; c++) { + let k1 = c*delta/100.0, k2 = 1-k1; + let view = img.ptr(r, img.cols-c-1); + for(let i = 0; i < ch; i++) s[i] = view[i]; + for(let i = 0; i < ch; i++) view[i] = s[i]*k1 + color[i] * k2; + } + } +} + var cvtStr2cvSize = function(strSize) { let size; + + let cvSize = getCvSize(); switch(strSize) { case "127,61": size = cvSize.szODD;break; case '320,240': size = cvSize.szQVGA;break; case '640,480': size = cvSize.szVGA;break; + case '800,600': size = cvSize.szSVGA;break; case '960,540': size = cvSize.szqHD;break; + case '1024,768': size = cvSize.szXGA;break; case '1280,720': size = cvSize.sz720p;break; + case '1280,1024': size = cvSize.szSXGA;break; case '1920,1080': size = cvSize.sz1080p;break; case "130,60": size = cvSize.sz130x60;break; case '213,120': size = cvSize.sz213x120;break; @@ -52,8 +95,209 @@ function permute (source, target) { return result; } +var constructMode = function (startStr, sChannel, dChannel) { + let modeList = [] + for (let j in dChannel) { + modeList.push(startStr+sChannel+"2"+dChannel[j]) + } + return modeList; +} + +var enableButton = function () { + runButton.removeAttribute('disabled'); + runButton.setAttribute('class', 'btn btn-primary'); + runButton.innerHTML = 'Run'; +} + +var disableButton = function () { + runButton.setAttribute("disabled", "disabled"); + runButton.setAttribute('class', 'btn btn-primary disabled'); + runButton.innerHTML = "Running"; +} + +var log = function (message) { + console.log(message); + if (!isNodeJs) { + logElement.innerHTML += `\n${'\t' + message}`; + } +} + +var addKernelCase = function (suite, params, type, kernelFunc) { + kernelFunc(suite, type); + let index = suite.length - 1; + suite[index].params = params; +} + +function constructParamLog(params, kernel) { + let 
paramLog = ''; + if (kernel == "cvtcolor") { + let mode = params.mode; + let size = params.size; + paramLog = `params: (${parseInt(size[0])}x${parseInt(size[1])}, ${mode})`; + } else if (kernel == "resize") { + let matType = params.matType; + let size1 = params.from; + let size2 = params.to; + paramLog = `params: (${matType},${parseInt(size1.width)}x${parseInt(size1.height)},`+ + `${parseInt(size2.width)}x${parseInt(size2.height)})`; + } else if (kernel == "threshold") { + let matSize = params.matSize; + let matType = params.matType; + let threshType = params.threshType; + paramLog = `params: (${parseInt(matSize.width)}x${parseInt(matSize.height)},`+ + `${matType},${threshType})`; + } else if (kernel == "sobel") { + let size = params.size; + let ddepth = params.ddepth; + let dxdy = params.dxdy; + let ksize = params.ksize; + let borderType = params.borderType; + paramLog = `params: (${parseInt(size[0])}x${parseInt(size[1])},`+ + `${ddepth},${dxdy},${borderType}, ksize:${ksize})`; + } else if (kernel == "filter2d") { + let size = params.size; + let ksize = params.ksize; + let borderMode = params.borderMode; + paramLog = `params: (${parseInt(size.width)}x${parseInt(size.height)},`+ + `${ksize},${borderMode})`; + } else if (kernel == "scharr") { + let size = params.size; + let ddepth = params.ddepth; + let dxdy = params.dxdy; + let borderType = params.borderType; + paramLog = `params: (${parseInt(size[0])}x${parseInt(size[1])},`+ + `${ddepth},${dxdy},${borderType})`; + } else if (kernel == "gaussianBlur" || kernel == "blur") { + let size = params.size; + let matType = params.matType; + let borderType = params.borderType; + let ksize = params.ksize; + paramLog = `params: (${parseInt(size.width)}x${parseInt(size.height)},`+ + `${matType},${borderType}, ksize: (${ksize}x${ksize}))`; + } else if (kernel == "medianBlur") { + let size = params.size; + let matType = params.matType; + let ksize = params.ksize; + paramLog = `params: 
(${parseInt(size.width)}x${parseInt(size.height)},`+ + `${matType}, ksize: ${ksize})`; + } else if (kernel == "erode" || kernel == "dilate" || kernel == "pyrDown") { + let size = params.size; + let matType = params.matType; + paramLog = `params: (${parseInt(size.width)}x${parseInt(size.height)},`+ + `${matType})`; + } else if (kernel == "remap") { + let size = params.size; + let matType = params.matType; + let mapType = params.mapType; + let interType = params.interType; + paramLog = `params: (${parseInt(size.width)}x${parseInt(size.height)},`+ + `${matType}, ${mapType}, ${interType})`; + } else if (kernel == "warpAffine" || kernel == "warpPerspective") { + let size = params.size; + let interType = params.interType; + let borderMode = params.borderMode; + paramLog = `params: (${parseInt(size.width)}x${parseInt(size.height)},`+ + `${interType}, ${borderMode})`; + } + return paramLog; +} + +var setBenchmarkSuite = function (suite, kernel, currentCaseId) { + suite + // add listeners + .on('cycle', function(event) { + ++currentCaseId; + let params = event.target.params; + paramLog = constructParamLog(params, kernel); + + log(`=== ${event.target.name} ${currentCaseId} ===`); + log(paramLog); + log('elapsed time:' +String(event.target.times.elapsed*1000)+' ms'); + log('mean time:' +String(event.target.stats.mean*1000)+' ms'); + log('stddev time:' +String(event.target.stats.deviation*1000)+' ms'); + log(String(event.target)); + }) + .on('error', function(event) { log(`test case ${event.target.name} failed`); }) + .on('complete', function(event) { + log(`\n ###################################`) + log(`Finished testing ${event.currentTarget.length} cases \n`); + if (!isNodeJs) { + runButton.removeAttribute('disabled'); + runButton.setAttribute('class', 'btn btn-primary'); + runButton.innerHTML = 'Run'; + } + }); +} + +var decodeParams2Case = function(paramContent, paramsList, combinations) { + let sizeString = (paramContent.match(/[0-9]+x[0-9]+/g) || []).toString(); + let 
sizes = (sizeString.match(/[0-9]+/g) || []); + let paramSize = paramsList.length; + let paramObjs = [] + let sizeCount = 0; + for (let i = 0; i < paramSize; i++) { + let param = paramsList[i]; + let paramName = param.name; + let paramValue = param.value; + let paramReg = param.reg; + let paramIndex = param.index; + + if(paramValue != "") { + paramObjs.push({name: paramName, value: paramValue, index: paramIndex}); + } else if (paramName.startsWith('size')) { + let sizeStr = sizes.slice(sizeCount, sizeCount+2).toString(); + paramValue = cvtStr2cvSize(sizeStr); + sizeCount += 2; + paramObjs.push({name: paramName, value: paramValue, index: paramIndex}); + } else { + for (let index in paramReg) { + let reg = eval(paramReg[index]); + if ('loc' in param) { + paramValue = (paramContent.match(reg) || [])[param.loc].toString(); + } else { + paramValue = (paramContent.match(reg) || []).toString(); + } + + if (paramValue != "") { + paramObjs.push({name: paramName, value: paramValue, index: paramIndex}); + break; + } + } + } + } + + let location = []; + for (let i = 0; i < combinations.length; ++i) { + let combination = combinations[i]; + for (let j = 0; j < combination.length; ++j) { + if (judgeCombin(combination[j], paramObjs)) { + location.push([i,j]); + } + } + } + return location; +} + +function judgeCombin(combination, paramObjs) { + for (let i =0; i < paramObjs.length; i++) { + if (paramObjs[i].value != combination[paramObjs[i].index]){ + return false; + } + } + return true; +} + + if (typeof window === 'undefined') { + exports.enableButton = enableButton; + exports.disableButton = disableButton; exports.fillGradient = fillGradient; + exports.smoothBorder = smoothBorder; exports.cvtStr2cvSize = cvtStr2cvSize; exports.combine = combine; + exports.constructMode = constructMode; + exports.log = log; + exports.decodeParams2Case = decodeParams2Case; + exports.setBenchmarkSuite = setBenchmarkSuite; + exports.addKernelCase = addKernelCase; } \ No newline at end of file diff 
--git a/modules/js/perf/perf_imgproc/perf_blur.html b/modules/js/perf/perf_imgproc/perf_blur.html new file mode 100644 index 0000000000..c6fae45db0 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_blur.html @@ -0,0 +1,73 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ Blur +
+
+

Parameters Filter

+ for example: (1280x720, CV_8UC1, BORDER_REPLICATE) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_blur.js b/modules/js/perf/perf_imgproc/perf_blur.js new file mode 100644 index 0000000000..59712fb478 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_blur.js @@ -0,0 +1,130 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const BlurSize = [cvSize.szODD, cvSize.szQVGA, cvSize.szVGA, cvSize.sz720p]; + const Blur5x16Size = [cvSize.szVGA, cvSize.sz720p]; + const BlurType = ["CV_8UC1", "CV_8UC4", "CV_16UC1", "CV_16SC1", "CV_32FC1"]; + const BlurType5x5 = ["CV_8UC1", "CV_8UC4", "CV_16UC1", "CV_16SC1", "CV_32FC1", "CV_32FC3"]; + const BorderType3x3 = ["BORDER_REPLICATE", "BORDER_CONSTANT"]; + const BorderTypeAll = ["BORDER_REPLICATE", "BORDER_CONSTANT", "BORDER_REFLECT", "BORDER_REFLECT101"]; + + const combiBlur3x3 = combine(BlurSize, BlurType, BorderType3x3); + const combiBlur16x16 = combine(Blur5x16Size, BlurType, BorderTypeAll); + const combiBlur5x5 = combine(Blur5x16Size, BlurType5x5, BorderTypeAll); + + function addBlurCase(suite, type) { + suite.add('blur', function() { + cv.blur(src, dst, ksize, new cv.Point(-1,-1), borderType); + }, { + 'setup': function() { + let size = this.params.size; + let matType = cv[this.params.matType]; + let borderType = cv[this.params.borderType]; + let ksizeNum = 
this.params.ksize; + let ksize = new cv.Size(ksizeNum, ksizeNum); + let src = new cv.Mat(size, matType); + let dst = new cv.Mat(size, matType); + }, + 'teardown': function() { + src.delete(); + dst.delete(); + } + }); + } + + function addBlurModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let matType = combination[i][1]; + let borderType = combination[i][2]; + let ksizeArray = [3, 16, 5]; + + let params = {size: size, matType:matType, ksize: ksizeArray[type], borderType:borderType}; + addKernelCase(suite, params, type, addBlurCase); + } + } + + function genBenchmarkCase(paramsContent) { + let suite = new Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + + if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g.test(paramsContent.toString())) { + let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g)[0]; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1}); + paramObjs.push({name:"borderMode", value: "", reg:["/BORDER\_\\w+/"], index:2}); + let locationList = decodeParams2Case(params, paramObjs,blurCombinations); + + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addBlurModeCase(suite, [blurCombinations[first][second]], first); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addBlurModeCase(suite, combiBlur3x3, 0); + addBlurModeCase(suite, combiBlur16x16, 1); + addBlurModeCase(suite, combiBlur5x5, 2); + } + setBenchmarkSuite(suite, "blur", currentCaseId); + log(`Running ${totalCaseNum} tests from blur`); + suite.run({ 'async': true }); // run the benchmark + } + + let blurCombinations = [combiBlur3x3, combiBlur16x16, combiBlur5x5]; + + if (isNodeJs) { + const args = 
process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_cvtcolor.js b/modules/js/perf/perf_imgproc/perf_cvtcolor.js index 752691ef77..b5007985cc 100644 --- a/modules/js/perf/perf_imgproc/perf_cvtcolor.js +++ b/modules/js/perf/perf_imgproc/perf_cvtcolor.js @@ -11,17 +11,17 @@ if (isNodeJs) { var logElement = document.getElementById('log'); } -cv.onRuntimeInitialized = () => { +function perf() { + console.log('opencv.js loaded'); if (isNodeJs) { global.cv = cv; global.combine = HelpFunc.combine; global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; - global.cvSize = Base.cvSize; + global.cvSize = Base.getCvSize(); } else { - runButton.removeAttribute('disabled'); - runButton.setAttribute('class', 'btn btn-primary'); - runButton.innerHTML = 'Run'; + enableButton(); + cvSize = getCvSize(); } let totalCaseNum, currentCaseId; @@ -73,127 +73,78 @@ cv.onRuntimeInitialized = () => { cv.CX_YUV2RGBA = cv.COLOR_COLORCVT_MAX + cv.COLOR_YUV2RGB }; - const CvtMode = [ - "COLOR_BGR2BGR555", "COLOR_BGR2BGR565", "COLOR_BGR2BGRA", "COLOR_BGR2GRAY", - "COLOR_BGR2HLS", "COLOR_BGR2HLS_FULL", "COLOR_BGR2HSV", "COLOR_BGR2HSV_FULL", - "COLOR_BGR2Lab", "COLOR_BGR2Luv", "COLOR_BGR2RGB", "COLOR_BGR2RGBA", "COLOR_BGR2XYZ", - "COLOR_BGR2YCrCb", "COLOR_BGR2YUV", "COLOR_BGR5552BGR", "COLOR_BGR5552BGRA", - - "COLOR_BGR5552GRAY", "COLOR_BGR5552RGB", "COLOR_BGR5552RGBA", 
"COLOR_BGR5652BGR", - "COLOR_BGR5652BGRA", "COLOR_BGR5652GRAY", "COLOR_BGR5652RGB", "COLOR_BGR5652RGBA", - - "COLOR_BGRA2BGR", "COLOR_BGRA2BGR555", "COLOR_BGRA2BGR565", "COLOR_BGRA2GRAY", "COLOR_BGRA2RGBA", - "CX_BGRA2HLS", "CX_BGRA2HLS_FULL", "CX_BGRA2HSV", "CX_BGRA2HSV_FULL", - "CX_BGRA2Lab", "CX_BGRA2Luv", "CX_BGRA2XYZ", - "CX_BGRA2YCrCb", "CX_BGRA2YUV", - - "COLOR_GRAY2BGR", "COLOR_GRAY2BGR555", "COLOR_GRAY2BGR565", "COLOR_GRAY2BGRA", - - "COLOR_HLS2BGR", "COLOR_HLS2BGR_FULL", "COLOR_HLS2RGB", "COLOR_HLS2RGB_FULL", - "CX_HLS2BGRA", "CX_HLS2BGRA_FULL", "CX_HLS2RGBA", "CX_HLS2RGBA_FULL", - - "COLOR_HSV2BGR", "COLOR_HSV2BGR_FULL", "COLOR_HSV2RGB", "COLOR_HSV2RGB_FULL", - "CX_HSV2BGRA", "CX_HSV2BGRA_FULL", "CX_HSV2RGBA", "CX_HSV2RGBA_FULL", - - "COLOR_Lab2BGR", "COLOR_Lab2LBGR", "COLOR_Lab2LRGB", "COLOR_Lab2RGB", - "CX_Lab2BGRA", "CX_Lab2LBGRA", "CX_Lab2LRGBA", "CX_Lab2RGBA", - - "COLOR_LBGR2Lab", "COLOR_LBGR2Luv", "COLOR_LRGB2Lab", "COLOR_LRGB2Luv", - "CX_LBGRA2Lab", "CX_LBGRA2Luv", "CX_LRGBA2Lab", "CX_LRGBA2Luv", - - "COLOR_Luv2BGR", "COLOR_Luv2LBGR", "COLOR_Luv2LRGB", "COLOR_Luv2RGB", - "CX_Luv2BGRA", "CX_Luv2LBGRA", "CX_Luv2LRGBA", "CX_Luv2RGBA", - - "COLOR_RGB2BGR555", "COLOR_RGB2BGR565", "COLOR_RGB2GRAY", - "COLOR_RGB2HLS", "COLOR_RGB2HLS_FULL", "COLOR_RGB2HSV", "COLOR_RGB2HSV_FULL", - "COLOR_RGB2Lab", "COLOR_RGB2Luv", "COLOR_RGB2XYZ", "COLOR_RGB2YCrCb", "COLOR_RGB2YUV", - - "COLOR_RGBA2BGR", "COLOR_RGBA2BGR555", "COLOR_RGBA2BGR565", "COLOR_RGBA2GRAY", - "CX_RGBA2HLS", "CX_RGBA2HLS_FULL", "CX_RGBA2HSV", "CX_RGBA2HSV_FULL", - "CX_RGBA2Lab", "CX_RGBA2Luv", "CX_RGBA2XYZ", - "CX_RGBA2YCrCb", "CX_RGBA2YUV", - - "COLOR_XYZ2BGR", "COLOR_XYZ2RGB", "CX_XYZ2BGRA", "CX_XYZ2RGBA", - - "COLOR_YCrCb2BGR", "COLOR_YCrCb2RGB", "CX_YCrCb2BGRA", "CX_YCrCb2RGBA", - "COLOR_YUV2BGR", "COLOR_YUV2RGB", "CX_YUV2BGRA", "CX_YUV2RGBA" - ]; - const CvtModeSize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p]; - const combiCvtMode = combine(CvtModeSize, CvtMode); - // didn't support 16u and 
32f perf tests according to // https://github.com/opencv/opencv/commit/4e679e1cc5b075ec006b29a58b4fe117523fba1d - const CvtMode16U = [ - "COLOR_BGR2BGRA", "COLOR_BGR2GRAY", - "COLOR_BGR2RGB", "COLOR_BGR2RGBA", "COLOR_BGR2XYZ", - "COLOR_BGR2YCrCb", "COLOR_BGR2YUV", + function constructCvtMode16U() { + let cvtMode16U = []; + cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "BGR", ["BGRA", "GRAY", "RGB", "RGBA", "XYZ", "YCrCb", "YUV"])); + cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "BGRA", ["BGR", "GRAY", "RGBA"])); + cvtMode16U = cvtMode16U.concat(constructMode("CX_", "BGRA", ["XYZ", "YCrCb", "YUV"])); + cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "GRAY", ["BGR", "BGRA"])); + cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "RGB", ["GRAY", "XYZ", "YCrCb", "YUV"])); + cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "RGBA", ["BGR", "GRAY"])); + cvtMode16U = cvtMode16U.concat(constructMode("CX_", "RGBA", ["XYZ", "YCrCb", "YUV"])); + cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "XYZ", ["BGR", "RGB"])); + cvtMode16U = cvtMode16U.concat(constructMode("CX_", "XYZ", ["BGRA", "RGBA"])); + cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "YCrCb", ["BGR", "RGB"])); + cvtMode16U = cvtMode16U.concat(constructMode("CX_", "YCrCb", ["BGRA", "RGBA"])); + cvtMode16U = cvtMode16U.concat(constructMode("COLOR_", "YUV", ["BGR", "RGB"])); + cvtMode16U = cvtMode16U.concat(constructMode("CX_", "YUV", ["BGRA", "RGBA"])); - "COLOR_BGRA2BGR", "COLOR_BGRA2GRAY", "COLOR_BGRA2RGBA", - "CX_BGRA2XYZ", - "CX_BGRA2YCrCb", "CX_BGRA2YUV", + return cvtMode16U; + } - "COLOR_GRAY2BGR", "COLOR_GRAY2BGRA", + const CvtMode16U = constructCvtMode16U(); - "COLOR_RGB2GRAY", - "COLOR_RGB2XYZ", "COLOR_RGB2YCrCb", "COLOR_RGB2YUV", - - "COLOR_RGBA2BGR", "COLOR_RGBA2GRAY", - "CX_RGBA2XYZ", - "CX_RGBA2YCrCb", "CX_RGBA2YUV", - - "COLOR_XYZ2BGR", "COLOR_XYZ2RGB", "CX_XYZ2BGRA", "CX_XYZ2RGBA", - - "COLOR_YCrCb2BGR", "COLOR_YCrCb2RGB", "CX_YCrCb2BGRA", 
"CX_YCrCb2RGBA", - "COLOR_YUV2BGR", "COLOR_YUV2RGB", "CX_YUV2BGRA", "CX_YUV2RGBA" - ]; const CvtMode16USize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p]; const combiCvtMode16U = combine(CvtMode16USize, CvtMode16U); - const CvtMode32F = [ - "COLOR_BGR2BGRA", "COLOR_BGR2GRAY", - "COLOR_BGR2HLS", "COLOR_BGR2HLS_FULL", "COLOR_BGR2HSV", "COLOR_BGR2HSV_FULL", - "COLOR_BGR2Lab", "COLOR_BGR2Luv", "COLOR_BGR2RGB", "COLOR_BGR2RGBA", "COLOR_BGR2XYZ", - "COLOR_BGR2YCrCb", "COLOR_BGR2YUV", + function constructCvtMode32F(source) { + let cvtMode32F = source; + cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "BGR", ["HLS", "HLS_FULL", "HSV", "HSV_FULL", "Lab", "Luv"])); + cvtMode32F = cvtMode32F.concat(constructMode("CX_", "BGRA", ["HLS", "HLS_FULL", "HSV", "HSV_FULL", "Lab", "Luv"])); + cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "HLS", ["BGR", "BGR_FULL", "RGB", "RGB_FULL"])); + cvtMode32F = cvtMode32F.concat(constructMode("CX_", "HLS", ["BGRA", "BGRA_FULL", "RGBA", "RGBA_FULL"])); + cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "HSV", ["BGR", "BGR_FULL", "RGB", "RGB_FULL"])); + cvtMode32F = cvtMode32F.concat(constructMode("CX_", "HSV", ["BGRA", "BGRA_FULL", "RGBA", "RGBA_FULL"])); + cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "Lab", ["BGR", "LBGR", "RGB", "LRGB"])); + cvtMode32F = cvtMode32F.concat(constructMode("CX_", "Lab", ["BGRA", "LBGRA", "RGBA", "LRGBA"])); + cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "Luv", ["BGR", "LBGR", "RGB", "LRGB"])); + cvtMode32F = cvtMode32F.concat(constructMode("CX_", "Luv", ["BGRA", "LBGRA", "RGBA", "LRGBA"])); + cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "LBGR", ["Lab", "Luv"])); + cvtMode32F = cvtMode32F.concat(constructMode("CX_", "LBGRA", ["Lab", "Luv"])); + cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", "LRGB", ["Lab", "Luv"])); + cvtMode32F = cvtMode32F.concat(constructMode("CX_", "LRGBA", ["Lab", "Luv"])); + cvtMode32F = cvtMode32F.concat(constructMode("COLOR_", 
"RGB", ["HLS", "HLS_FULL", "HSV", "HSV_FULL", "Lab", "Luv"])); + cvtMode32F = cvtMode32F.concat(constructMode("CX_", "RGBA", ["HLS", "HLS_FULL", "HSV", "HSV_FULL", "Lab", "Luv"])); - "COLOR_BGRA2BGR", "COLOR_BGRA2GRAY", "COLOR_BGRA2RGBA", - "CX_BGRA2HLS", "CX_BGRA2HLS_FULL", "CX_BGRA2HSV", "CX_BGRA2HSV_FULL", - "CX_BGRA2Lab", "CX_BGRA2Luv", "CX_BGRA2XYZ", - "CX_BGRA2YCrCb", "CX_BGRA2YUV", + return cvtMode32F; + } - "COLOR_GRAY2BGR", "COLOR_GRAY2BGRA", + const CvtMode32F = constructCvtMode32F(CvtMode16U); - "COLOR_HLS2BGR", "COLOR_HLS2BGR_FULL", "COLOR_HLS2RGB", "COLOR_HLS2RGB_FULL", - "CX_HLS2BGRA", "CX_HLS2BGRA_FULL", "CX_HLS2RGBA", "CX_HLS2RGBA_FULL", - - "COLOR_HSV2BGR", "COLOR_HSV2BGR_FULL", "COLOR_HSV2RGB", "COLOR_HSV2RGB_FULL", - "CX_HSV2BGRA", "CX_HSV2BGRA_FULL", "CX_HSV2RGBA", "CX_HSV2RGBA_FULL", - - "COLOR_Lab2BGR", "COLOR_Lab2LBGR", "COLOR_Lab2LRGB", "COLOR_Lab2RGB", - "CX_Lab2BGRA", "CX_Lab2LBGRA", "CX_Lab2LRGBA", "CX_Lab2RGBA", - - "COLOR_LBGR2Lab", "COLOR_LBGR2Luv", "COLOR_LRGB2Lab", "COLOR_LRGB2Luv", - "CX_LBGRA2Lab", "CX_LBGRA2Luv", "CX_LRGBA2Lab", "CX_LRGBA2Luv", - - "COLOR_Luv2BGR", "COLOR_Luv2LBGR", "COLOR_Luv2LRGB", "COLOR_Luv2RGB", - "CX_Luv2BGRA", "CX_Luv2LBGRA", "CX_Luv2LRGBA", "CX_Luv2RGBA", - - "COLOR_RGB2GRAY", - "COLOR_RGB2HLS", "COLOR_RGB2HLS_FULL", "COLOR_RGB2HSV", "COLOR_RGB2HSV_FULL", - "COLOR_RGB2Lab", "COLOR_RGB2Luv", "COLOR_RGB2XYZ", "COLOR_RGB2YCrCb", "COLOR_RGB2YUV", - - "COLOR_RGBA2BGR", "COLOR_RGBA2GRAY", - "CX_RGBA2HLS", "CX_RGBA2HLS_FULL", "CX_RGBA2HSV", "CX_RGBA2HSV_FULL", - "CX_RGBA2Lab", "CX_RGBA2Luv", "CX_RGBA2XYZ", - "CX_RGBA2YCrCb", "CX_RGBA2YUV", - - "COLOR_XYZ2BGR", "COLOR_XYZ2RGB", "CX_XYZ2BGRA", "CX_XYZ2RGBA", - - "COLOR_YCrCb2BGR", "COLOR_YCrCb2RGB", "CX_YCrCb2BGRA", "CX_YCrCb2RGBA", - "COLOR_YUV2BGR", "COLOR_YUV2RGB", "CX_YUV2BGRA", "CX_YUV2RGBA" - ]; const CvtMode32FSize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p]; const combiCvtMode32F = combine(CvtMode32FSize, CvtMode32F); + function constructeCvtMode(source) 
{ + let cvtMode = source + cvtMode = cvtMode.concat(constructMode("COLOR_", "BGR", ["BGR555", "BGR565"])); + cvtMode = cvtMode.concat(constructMode("COLOR_", "BGR555", ["BGR", "BGRA", "GRAY", "RGB", "RGBA"])); + cvtMode = cvtMode.concat(constructMode("COLOR_", "BGR565", ["BGR", "BGRA", "GRAY", "RGB", "RGBA"])); + cvtMode = cvtMode.concat(constructMode("COLOR_", "BGRA", ["BGR555", "BGR565"])); + cvtMode = cvtMode.concat(constructMode("COLOR_", "GRAY", ["BGR555", "BGR565"])); + cvtMode = cvtMode.concat(constructMode("COLOR_", "RGB", ["BGR555", "BGR565"])); + cvtMode = cvtMode.concat(constructMode("COLOR_", "RGBA", ["BGR555", "BGR565"])); + + return cvtMode; + } + + const CvtMode = constructeCvtMode(CvtMode32F); + + const CvtModeSize = [cvSize.szODD, cvSize.szVGA, cvSize.sz1080p]; + // combiCvtMode permute size and mode + const combiCvtMode = combine(CvtModeSize, CvtMode); + const CvtModeBayer = [ "COLOR_BayerBG2BGR", "COLOR_BayerBG2BGRA", "COLOR_BayerBG2BGR_VNG", "COLOR_BayerBG2GRAY", "COLOR_BayerGB2BGR", "COLOR_BayerGB2BGRA", "COLOR_BayerGB2BGR_VNG", "COLOR_BayerGB2GRAY", @@ -357,7 +308,7 @@ cv.onRuntimeInitialized = () => { return [mat1Type, mat2Type]; } - function addCvtColorCase(suite) { + function addCvtColorCase(suite, type) { suite.add('cvtColor', function() { cv.cvtColor(mat1, mat2, mode, 0); }, { @@ -375,154 +326,22 @@ cv.onRuntimeInitialized = () => { }); } - function addCvtModeCase(suite, combination) { + function addCvtModeCase(suite, combination, type) { totalCaseNum += combination.length; for(let i = 0; i < combination.length; ++i) { let size = combination[i][0]; let mode = combination[i][1]; let chPair = getConversionInfo(mode); let matType = getMatType(chPair); - let sizeArray = [size.width, size.height]; - - addCvtColorCase(suite); - // set init params - let index = suite.length - 1; - suite[index].params = { - size: sizeArray, - matType: matType, - mode: mode - }; - }; - } - - function addCvtModeBayerCase(suite, combination) { - totalCaseNum += 
combination.length; - for(let i = 0; i < combination.length; ++i) { - let size = combination[i][0]; - let mode = combination[i][1]; - let chPair = getConversionInfo(mode); - let matType = getMatType(chPair); - let sizeArray = [size.width, size.height]; - - addCvtColorCase(suite); - // set init params - let index = suite.length - 1; - suite[index].params = { - size: sizeArray, - matType: matType, - mode: mode - }; - }; - } - - function addCvtMode2Case(suite, combination) { - totalCaseNum += combination.length; - for(let i = 0; i < combination.length; ++i) { - let size = combination[i][0]; - let mode = combination[i][1]; - let chPair = getConversionInfo(mode); - let matType = getMatType(chPair); - let sizeArray = [size.width, size.height+size.height/2]; - - addCvtColorCase(suite); - // set init params - let index = suite.length - 1; - suite[index].params = { - size: sizeArray, - matType: matType, - mode: mode - }; - }; - } - - function addCvtMode3Case(suite, combination) { - totalCaseNum += combination.length; - for(let i = 0; i < combination.length; ++i) { - let size = combination[i][0]; - let mode = combination[i][1]; - let chPair = getConversionInfo(mode); - let matType = getMatType(chPair); - let sizeArray = [size.width, size.height+size.height/2]; - - addCvtColorCase(suite); - // set init params - let index = suite.length - 1; - suite[index].params = { - size: sizeArray, - matType: matType, - mode: mode - }; - }; - } - - function addEdgeAwareBayerModeCase(suite, combination) { - totalCaseNum += combination.length; - for(let i = 0; i < combination.length; ++i) { - let size = combination[i][0]; - let mode = combination[i][1]; - let chPair = getConversionInfo(mode); - let matType = getMatType(chPair); - let sizeArray = [size.width, size.height]; - - addCvtColorCase(suite); - // set init params - let index = suite.length - 1; - suite[index].params = { - size: sizeArray, - matType: matType, - mode: mode - }; - }; - } - - function decodeParams2Case(suite, params) { - 
let sizeStr = (params.match(/[0-9]+/g) || []).slice(0, 2).toString(); - let mode = (params.match(/CX\_[A-z]+2[A-z]+/) || params.match(/COLOR\_[A-z]+2[A-z]+/) || []).toString(); - let size = cvtStr2cvSize(sizeStr); - - // check if the params match and add case - for (let i = 0; i < combinations.length; ++i) { - let combination = combinations[i]; - for (let j = 0; j < combination.length; ++j) { - if (size === combination[j][0] && mode === combination[j][1]) { - cvtFunc[i](suite, [combination[j]]); - } + let sizeArray; + if (type == 0) { + sizeArray = [size.width, size.height]; + } else { + sizeArray = [size.width, size.height+size.height/2]; } - } - } - - function log(message) { - console.log(message); - if (!isNodeJs) { - logElement.innerHTML += `\n${'\t' + message}`; - } - } - - function setBenchmarkSuite(suite) { - suite - // add listeners - .on('cycle', function(event) { - ++currentCaseId; - let params = event.target.params; - let mode = params.mode; - let size = params.size; - log(`=== ${event.target.name} ${currentCaseId} ===`); - log(`params: (${parseInt(size[0])}x${parseInt(size[1])}, ${mode})`); - log('elapsed time:' +String(event.target.times.elapsed*1000)+' ms'); - log('mean time:' +String(event.target.stats.mean*1000)+' ms'); - log('stddev time:' +String(event.target.stats.deviation*1000)+' ms'); - log(String(event.target)); - }) - .on('error', function(event) { log(`test case ${event.target.name} failed`); }) - .on('complete', function(event) { - log(`\n ###################################`) - log(`Finished testing ${event.currentTarget.length} cases \n`); - if (!isNodeJs) { - runButton.removeAttribute('disabled'); - runButton.setAttribute('class', 'btn btn-primary'); - runButton.innerHTML = 'Run'; - } - }); + let params = {size:sizeArray, matType: matType, mode: mode}; + addKernelCase(suite, params, type, addCvtColorCase); + }; } function genBenchmarkCase(paramsContent) { @@ -531,23 +350,33 @@ cv.onRuntimeInitialized = () => { currentCaseId = 0; if 
(/\([0-9]+x[0-9]+,[\ ]*\w+\)/g.test(paramsContent.toString())) { let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+\)/g)[0]; - decodeParams2Case(suite, params); + let paramObjs = []; + paramObjs.push({name:"mode", value:"", reg:["/CX\_[A-z]+2[A-z]+/", "/COLOR\_[A-z]+2[A-z]+/"], index:1}); + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + + let locationList = decodeParams2Case(params, paramObjs,combinations); + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + if (first < 2) { + addCvtModeCase(suite, [combinations[first][second]], 0); + } else { + addCvtModeCase(suite, [combinations[first][second]], 1); + } + } } else { log("no filter or getting invalid params, run all the cases"); - addCvtModeCase(suite, combiCvtMode); - addCvtModeBayerCase(suite, combiCvtModeBayer); - addCvtMode2Case(suite, combiCvtMode2); - addCvtMode3Case(suite, combiCvtMode3); + addCvtModeCase(suite, combiCvtMode, 0); + addCvtModeCase(suite, combiCvtModeBayer, 0); + addCvtModeCase(suite, combiCvtMode2, 1); + addCvtModeCase(suite, combiCvtMode3, 1); } - setBenchmarkSuite(suite); + setBenchmarkSuite(suite, "cvtcolor", currentCaseId); log(`Running ${totalCaseNum} tests from CvtColor`); suite.run({ 'async': true }); // run the benchmark } - - // init - let cvtFunc = [addCvtModeCase, addCvtModeBayerCase, addCvtMode2Case, addCvtMode3Case];//, addEdgeAwareBayerModeCase]; let combinations = [combiCvtMode, combiCvtModeBayer, combiCvtMode2, combiCvtMode3];//, combiEdgeAwareBayer]; // set test filter params @@ -563,10 +392,19 @@ cv.onRuntimeInitialized = () => { let paramsContent = paramsElement.value; genBenchmarkCase(paramsContent); if (totalCaseNum !== 0) { - runButton.setAttribute("disabled", "disabled"); - runButton.setAttribute('class', 'btn btn-primary disabled'); - runButton.innerHTML = "Running"; + disableButton(); } } } -}; \ No newline at end of file +}; + +async function main() { + if (cv 
instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_dilate.html b/modules/js/perf/perf_imgproc/perf_dilate.html new file mode 100644 index 0000000000..49c61f4be3 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_dilate.html @@ -0,0 +1,73 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ Dilate +
+
+

Parameters Filter

+ for example: (1024x768, CV_8UC1) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_dilate.js b/modules/js/perf/perf_imgproc/perf_dilate.js new file mode 100644 index 0000000000..c4e14c7be2 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_dilate.js @@ -0,0 +1,117 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const DilateSize = [cvSize.szQVGA, cvSize.szVGA, cvSize.szSVGA, cvSize.szXGA, cvSize.szSXGA]; + const DilateType = ["CV_8UC1", "CV_8UC4"]; + const combiDilate = combine(DilateSize, DilateType); + + function addDialteCase(suite, type) { + suite.add('dilate', function() { + cv.dilate(src, dst, kernel); + }, { + 'setup': function() { + let size = this.params.size; + let matType = cv[this.params.matType]; + let src = new cv.Mat(size, matType); + let dst = new cv.Mat(size, matType); + let kernel = new cv.Mat(); + }, + 'teardown': function() { + src.delete(); + dst.delete(); + kernel.delete(); + } + }); + } + + function addDilateModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let matType = combination[i][1]; + + let params = {size: size, matType:matType}; + addKernelCase(suite, params, type, addDialteCase); + } + } + + function genBenchmarkCase(paramsContent) { + let 
suite = new Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + + if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(paramsContent.toString())) { + let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0]; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1}); + let locationList = decodeParams2Case(params, paramObjs, dilateCombinations); + + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addDilateModeCase(suite, [dilateCombinations[first][second]], first); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addDilateModeCase(suite, combiDilate, 0); + } + setBenchmarkSuite(suite, "dilate", currentCaseId); + log(`Running ${totalCaseNum} tests from dilate`); + suite.run({ 'async': true }); // run the benchmark + } + + let dilateCombinations = [combiDilate]; + + if (isNodeJs) { + const args = process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_erode.html b/modules/js/perf/perf_imgproc/perf_erode.html new file mode 100644 index 0000000000..2db653bd7a --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_erode.html @@ -0,0 +1,73 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ Erode +
+
+

Parameters Filter

+ for example: (1024x768, CV_8UC1) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_erode.js b/modules/js/perf/perf_imgproc/perf_erode.js new file mode 100644 index 0000000000..95aba6fa21 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_erode.js @@ -0,0 +1,117 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const ErodeSize = [cvSize.szQVGA, cvSize.szVGA, cvSize.szSVGA, cvSize.szXGA, cvSize.szSXGA]; + const ErodeType = ["CV_8UC1", "CV_8UC4"]; + const combiErode = combine(ErodeSize, ErodeType); + + function addErodeCase(suite, type) { + suite.add('erode', function() { + cv.erode(src, dst, kernel); + }, { + 'setup': function() { + let size = this.params.size; + let matType = cv[this.params.matType]; + let src = new cv.Mat(size, matType); + let dst = new cv.Mat(size, matType); + let kernel = new cv.Mat(); + }, + 'teardown': function() { + src.delete(); + dst.delete(); + kernel.delete(); + } + }); + } + + function addErodeModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let matType = combination[i][1]; + + let params = {size: size, matType:matType}; + addKernelCase(suite, params, type, addErodeCase); + } + } + + function genBenchmarkCase(paramsContent) { + let suite = new 
Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + + if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(paramsContent.toString())) { + let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0]; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1}); + let locationList = decodeParams2Case(params, paramObjs, erodeCombinations); + + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addErodeModeCase(suite, [erodeCombinations[first][second]], first); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addErodeModeCase(suite, combiErode, 0); + } + setBenchmarkSuite(suite, "erode", currentCaseId); + log(`Running ${totalCaseNum} tests from erode`); + suite.run({ 'async': true }); // run the benchmark + } + + let erodeCombinations = [combiErode]; + + if (isNodeJs) { + const args = process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_filter2D.html b/modules/js/perf/perf_imgproc/perf_filter2D.html new file mode 100644 index 0000000000..347fa8076d --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_filter2D.html @@ -0,0 +1,73 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ Filter2D +
+
+

Parameters Filter

+ for example: (320x240, 3, BORDER_CONSTANT) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_filter2D.js b/modules/js/perf/perf_imgproc/perf_filter2D.js new file mode 100644 index 0000000000..d92dc2b55a --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_filter2D.js @@ -0,0 +1,127 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const Filter2dSize = [cvSize.szQVGA, cvSize.sz1080p]; + const Filter2dKsize = ["3", "5"]; + const Filter2dBorderMode = ["BORDER_CONSTANT", "BORDER_REPLICATE", "BORDER_REFLECT_101"]; + const DISABLED_Filter2dBorderMode = ["BORDER_CONSTANT", "BORDER_REPLICATE"]; + const combiFilter2dCase = combine(Filter2dSize, Filter2dKsize, Filter2dBorderMode); + const combiDISABLEDFilter2dCase = combine(Filter2dSize, Filter2dKsize, DISABLED_Filter2dBorderMode); + + function addFilter2dCase(suite, type) { + suite.add('filter2d', function() { + cv.filter2D(src, dst, cv.CV_8UC4, kernel, new cv.Point(1, 1), 0.0, borderMode); + }, { + 'setup': function() { + let size = this.params.size; + let ksize = parseInt(this.params.ksize); + let borderMode = cv[this.params.borderMode]; + + let src = new cv.Mat(size, cv.CV_8UC4); + let dst = new cv.Mat(size, cv.CV_8UC4); + let kernelElement = []; + for (let i = 0; i < ksize*ksize; i++) { + let randNum = Math.random(); + 
kernelElement.push(-3.0+randNum*13.0); + } + let kernel = cv.matFromArray(ksize, ksize, cv.CV_32FC1, kernelElement); + }, + 'teardown': function() { + src.delete(); + dst.delete(); + } + }); + } + + function addFilter2dModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let ksize = combination[i][1]; + let borderMode = combination[i][2]; + let params = {size: size, ksize: ksize, borderMode:borderMode}; + addKernelCase(suite, params, type, addFilter2dCase); + } + } + + function genBenchmarkCase(paramsContent) { + let suite = new Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + + if (/\([0-9]+x[0-9]+,[\ ]*[0-9],[\ ]*BORDER\_\w+\)/g.test(paramsContent.toString())) { + let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*[0-9],[\ ]*BORDER\_\w+\)/g)[0]; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"ksize", value:"", reg:["/\\b[0-9]\\b/"], index:1}); + paramObjs.push({name:"borderMode", value: "", reg:["/BORDER\_\\w+/"], index:2}); + let locationList = decodeParams2Case(params, paramObjs,filter2dCombinations); + + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addFilter2dModeCase(suite, [filter2dCombinations[first][second]], 0); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addFilter2dModeCase(suite, combiFilter2dCase, 0); + } + setBenchmarkSuite(suite, "filter2d", currentCaseId); + log(`Running ${totalCaseNum} tests from Filter2d`); + suite.run({ 'async': true }); // run the benchmark + } + + let filter2dCombinations = [combiFilter2dCase];//,combiDISABLEDFilter2dCase]; + + if (isNodeJs) { + const args = process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*[0-9],[\ ]*BORDER\_\w+\)/g.test(args.toString())) { + paramsContent = 
args.toString().match(/\([0-9]+x[0-9]+,[\ ]*[0-9],[\ ]*BORDER\_\w+\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_gaussianBlur.html b/modules/js/perf/perf_imgproc/perf_gaussianBlur.html new file mode 100644 index 0000000000..3f56c22f7d --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_gaussianBlur.html @@ -0,0 +1,73 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ gaussianBlur +
+
+

Parameters Filter

+ for example: (1280x720, CV_8UC1, BORDER_REPLICATE) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_gaussianBlur.js b/modules/js/perf/perf_imgproc/perf_gaussianBlur.js new file mode 100644 index 0000000000..33c5401a7e --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_gaussianBlur.js @@ -0,0 +1,126 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const GaussianBlurSize = [cvSize.szODD, cvSize.szQVGA, cvSize.szVGA, cvSize.sz720p]; + const GaussianBlurType = ["CV_8UC1", "CV_8UC4", "CV_16UC1", "CV_16SC1", "CV_32FC1"]; + const BorderType3x3 = ["BORDER_REPLICATE", "BORDER_CONSTANT"]; + const BorderType3x3ROI = ["BORDER_REPLICATE", "BORDER_CONSTANT", "BORDER_REFLECT", "BORDER_REFLECT101"]; + + const combiGaussianBlurBorder3x3 = combine(GaussianBlurSize, GaussianBlurType, BorderType3x3); + const combiGaussianBlurBorder3x3ROI = combine(GaussianBlurSize, GaussianBlurType, BorderType3x3ROI); + + function addGaussianBlurCase(suite, type) { + suite.add('gaussianBlur', function() { + cv.GaussianBlur(src, dst, ksize, 1, 0, borderType); + }, { + 'setup': function() { + let size = this.params.size; + let matType = cv[this.params.matType]; + let borderType = cv[this.params.borderType]; + let type = this.params.type; + let src = new cv.Mat(size, matType); + let dst = new cv.Mat(size, matType); + let ksizeNum = 
this.params.ksize; + let ksize = new cv.Size(ksizeNum, ksizeNum); + }, + 'teardown': function() { + src.delete(); + dst.delete(); + } + }); + } + + function addGaussianBlurModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let matType = combination[i][1]; + let borderType = combination[i][2]; + let ksizeArray = [3, 5]; + let params = {size: size, matType:matType, ksize: ksizeArray[type], borderType:borderType}; + addKernelCase(suite, params, type, addGaussianBlurCase); + } + } + + function genBenchmarkCase(paramsContent) { + let suite = new Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + + if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g.test(paramsContent.toString())) { + let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g)[0]; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1}); + paramObjs.push({name:"borderMode", value: "", reg:["/BORDER\_\\w+/"], index:2}); + let locationList = decodeParams2Case(params, paramObjs,gaussianBlurCombinations); + + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addGaussianBlurModeCase(suite, [gaussianBlurCombinations[first][second]], first); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addGaussianBlurModeCase(suite, combiGaussianBlurBorder3x3, 0); + addGaussianBlurModeCase(suite, combiGaussianBlurBorder3x3ROI, 1); + } + setBenchmarkSuite(suite, "gaussianBlur", currentCaseId); + log(`Running ${totalCaseNum} tests from gaussianBlur`); + suite.run({ 'async': true }); // run the benchmark + } + + let gaussianBlurCombinations = [combiGaussianBlurBorder3x3, combiGaussianBlurBorder3x3ROI]; + + if (isNodeJs) { + const args = process.argv.slice(2); + 
let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*BORDER\_\w+\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_medianBlur.html b/modules/js/perf/perf_imgproc/perf_medianBlur.html new file mode 100644 index 0000000000..6e390beec2 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_medianBlur.html @@ -0,0 +1,73 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ MedianBlur +
+
+

Parameters Filter

+ for example: (1280x720, CV_8UC1, 3) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_medianBlur.js b/modules/js/perf/perf_imgproc/perf_medianBlur.js new file mode 100644 index 0000000000..69b7ba3ead --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_medianBlur.js @@ -0,0 +1,118 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const MedianBlurSize = [cvSize.szODD, cvSize.szQVGA, cvSize.szVGA, cvSize.sz720p]; + const MedianBlurType = ["CV_8UC1", "CV_8UC4", "CV_16UC1", "CV_16SC1", "CV_32FC1"]; + const combiMedianBlur = combine(MedianBlurSize, MedianBlurType, [3,5]); + + function addMedianBlurCase(suite, type) { + suite.add('medianBlur', function() { + cv.medianBlur(src, dst, ksize); + }, { + 'setup': function() { + let size = this.params.size; + let matType = cv[this.params.matType]; + let ksize = this.params.ksize; + let src = new cv.Mat(size, matType); + let dst = new cv.Mat(size, matType); + }, + 'teardown': function() { + src.delete(); + dst.delete(); + } + }); + } + + function addMedianBlurModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let matType = combination[i][1]; + let ksize = combination[i][2]; + + let params = {size: size, matType:matType, ksize: ksize}; + 
addKernelCase(suite, params, type, addMedianBlurCase); + } + } + + function genBenchmarkCase(paramsContent) { + let suite = new Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + + if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*(3|5)\)/g.test(paramsContent.toString())) { + let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*(3|5)\)/g)[0]; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1}); + paramObjs.push({name:"ksize", value: "", reg:["/\\b[0-9]\\b/"], index:2}); + let locationList = decodeParams2Case(params, paramObjs, medianBlurCombinations); + + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addMedianBlurModeCase(suite, [medianBlurCombinations[first][second]], first); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addMedianBlurModeCase(suite, combiMedianBlur, 0); + } + setBenchmarkSuite(suite, "medianBlur", currentCaseId); + log(`Running ${totalCaseNum} tests from medianBlur`); + suite.run({ 'async': true }); // run the benchmark + } + + let medianBlurCombinations = [combiMedianBlur]; + + if (isNodeJs) { + const args = process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*(3|5)\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*(3|5)\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_pyrDown.html 
b/modules/js/perf/perf_imgproc/perf_pyrDown.html new file mode 100644 index 0000000000..f90ac5f55e --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_pyrDown.html @@ -0,0 +1,73 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ pyrDown +
+
+

Parameters Filter

+ for example: (1920x1080, CV_8UC3) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_pyrDown.js b/modules/js/perf/perf_imgproc/perf_pyrDown.js new file mode 100644 index 0000000000..a98b109ade --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_pyrDown.js @@ -0,0 +1,116 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const PyrDownSize = [cvSize.sz1080p, cvSize.sz720p, cvSize.szVGA, cvSize.szQVGA, cvSize.szODD]; + const PyrDownType = ["CV_8UC1", "CV_8UC3", "CV_8UC4", "CV_16SC1", "CV_16SC3", "CV_16SC4", "CV_32FC1", "CV_32FC3", "CV_32FC4"]; + + const combiPyrDown = combine(PyrDownSize, PyrDownType); + + function addPryDownCase(suite, type) { + suite.add('pyrDown', function() { + cv.pyrDown(src, dst); + }, { + 'setup': function() { + let size = this.params.size; + let matType = cv[this.params.matType]; + let src = new cv.Mat(size, matType); + let dst = new cv.Mat((size.height + 1)/2, (size.height + 1)/2, matType) + }, + 'teardown': function() { + src.delete(); + dst.delete(); + } + }); + } + + function addPyrDownModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let matType = combination[i][1]; + + let params = {size: size, matType:matType}; + addKernelCase(suite, params, type, 
addPryDownCase); + } + } + + function genBenchmarkCase(paramsContent) { + let suite = new Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + + if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(paramsContent.toString())) { + let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0]; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1}); + let locationList = decodeParams2Case(params, paramObjs, pyrDownCombinations); + + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addPyrDownModeCase(suite, [pyrDownCombinations[first][second]], first); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addPyrDownModeCase(suite, combiPyrDown, 0); + } + setBenchmarkSuite(suite, "pyrDown", currentCaseId); + log(`Running ${totalCaseNum} tests from pyrDown`); + suite.run({ 'async': true }); // run the benchmark + } + + let pyrDownCombinations = [combiPyrDown]; + + if (isNodeJs) { + const args = process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_remap.html b/modules/js/perf/perf_imgproc/perf_remap.html new file mode 100644 index 0000000000..6812adb0a0 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_remap.html @@ -0,0 +1,73 @@ + + + + + 
OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ Remap +
+
+

Parameters Filter

+ for example: (640x480, CV_16UC1, CV_16SC2, INTER_NEAREST) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_remap.js b/modules/js/perf/perf_imgproc/perf_remap.js new file mode 100644 index 0000000000..fe2e5d7541 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_remap.js @@ -0,0 +1,182 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const RemapSize = [cvSize.szVGA, cvSize.sz1080p]; + const RemapSrcType = ["CV_16UC1", "CV_16SC1", "CV_32FC1"]; + const RemapType = ["CV_16SC2", "CV_32FC1", "CV_32FC2"]; + const InterType = ["INTER_NEAREST", "INTER_LINEAR", "INTER_CUBIC", "INTER_LANCZOS4"]; + const combiRemap = combine(RemapSize, RemapSrcType, RemapType, InterType); + + function addRemapCase(suite, type) { + suite.add('remap', function() { + cv.remap(src, dst, map1, map2, interType); + }, { + 'setup': function() { + let size = this.params.size; + let matType = cv[this.params.matType]; + let mapType = cv[this.params.mapType]; + let interType = cv[this.params.interType]; + + + let src = new cv.Mat(size, matType); + let dst = new cv.Mat(size, matType); + let map1 = new cv.Mat(size, mapType); + let map2; + if (mapType == cv.CV_32FC1) { + map2 = new cv.Mat(size, mapType); + } else if (interType != cv.INTER_NEAREST && mapType == cv.CV_16SC2) { + map2 = new cv.Mat.zeros(size, cv.CV_16UC1); + } else { + map2 = new 
cv.Mat(); + } + + for (let j = 0; j < map1.rows; j++) { + for (let i = 0; i < map1.cols; i++) { + let randNum = Math.random(); + let view, view1; + switch(matType) { + case cv.CV_16UC1: + view = src.ushortPtr(j,i); + view[0] = Math.floor(randNum*256); + break; + case cv.CV_16SC1: + view = src.shortPtr(j,i); + view[0] = Math.floor(randNum*256); + break; + case cv.CV_32FC1: + view = src.floatPtr(j,i); + view[0] = randNum*256; + break; + default: + console.error("Unknown conversion type 1"); + break; + } + + switch(mapType) { + case cv.CV_32FC1: + view1 = map1.floatPtr(j,i); + let view2 = map2.floatPtr(j,i); + view1[0] = src.cols - i - 1; + view2[0] = j; + break; + case cv.CV_32FC2: + view1 = map1.floatPtr(j,i); + view1[0] = src.cols - i - 1; + view1[1] = j; + break; + case cv.CV_16SC2: + view1 = map1.shortPtr(j,i); + view1[0] = src.cols - i - 1; + view1[1] = j; + break; + default: + console.error("Unknown conversion type 2"); + break; + } + } + } + }, + 'teardown': function() { + src.delete(); + dst.delete(); + map1.delete(); + map2.delete(); + } + }); + } + + function addRemapModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let matType = combination[i][1]; + let mapType = combination[i][2]; + let interType = combination[i][3]; + + let params = {size: size, matType:matType, mapType:mapType, interType:interType}; + addKernelCase(suite, params, type, addRemapCase); + } + } + + function genBenchmarkCase(paramsContent) { + let suite = new Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + + if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*CV\_\w+,[\ ]*INTER\_\w+\)/g.test(paramsContent.toString())) { + let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*CV\_\w+,[\ ]*INTER\_\w+\)/g)[0]; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"matType", value:"", 
reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/"], index:1}); + paramObjs.push({name:"mapType", value:"", reg:["/CV\_[0-9]+[FSUfsu]C[0-9]/g"], index:2, loc:1}); + paramObjs.push({name:"interType", value: "", reg:["/INTER\_\\w+/"], index:3}); + let locationList = decodeParams2Case(params, paramObjs, remapCombinations); + + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addRemapModeCase(suite, [remapCombinations[first][second]], first); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addRemapModeCase(suite, combiRemap, 0); + } + setBenchmarkSuite(suite, "remap", currentCaseId); + log(`Running ${totalCaseNum} tests from remap`); + suite.run({ 'async': true }); // run the benchmark + } + + let remapCombinations = [combiRemap]; + + if (isNodeJs) { + const args = process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*CV\_\w+,[\ ]*INTER\_\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*CV\_\w+,[\ ]*INTER\_\w+\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_resize.js b/modules/js/perf/perf_imgproc/perf_resize.js index 4e71db3806..3eef30f0e3 100644 --- a/modules/js/perf/perf_imgproc/perf_resize.js +++ b/modules/js/perf/perf_imgproc/perf_resize.js @@ -11,18 +11,17 @@ if (isNodeJs) { var logElement = document.getElementById('log'); } -cv.onRuntimeInitialized = () => { +function perf() { + console.log('opencv.js loaded'); if (isNodeJs) { global.cv = cv; global.combine = 
HelpFunc.combine; - global.fillGradient = HelpFunc.fillGradient; global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; - global.cvSize = Base.cvSize; + global.cvSize = Base.getCvSize(); } else { - runButton.removeAttribute('disabled'); - runButton.setAttribute('class', 'btn btn-primary'); - runButton.innerHTML = 'Run'; + enableButton(); + cvSize = getCvSize(); } let totalCaseNum, currentCaseId; @@ -59,185 +58,80 @@ cv.onRuntimeInitialized = () => { const scalesAreaFast = [2]; const combiAreaFast = combine(matTypesAreaFast, sizesAreaFast, scalesAreaFast); - function addResizeUpLinearCase(suite, combination) { - totalCaseNum += combination.length; - for (let i = 0; i < combination.length; ++i) { - let matType = combination[i][0]; - let from = combination[i][1]; - let to = combination[i][2]; - - suite.add('resize', function() { - cv.resize(src, dst, to, 0, 0, cv.INTER_LINEAR_EXACT); - }, { - 'setup': function() { - let from = this.params.from; - let to = this.params.to; - let matType = cv[this.params.matType]; - let src = new cv.Mat(from, matType); - let dst = new cv.Mat(to, matType); - fillGradient(cv, src); - }, - 'teardown': function() { - src.delete(); - dst.delete(); - } - }); - - // set init params - let index = suite.length - 1; - suite[index].params = { - from: from, - to: to, - matType: matType - }; - } - } - - function addResizeDownLinearCase(suite, combination) { - totalCaseNum += combination.length; - for (let i = 0; i < combination.length; ++i) { - let matType = combination[i][0]; - let from = combination[i][1]; - let to = combination[i][2]; - - suite.add('resize', function() { - cv.resize(src, dst, to, 0, 0, cv.INTER_LINEAR_EXACT); - }, { - 'setup': function() { - let from = this.params.from; - let to = this.params.to; - let matType = cv[this.params.matType]; - let src = new cv.Mat(from, matType); - let dst = new cv.Mat(to, matType); - fillGradient(cv, src); - }, - 'teardown': function() { - src.delete(); - dst.delete(); - } - }); - - // set init params - let 
index = suite.length - 1; - suite[index].params = { - from: from, - to: to, - matType: matType - }; - } - } - - function addResizeAreaFastCase(suite, combination) { - totalCaseNum += combination.length; - for (let i = 0; i < combination.length; ++i) { - let matType = combination[i][0]; - let from = combination[i][1]; - let scale = combination[i][2]; - from.width = (Math.floor(from.width/scale))*scale; - from.height = (Math.floor(from.height/scale))*scale; - let to = { - width: from.width/scale, - height: from.height/scale}; // for params print - - suite.add('resize', function() { + function addResizeCase(suite, type) { + suite.add('resize', function() { + if (type == "area") { cv.resize(src, dst, dst.size(), 0, 0, cv.INTER_AREA); - }, { - 'setup': function() { - let from = this.params.from; - let scale = this.params.scale; - let matType = cv[this.params.matType]; - let src = new cv.Mat(from, matType); - let dst = new cv.Mat(from.height/scale, from.width/scale, matType); - }, - 'teardown': function() { - src.delete(); - dst.delete(); + } else { + cv.resize(src, dst, to, 0, 0, cv.INTER_LINEAR_EXACT); + } + }, { + 'setup': function() { + let from = this.params.from; + let to = this.params.to; + let matType = cv[this.params.matType]; + let src = new cv.Mat(from, matType); + let type = this.params.modeType; + let dst; + if (type == "area") { + dst = new cv.Mat(from.height/scale, from.width/scale, matType); + } else { + dst = new cv.Mat(to, matType); + fillGradient(cv, src); } - }); - // set init params - let index = suite.length - 1; - suite[index].params = { - from: from, - scale: scale, - matType: matType - }; - } - } - - function decodeParams2Case(suite, params) { - let sizeString = (params.match(/[0-9]+x[0-9]+/g) || []).slice(0, 2).toString(); - let sizes = (sizeString.match(/[0-9]+/g) || []); - let size1Str = sizes.slice(0, 2).toString(); - let size2Str = sizes.slice(2, 5).toString(); - let matType = (params.match(/CV\_[0-9]+[A-z][A-z][0-9]/) || []).toString(); - 
let size1 = cvtStr2cvSize(size1Str); - let size2 = cvtStr2cvSize(size2Str); - // check if the params match and add case - for (let i = 0; i < combinations.length; ++i) { - let combination = combinations[i]; - for (let j = 0; j < combination.length; ++j) { - if (matType === combination[j][0] && size1 === combination[j][1] && size2 === combination[j][2]) { - resizeFunc[i](suite, [combination[j]]); + }, + 'teardown': function() { + src.delete(); + dst.delete(); } - } - } - } - - function log(message) { - console.log(message); - if (!isNodeJs) { - logElement.innerHTML += `\n${'\t'.repeat(1) + message}`; - } - } - - function setBenchmarkSuite(suite) { - suite - // add listeners - .on('cycle', function(event) { - ++currentCaseId; - let params = event.target.params; - let matType = params.matType; - let size1 = params.from; - let size2 = params.to; - log(`=== ${event.target.name} ${currentCaseId} ===`); - log(`params: (${matType},${parseInt(size1.width)}x${parseInt(size1.height)},`+ - `${parseInt(size2.width)}x${parseInt(size2.height)})`); - log('elapsed time:' +String(event.target.times.elapsed*1000)+' ms'); - log('mean time:' +String(event.target.stats.mean*1000)+' ms'); - log('stddev time:' +String(event.target.stats.deviation*1000)+' ms'); - log(String(event.target)); - }) - .on('error', function(event) { log(`test case ${event.target.name} failed`); }) - .on('complete', function(event) { - log(`\n ###################################`) - log(`Finished testing ${event.currentTarget.length} cases \n`); - if (!isNodeJs) { - runButton.removeAttribute('disabled'); - runButton.setAttribute('class', 'btn btn-primary'); - runButton.innerHTML = 'Run'; - } }); } + function addResizeModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let matType = combination[i][0]; + let from = combination[i][1]; + let params; + if (type == "area") { + let scale = combination[i][2]; + params = { from: from, scale: scale, 
matType: matType, modeType: type }; + } else { + let to = combination[i][2]; + params = { from: from, to: to, matType: matType, modeType: type}; + } + addKernelCase(suite, params, type, addResizeCase) + } + } + function genBenchmarkCase(paramsContent) { let suite = new Benchmark.Suite; totalCaseNum = 0; currentCaseId = 0; if (/\(\w+,[\ ]*[0-9]+x[0-9]+,[\ ]*[0-9]+x[0-9]+\)/g.test(paramsContent.toString())) { let params = paramsContent.toString().match(/\(\w+,[\ ]*[0-9]+x[0-9]+,[\ ]*[0-9]+x[0-9]+\)/g)[0]; - decodeParams2Case(suite, params); + let paramObjs = []; + paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[A-z][A-z][0-9]/"], index:0}); + paramObjs.push({name:"size1", value:"", reg:[""], index:1}); + paramObjs.push({name:"size2", value:"", reg:[""], index:2}); + let locationList = decodeParams2Case(params, paramObjs,combinations); + + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addResizeModeCase(suite, [combinations[first][second]], "linear"); + } } else { log("no filter or getting invalid params, run all the cases"); - addResizeUpLinearCase(suite, combiUpLinear); - addResizeDownLinearCase(suite, combiDownLinear); + addResizeModeCase(suite, combiUpLinear, "linear"); + addResizeModeCase(suite, combiDownLinear, "linear"); } - setBenchmarkSuite(suite); + setBenchmarkSuite(suite, "resize", currentCaseId); log(`Running ${totalCaseNum} tests from Resize`); suite.run({ 'async': true }); // run the benchmark } // init - let resizeFunc = [addResizeUpLinearCase, addResizeDownLinearCase];//, addResizeAreaFastCase]; let combinations = [combiUpLinear, combiDownLinear];//, combiAreaFast]; // set test filter params @@ -253,10 +147,19 @@ cv.onRuntimeInitialized = () => { let paramsContent = paramsElement.value; genBenchmarkCase(paramsContent); if (totalCaseNum !== 0) { - runButton.setAttribute("disabled", "disabled"); - runButton.setAttribute('class', 'btn btn-primary disabled'); - 
runButton.innerHTML = "Running"; + disableButton(); } } } -}; \ No newline at end of file +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_scharr.html b/modules/js/perf/perf_imgproc/perf_scharr.html new file mode 100644 index 0000000000..720ca741eb --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_scharr.html @@ -0,0 +1,73 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ Scharr +
+
+

Parameters Filter

+ for example: (640x480, CV_16SC1, (0,1), BORDER_REPLICATE) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_scharr.js b/modules/js/perf/perf_imgproc/perf_scharr.js new file mode 100644 index 0000000000..a76a93078c --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_scharr.js @@ -0,0 +1,156 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const ScharrSize = [cvSize.szODD, cvSize.szQVGA, cvSize.szVGA]; + const Scharrdxdy = ["(1,0)", "(0,1)"]; + const BorderType3x3 = ["BORDER_REPLICATE", "BORDER_CONSTANT"]; + const BorderType3x3ROI = ["BORDER_DEFAULT", "BORDER_REPLICATE|BORDER_ISOLATED", "BORDER_CONSTANT|BORDER_ISOLATED"]; + + const combiScharrBorder3x3 = combine(ScharrSize, ["CV_16SC1", "CV_32FC1"], Scharrdxdy, BorderType3x3); + const combiScharrBorder3x3ROI = combine(ScharrSize, ["CV_16SC1", "CV_32FC1"], Scharrdxdy, BorderType3x3ROI); + + function addScharrCase(suite, type) { + suite.add('scharr', function() { + cv.Scharr(src, dst, ddepth, dx, dy, 1, 0, borderType); + }, { + 'setup': function() { + let size = this.params.size; + let ddepth = cv[this.params.ddepth]; + let dxdy = this.params.dxdy; + let type = this.params.type; + let src, dst; + if (type == 0) { + src = new cv.Mat(size[1], size[0], cv.CV_8U); + dst = new cv.Mat(size[1], size[0], ddepth); + } else { + src = new cv.Mat(size[1]+10, size[0]+10, 
cv.CV_8U); + dst = new cv.Mat(size[1]+10, size[0]+10, ddepth); + src = src.colRange(5, size[0]+5); + src = src.rowRange(5, size[1]+5); + dst = dst.colRange(5, size[0]+5); + dst = dst.rowRange(5, size[1]+5); + } + + let dx = parseInt(dxdy[1]); + let dy = parseInt(dxdy[3]); + let borderTypeArray = this.params.borderType; + let borderType; + if (borderTypeArray.length == 1) { + borderType = cv[borderTypeArray[0]]; + } else { + borderType = cv[borderTypeArray[0]] | cv[borderTypeArray[1]]; + } + }, + 'teardown': function() { + src.delete(); + dst.delete(); + } + }); + } + + function addScharrModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let ddepth = combination[i][1]; + let dxdy = combination[i][2]; + let borderType = combination[i][3]; + let sizeArray = [size.width, size.height]; + + let borderTypeArray = borderType.split("|"); + let params = {size: sizeArray, ddepth: ddepth, dxdy: dxdy, borderType:borderTypeArray, type:type}; + addKernelCase(suite, params, type, addScharrCase); + } + } + + function genBenchmarkCase(paramsContent) { + let suite = new Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + let params = ""; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"ddepth", value:"", reg:["/CV\_[0-9]+[FSUfsu]C1/g"], index:1}); + paramObjs.push({name:"dxdy", value:"", reg:["/\\([0-2],[0-2]\\)/"], index:2}); + + if (/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g.test(paramsContent.toString())) { + params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g)[0]; + paramObjs.push({name:"boderType", value:"", reg:["/BORDER\_\\w+/"], index:3}); + } else if (/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g.test(paramsContent.toString())) { + params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ 
]*\w+\|\w+\)/g)[0]; + paramObjs.push({name:"boderType", value:"", reg:["/BORDER\_\\w+\\|BORDER\_\\w+/"], index:3}); + } + + if (params != ""){ + let locationList = decodeParams2Case(params, paramObjs,scharrCombinations); + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addScharrModeCase(suite, [scharrCombinations[first][second]], first); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addScharrModeCase(suite, combiScharrBorder3x3, 0); + addScharrModeCase(suite, combiScharrBorder3x3ROI, 1); + } + setBenchmarkSuite(suite, "scharr", currentCaseId); + log(`Running ${totalCaseNum} tests from Scharr`); + suite.run({ 'async': true }); // run the benchmark + } + + let scharrCombinations = [combiScharrBorder3x3, combiScharrBorder3x3ROI]; + + if (isNodeJs) { + const args = process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g)[0]; + } else if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_sobel.html b/modules/js/perf/perf_imgproc/perf_sobel.html new file mode 100644 index 0000000000..b41c940a23 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_sobel.html 
@@ -0,0 +1,73 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ Sobel +
+
+

Parameters Filter

+ for example: (640x480, CV_16SC1, (0,1), BORDER_REPLICATE) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_sobel.js b/modules/js/perf/perf_imgproc/perf_sobel.js new file mode 100644 index 0000000000..b7064e852a --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_sobel.js @@ -0,0 +1,170 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const SobelSize = [cvSize.szODD, cvSize.szQVGA, cvSize.szVGA]; + const Sobel3x3dxdy = ["(0,1)", "(1,0)", "(1,1)", "(0,2)", "(2,0)", "(2,2)"]; + const Sobeldxdy = ["(0,1)", "(1,0)", "(1,1)", "(0,2)", "(2,0)"]; + const BorderType3x3 = ["BORDER_REPLICATE", "BORDER_CONSTANT"]; + const BorderType3x3ROI = ["BORDER_DEFAULT", "BORDER_REPLICATE|BORDER_ISOLATED", "BORDER_CONSTANT|BORDER_ISOLATED"]; + const BorderType = ["BORDER_REPLICATE", "BORDER_CONSTANT", "BORDER_REFLECT", "BORDER_REFLECT101"]; + const BorderTypeROI = ["BORDER_DEFAULT", "BORDER_REPLICATE|BORDER_ISOLATED", "BORDER_CONSTANT|BORDER_ISOLATED", "BORDER_REFLECT|BORDER_ISOLATED", "BORDER_REFLECT101|BORDER_ISOLATED"] + + const combiSobelBorder3x3 = combine(SobelSize, ["CV_16SC1", "CV_32FC1"], Sobel3x3dxdy, BorderType3x3); + const combiSobelBorder3x3ROI = combine(SobelSize, ["CV_16SC1", "CV_32FC1"], Sobel3x3dxdy, BorderType3x3ROI); + const combiSobelBorder5x5 = combine(SobelSize, ["CV_16SC1", "CV_32FC1"], Sobeldxdy, 
BorderType); + const combiSobelBorder5x5ROI = combine(SobelSize, ["CV_16SC1", "CV_32FC1"], Sobeldxdy, BorderTypeROI); + + function addSobelCase(suite, type) { + suite.add('sobel', function() { + cv.Sobel(src, dst, ddepth, dx, dy, ksize, 1, 0, borderType); + }, { + 'setup': function() { + let size = this.params.size; + let ddepth = cv[this.params.ddepth]; + let dxdy = this.params.dxdy; + let ksize = this.params.ksize; + let type = this.params.type; + let src, dst; + if (type %2 == 0) { + src = new cv.Mat(size[1], size[0], cv.CV_8U); + dst = new cv.Mat(size[1], size[0], ddepth); + } else { + src = new cv.Mat(size[1]+10, size[0]+10, cv.CV_8U); + dst = new cv.Mat(size[1]+10, size[0]+10, ddepth); + src = src.colRange(5, size[0]+5); + src = src.rowRange(5, size[1]+5); + dst = dst.colRange(5, size[0]+5); + dst = dst.rowRange(5, size[1]+5); + } + + let dx = parseInt(dxdy[1]); + let dy = parseInt(dxdy[3]); + let borderTypeArray = this.params.borderType; + let borderType; + if (borderTypeArray.length == 1) { + borderType = cv[borderTypeArray[0]]; + } else { + borderType = cv[borderTypeArray[0]] | cv[borderTypeArray[1]]; + } + }, + 'teardown': function() { + src.delete(); + dst.delete(); + } + }); + } + + function addSobelModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let ddepth = combination[i][1]; + let dxdy = combination[i][2]; + let borderType = combination[i][3]; + let sizeArray = [size.width, size.height]; + let ksize; + if (type < 2) { + ksize = 3; + } else { + ksize = 5; + } + + let borderTypeArray = borderType.split("|"); + let params = {size: sizeArray, ddepth: ddepth, dxdy: dxdy, ksize:ksize, borderType:borderTypeArray, type:type}; + addKernelCase(suite, params, type, addSobelCase); + } + } + + function genBenchmarkCase(paramsContent) { + let suite = new Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + let params = ""; + let paramObjs = []; + 
paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"ddepth", value:"", reg:["/CV\_[0-9]+[FSUfsu]C1/g"], index:1}); + paramObjs.push({name:"dxdy", value:"", reg:["/\\([0-2],[0-2]\\)/"], index:2}); + + if (/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g.test(paramsContent.toString())) { + params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g)[0]; + paramObjs.push({name:"boderType", value:"", reg:["/BORDER\_\\w+/"], index:3}); + } else if (/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g.test(paramsContent.toString())) { + params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g)[0]; + paramObjs.push({name:"boderType", value:"", reg:["/BORDER\_\\w+\\|BORDER\_\\w+/"], index:3}); + } + + if (params != ""){ + let locationList = decodeParams2Case(params, paramObjs,sobelCombinations); + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addSobelModeCase(suite, [sobelCombinations[first][second]], first); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addSobelModeCase(suite, combiSobelBorder3x3, 0); + addSobelModeCase(suite, combiSobelBorder3x3ROI, 1); + addSobelModeCase(suite, combiSobelBorder5x5, 2); + addSobelModeCase(suite, combiSobelBorder5x5ROI, 3); + } + setBenchmarkSuite(suite, "sobel", currentCaseId); + log(`Running ${totalCaseNum} tests from Sobel`); + suite.run({ 'async': true }); // run the benchmark + } + + let sobelCombinations = [combiSobelBorder3x3, combiSobelBorder3x3ROI, combiSobelBorder5x5, combiSobelBorder5x5ROI]; + + if (isNodeJs) { + const args = process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ 
]*\w+\)/g)[0]; + } else if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\([0-2],[0-2]\),[\ ]*\w+\|\w+\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_threshold.js b/modules/js/perf/perf_imgproc/perf_threshold.js index 2616a2feaa..381ddaeade 100644 --- a/modules/js/perf/perf_imgproc/perf_threshold.js +++ b/modules/js/perf/perf_imgproc/perf_threshold.js @@ -11,17 +11,17 @@ if (isNodeJs) { var logElement = document.getElementById('log'); } -cv.onRuntimeInitialized = () => { +function perf() { + console.log('opencv.js loaded'); if (isNodeJs) { global.cv = cv; global.combine = HelpFunc.combine; global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; - global.cvSize = Base.cvSize; + global.cvSize = Base.getCvSize(); } else { - runButton.removeAttribute('disabled'); - runButton.setAttribute('class', 'btn btn-primary'); - runButton.innerHTML = 'Run'; + enableButton(); + cvSize = getCvSize(); } let totalCaseNum, currentCaseId; @@ -32,173 +32,105 @@ cv.onRuntimeInitialized = () => { const combiSizeMatTypeThreshType = combine(typicalMatSizes, matTypes, threshTypes); const combiSizeOnly = combine(typicalMatSizes, ['CV_8UC1'], ['THRESH_BINARY|THRESH_OTSU']); - function addSizeMatTypeThreshTypeCase(suite, combination) { - totalCaseNum += combination.length; - for (let i = 0; i < combination.length; ++i) { - let matSize = combination[i][0]; - let matType = combination[i][1]; - let threshType = combination[i][2]; - suite.add('threshold', function() { - 
cv.threshold(src, dst, threshold, thresholdMax, threshType); - }, { - 'setup': function() { - let matSize = this.params.matSize; - let matType = cv[this.params.matType]; - let threshType = cv[this.params.threshType]; - let threshold = 127.0; - let thresholdMax = 210.0; - let src = new cv.Mat(matSize, matType); - let dst = new cv.Mat(matSize, matType); - let srcView = src.data; - srcView[0] = 0; - srcView[1] = 100; - srcView[2] = 200; - }, - 'teardown': function() { - src.delete(); - dst.delete(); - } - }); - - // set init params - let index = suite.length - 1; - suite[index].params = { - matSize: matSize, - matType: matType, - threshType: threshType - }; - } - } - - function addSizeOnlyCase(suite, combination) { - totalCaseNum += combination.length; - for (let i = 0; i < combination.length; ++i) { - let matSize = combination[i][0]; - - suite.add('threshold', function() { + function addThresholdCase(suite, type) { + suite.add('threshold', function() { + if (type == "sizeonly") { cv.threshold(src, dst, threshold, thresholdMax, cv.THRESH_BINARY|cv.THRESH_OTSU); - }, { - 'setup': function() { - let matSize = this.params.matSize; - let threshold = 127.0; - let thresholdMax = 210.0; - let src = new cv.Mat(matSize, cv.CV_8UC1); - let dst = new cv.Mat(matSize, cv.CV_8UC1); - let srcView = src.data; - srcView[0] = 0; - srcView[1] = 100; - srcView[2] = 200; - }, - 'teardown': function() { - src.delete(); - dst.delete(); + } else { + cv.threshold(src, dst, threshold, thresholdMax, threshType); + } + }, { + 'setup': function() { + let matSize = this.params.matSize; + let type = this.params.modeType; + let src, dst, matType, threshType; + if (type == "sizeonly") { + src = new cv.Mat(matSize, cv.CV_8UC1); + dst = new cv.Mat(matSize, cv.CV_8UC1); + } else { + matType = cv[this.params.matType]; + threshType = cv[this.params.threshType]; + src = new cv.Mat(matSize, matType); + dst = new cv.Mat(matSize, matType); } - }); - - // set init params - let index = suite.length - 1; - 
suite[index].params = { - matSize: matSize, - matType: 'CV_8UC1', - threshType: 'THRESH_BINARY|THRESH_OTSU' - }; - } - } - - function decodeParams2Case(suite, params, isSizeOnly) { - let sizeString = params.match(/[0-9]+x[0-9]+/g).toString(); - let sizes = sizeString.match(/[0-9]+/g); - let size1Str = sizes.slice(0, 2).toString(); - let matSize = cvtStr2cvSize(size1Str); - let matType, threshType; - if (isSizeOnly) { - matType = 'CV_8UC1'; - threshType = 'THRESH_BINARY|THRESH_OTSU'; - } else { - matType = (params.match(/CV\_[0-9]+[A-z][A-z][0-9]/) || []).toString(); - threshType = (params.match(/THRESH\_[A-z]+\_?[A-z]*/) || []).toString(); - } - // check if the params match and add case - for (let i = 0; i < combinations.length; ++i) { - let combination = combinations[i]; - for (let j = 0; j < combination.length; ++j) { - if (matSize === combination[j][0] && matType === combination[j][1] && threshType === combination[j][2]) { - thresholdFunc[i](suite, [combination[j]]); + let threshold = 127.0; + let thresholdMax = 210.0; + let srcView = src.data; + srcView[0] = 0; + srcView[1] = 100; + srcView[2] = 200; + }, + 'teardown': function() { + src.delete(); + dst.delete(); } - } - } - } - - function log(message) { - console.log(message);1 - if (!isNodeJs) { - logElement.innerHTML += `\n${'\t'.repeat(1) + message}`; - } - } - - function setBenchmarkSuite(suite) { - suite - // add listeners - .on('cycle', function(event) { - ++currentCaseId; - let params = event.target.params; - let matSize = params.matSize; - let matType = params.matType; - let threshType = params.threshType; - log(`=== ${event.target.name} ${currentCaseId} ===`); - log(`params: (${parseInt(matSize.width)}x${parseInt(matSize.height)},`+ - `${matType},${threshType})`); - log('elapsed time:' +String(event.target.times.elapsed*1000)+' ms'); - log('mean time:' +String(event.target.stats.mean*1000)+' ms'); - log('stddev time:' +String(event.target.stats.deviation*1000)+' ms'); - log(String(event.target)); - }) 
- .on('error', function(event) { log(`test case ${event.target.name} failed`); }) - .on('complete', function(event) { - log(`\n ###################################`) - log(`Finished testing ${event.currentTarget.length} cases \n`); - if (!isNodeJs) { - runButton.removeAttribute('disabled'); - runButton.setAttribute('class', 'btn btn-primary'); - runButton.innerHTML = 'Run'; - } }); } + function addThresholdModecase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let matSize = combination[i][0]; + let matType = 'CV_8UC1'; + let threshType = 'THRESH_BINARY|THRESH_OTSU'; + if (type != "sizeonly") { + matType = combination[i][1]; + threshType = combination[i][2]; + } + let params = {matSize: matSize, matType: matType, threshType: threshType, modeType: type}; + addKernelCase(suite, params, type, addThresholdCase); + } + } + function genBenchmarkCase(paramsContent) { let suite = new Benchmark.Suite; totalCaseNum = 0; currentCaseId = 0; - if (/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g.test(paramsContent.toString())) { - let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g)[0]; - let isSizeOnly = 0; - decodeParams2Case(suite, params, isSizeOnly); + let params = ""; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + + if (/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*THRESH\_\w+\)/g.test(paramsContent.toString())) { + params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*THRESH\_\w+\)/g)[0]; + paramObjs.push({name:"matType", value:"", reg:["/CV\_[0-9]+[A-z][A-z][0-9]/"], index:1}); + paramObjs.push({name:"threshType", value:"", reg:["/THRESH\_[A-z]+\_?[A-z]*/"], index:2}); } else if (/[\ ]*[0-9]+x[0-9]+[\ ]*/g.test(paramsContent.toString())) { - let params = paramsContent.toString().match(/[\ ]*[0-9]+x[0-9]+[\ ]*/g)[0]; - let isSizeOnly = 1; - decodeParams2Case(suite, params, isSizeOnly); + params = paramsContent.toString().match(/[\ 
]*[0-9]+x[0-9]+[\ ]*/g)[0]; + paramObjs.push({name:"matType", value:"CV_8UC1", reg:[""], index:1}); + paramObjs.push({name:"threshType", value:"THRESH_BINARY|THRESH_OTSU", reg:[""], index:2}); } - else { + + if(params != ""){ + let locationList = decodeParams2Case(params, paramObjs,combinations); + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + if (first == 0) { + addThresholdModecase(suite, [combinations[first][second]], "normal"); + } else { + addThresholdModecase(suite, [combinations[first][second]], "sizeonly"); + } + } + } else { log("no filter or getting invalid params, run all the cases"); - addSizeMatTypeThreshTypeCase(suite, combiSizeMatTypeThreshType); - addSizeOnlyCase(suite, combiSizeOnly); + addThresholdModecase(suite, combiSizeMatTypeThreshType, "normal"); + addThresholdModecase(suite, combiSizeOnly, "sizeonly"); } - setBenchmarkSuite(suite); + setBenchmarkSuite(suite, "threshold", currentCaseId); log(`Running ${totalCaseNum} tests from Threshold`); suite.run({ 'async': true }); // run the benchmark } // init - let thresholdFunc = [addSizeMatTypeThreshTypeCase, addSizeOnlyCase]; let combinations = [combiSizeMatTypeThreshType, combiSizeOnly]; // set test filter params if (isNodeJs) { const args = process.argv.slice(2); let paramsContent = ''; - if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g.test(args.toString())) { - paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*\w+,[\ ]*\w+\)/g)[0]; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*THRESH\_\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*CV\_\w+,[\ ]*THRESH\_\w+\)/g)[0]; } else if (/--test_param_filter=[\ ]*[0-9]+x[0-9]+[\ ]*/g.test(args.toString())) { paramsContent = args.toString().match(/[\ ]*[0-9]+x[0-9]+[\ ]*/g)[0]; } @@ -208,10 +140,19 @@ cv.onRuntimeInitialized = () => { let paramsContent = paramsElement.value; 
genBenchmarkCase(paramsContent); if (totalCaseNum !== 0) { - runButton.setAttribute("disabled", "disabled"); - runButton.setAttribute('class', 'btn btn-primary disabled'); - runButton.innerHTML = "Running"; + disableButton(); } } } -}; \ No newline at end of file +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_warpAffine.html b/modules/js/perf/perf_imgproc/perf_warpAffine.html new file mode 100644 index 0000000000..53a0fd9d67 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_warpAffine.html @@ -0,0 +1,73 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ warpAffine +
+
+

Parameters Filter

+ for example: (640x480, INTER_NEAREST, BORDER_CONSTANT) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_warpAffine.js b/modules/js/perf/perf_imgproc/perf_warpAffine.js new file mode 100644 index 0000000000..c63cd60e61 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_warpAffine.js @@ -0,0 +1,130 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const WarpAffineSize = [cvSize.szVGA, cvSize.sz720p, cvSize.sz1080p]; + const InterType = ["INTER_NEAREST", "INTER_LINEAR"]; + const BorderMode = ["BORDER_CONSTANT", "BORDER_REPLICATE"] + const combiWarpAffine = combine(WarpAffineSize, InterType, BorderMode); + + function addWarpAffineCase(suite, type) { + suite.add('warpAffine', function() { + cv.warpAffine(src, dst, warpMat, sz, interType, borderMode, borderColor); + }, { + 'setup': function() { + let sz = this.params.size; + let interType = cv[this.params.interType]; + let borderMode = cv[this.params.borderMode]; + let srcSize = new cv.Size(512, 512); + + let borderColor = new cv.Scalar.all(150); + let src = new cv.Mat(srcSize, cv.CV_8UC4); + let dst = new cv.Mat(sz, cv.CV_8UC4); + fillGradient(cv, src); + if (borderMode == cv.BORDER_CONSTANT) { + smoothBorder(cv, src, borderMode, 1); + } + + let point = new cv.Point(src.cols/2.0, src.rows/2.0); + let warpMat = cv.getRotationMatrix2D(point, 30.0, 2.2); + }, + 
'teardown': function() { + src.delete(); + dst.delete(); + warpMat.delete(); + } + }); + } + + function addWarpAffineModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let interType = combination[i][1]; + let borderMode = combination[i][2]; + + let params = {size: size, interType:interType, borderMode:borderMode}; + addKernelCase(suite, params, type, addWarpAffineCase); + } + } + + function genBenchmarkCase(paramsContent) { + let suite = new Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + + if (/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g.test(paramsContent.toString())) { + let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g)[0]; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"interType", value: "", reg:["/INTER\_\\w+/"], index:1}); + paramObjs.push({name:"borderMode", value: "", reg:["/BORDER\_\\w+/"], index:2}); + let locationList = decodeParams2Case(params, paramObjs, warpAffineCombinations); + + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addWarpAffineModeCase(suite, [warpAffineCombinations[first][second]], first); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addWarpAffineModeCase(suite, combiWarpAffine, 0); + } + setBenchmarkSuite(suite, "warpAffine", currentCaseId); + log(`Running ${totalCaseNum} tests from warpAffine`); + suite.run({ 'async': true }); // run the benchmark + } + + let warpAffineCombinations = [combiWarpAffine]; + + if (isNodeJs) { + const args = process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g)[0]; + } + 
genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_warpPerspective.html b/modules/js/perf/perf_imgproc/perf_warpPerspective.html new file mode 100644 index 0000000000..7fc4c89ad2 --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_warpPerspective.html @@ -0,0 +1,73 @@ + + + + + OpenCV.js Performance Test + + + + +
+
+
+

OpenCV.js Performance Test

+
+

Modules

+ Image Processing +
+
+

Kernels

+ warpPerspective +
+
+

Parameters Filter

+ for example: (640x480, INTER_NEAREST, BORDER_CONSTANT) +
+
+
+
+
+ + (It will take several minutes)
+
+
+
+

+          
+
+
+
+ + + + + + + + + + \ No newline at end of file diff --git a/modules/js/perf/perf_imgproc/perf_warpPerspective.js b/modules/js/perf/perf_imgproc/perf_warpPerspective.js new file mode 100644 index 0000000000..dcde2fb22c --- /dev/null +++ b/modules/js/perf/perf_imgproc/perf_warpPerspective.js @@ -0,0 +1,143 @@ +const isNodeJs = (typeof window) === 'undefined'? true : false; + +if (isNodeJs) { + var Benchmark = require('benchmark'); + var cv = require('../../opencv'); + var HelpFunc = require('../perf_helpfunc'); + var Base = require('../base'); +} else { + var paramsElement = document.getElementById('params'); + var runButton = document.getElementById('runButton'); + var logElement = document.getElementById('log'); +} + +function perf() { + + console.log('opencv.js loaded'); + if (isNodeJs) { + global.cv = cv; + global.combine = HelpFunc.combine; + global.cvtStr2cvSize = HelpFunc.cvtStr2cvSize; + global.cvSize = Base.getCvSize(); + } else { + enableButton(); + cvSize = getCvSize(); + } + let totalCaseNum, currentCaseId; + + const WarpPersSize = [cvSize.szVGA, cvSize.sz720p, cvSize.sz1080p]; + const InterType = ["INTER_NEAREST", "INTER_LINEAR"]; + const BorderMode = ["BORDER_CONSTANT", "BORDER_REPLICATE"] + const combiWarpPers = combine(WarpPersSize, InterType, BorderMode); + + function addWarpPerspectiveCase(suite, type) { + suite.add('warpPerspective', function() { + cv.warpPerspective(src, dst, warpMat, sz, interType, borderMode, borderColor); + }, { + 'setup': function() { + let sz = this.params.size; + let interType = cv[this.params.interType]; + let borderMode = cv[this.params.borderMode]; + let srcSize = new cv.Size(512, 512); + + let borderColor = new cv.Scalar.all(150); + let src = new cv.Mat(srcSize, cv.CV_8UC4); + let dst = new cv.Mat(sz, cv.CV_8UC4); + fillGradient(cv, src); + if (borderMode == cv.BORDER_CONSTANT) { + smoothBorder(cv, src, borderMode, 1); + } + + let rotMat = cv.getRotationMatrix2D(new cv.Point(src.cols/2.0, src.rows/2.0), 30.0, 2.2); + 
let warpMat = new cv.Mat(3, 3, cv.CV_64FC1); + + for(r=0; r<2; r++) { + for(c=0; c<3; c++) { + view = warpMat.doublePtr(r,c) + view[0] = rotMat.doubleAt(r, c); + } + } + view = warpMat.doublePtr(2,0); + view[0] = 0.3/sz.width; + view = warpMat.doublePtr(2,1); + view[0] = 0.3/sz.height; + view = warpMat.doublePtr(2,2); + view[0] = 1; + }, + 'teardown': function() { + src.delete(); + dst.delete(); + warpMat.delete(); + } + }); + } + + function addWarpPerspectiveModeCase(suite, combination, type) { + totalCaseNum += combination.length; + for (let i = 0; i < combination.length; ++i) { + let size = combination[i][0]; + let interType = combination[i][1]; + let borderMode = combination[i][2]; + + let params = {size: size, interType:interType, borderMode:borderMode}; + addKernelCase(suite, params, type, addWarpPerspectiveCase); + } + } + + function genBenchmarkCase(paramsContent) { + let suite = new Benchmark.Suite; + totalCaseNum = 0; + currentCaseId = 0; + + if (/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g.test(paramsContent.toString())) { + let params = paramsContent.toString().match(/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g)[0]; + let paramObjs = []; + paramObjs.push({name:"size", value:"", reg:[""], index:0}); + paramObjs.push({name:"interType", value: "", reg:["/INTER\_\\w+/"], index:1}); + paramObjs.push({name:"borderMode", value: "", reg:["/BORDER\_\\w+/"], index:2}); + let locationList = decodeParams2Case(params, paramObjs, warpPersCombinations); + + for (let i = 0; i < locationList.length; i++){ + let first = locationList[i][0]; + let second = locationList[i][1]; + addWarpPerspectiveModeCase(suite, [warpPersCombinations[first][second]], first); + } + } else { + log("no filter or getting invalid params, run all the cases"); + addWarpPerspectiveModeCase(suite, combiWarpPers, 0); + } + setBenchmarkSuite(suite, "warpPerspective", currentCaseId); + log(`Running ${totalCaseNum} tests from warpPerspective`); + suite.run({ 'async': true }); // run 
the benchmark + } + + let warpPersCombinations = [combiWarpPers]; + + if (isNodeJs) { + const args = process.argv.slice(2); + let paramsContent = ''; + if (/--test_param_filter=\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g.test(args.toString())) { + paramsContent = args.toString().match(/\([0-9]+x[0-9]+,[\ ]*INTER\_\w+,[\ ]*BORDER\_\w+\)/g)[0]; + } + genBenchmarkCase(paramsContent); + } else { + runButton.onclick = function() { + let paramsContent = paramsElement.value; + genBenchmarkCase(paramsContent); + if (totalCaseNum !== 0) { + disableButton(); + } + } + } +}; + +async function main() { + if (cv instanceof Promise) { + cv = await cv; + perf(); + } else { + cv.onRuntimeInitialized = perf; + } +} + +main(); \ No newline at end of file diff --git a/modules/js/src/loader.js b/modules/js/src/loader.js new file mode 100644 index 0000000000..ea100e8601 --- /dev/null +++ b/modules/js/src/loader.js @@ -0,0 +1,96 @@ +async function loadOpenCV(paths, onloadCallback) { + let OPENCV_URL = ""; + let asmPath = ""; + let wasmPath = ""; + let simdPath = ""; + let threadsPath = ""; + let threadsSimdPath = ""; + + if(!(paths instanceof Object)) { + throw new Error("The first input should be a object that points the path to the OpenCV.js"); + } + + if ("asm" in paths) { + asmPath = paths["asm"]; + } + + if ("wasm" in paths) { + wasmPath = paths["wasm"]; + } + + if ("threads" in paths) { + threadsPath = paths["threads"]; + } + + if ("simd" in paths) { + simdPath = paths["simd"]; + } + + if ("threadsSimd" in paths) { + threadsSimdPath = paths["threadsSimd"]; + } + + let wasmSupported = !(typeof WebAssembly === 'undefined'); + if (!wasmSupported && OPENCV_URL === "" && asmPath != "") { + OPENCV_URL = asmPath; + console.log("The OpenCV.js for Asm.js is loaded now"); + } else if (!wasmSupported && asmPath == ""){ + throw new Error("The browser supports the Asm.js only, but the path of OpenCV.js for Asm.js is empty"); + } + + let simdSupported = wasmSupported ? 
await wasmFeatureDetect.simd() : false; + let threadsSupported = wasmSupported ? await wasmFeatureDetect.threads() : false; + + if (simdSupported && threadsSupported && threadsSimdPath != "") { + OPENCV_URL = threadsSimdPath; + console.log("The OpenCV.js with simd and threads optimization is loaded now"); + } else if (simdSupported && simdPath != "") { + if (threadsSupported && threadsSimdPath === "") { + console.log("The browser supports simd and threads, but the path of OpenCV.js with simd and threads optimization is empty"); + } + OPENCV_URL = simdPath; + console.log("The OpenCV.js with simd optimization is loaded now."); + } else if (threadsSupported && threadsPath != "") { + if (simdSupported && threadsSimdPath === "") { + console.log("The browser supports simd and threads, but the path of OpenCV.js with simd and threads optimization is empty"); + } + OPENCV_URL = threadsPath; + console.log("The OpenCV.js with threads optimization is loaded now"); + } else if (wasmSupported && wasmPath != "") { + if(simdSupported && threadsSupported) { + console.log("The browser supports simd and threads, but the path of OpenCV.js with simd and threads optimization is empty"); + } + + if (simdSupported) { + console.log("The browser supports simd optimization, but the path of OpenCV.js with simd optimization is empty"); + } + + if (threadsSupported) { + console.log("The browser supports threads optimization, but the path of OpenCV.js with threads optimization is empty"); + } + + OPENCV_URL = wasmPath; + console.log("The OpenCV.js for wasm is loaded now"); + } else if (wasmSupported) { + console.log("The browser supports wasm, but the path of OpenCV.js for wasm is empty"); + } + + if (OPENCV_URL === "") { + throw new Error("No available OpenCV.js, please check your paths"); + } + + let script = document.createElement('script'); + script.setAttribute('async', ''); + script.setAttribute('type', 'text/javascript'); + script.addEventListener('load', () => { + onloadCallback(); + }); 
+ script.addEventListener('error', () => { + console.log('Failed to load opencv.js'); + }); + script.src = OPENCV_URL; + let node = document.getElementsByTagName('script')[0]; + if (node.src != OPENCV_URL) { + node.parentNode.insertBefore(script, node); + } +} \ No newline at end of file diff --git a/platforms/js/build_js.py b/platforms/js/build_js.py index fbeb1e4fb3..38e988a3bd 100644 --- a/platforms/js/build_js.py +++ b/platforms/js/build_js.py @@ -201,6 +201,9 @@ class Builder: def build_doc(self): execute(["make", "-j", str(multiprocessing.cpu_count()), "doxygen"]) + def build_loader(self): + execute(["make", "-j", str(multiprocessing.cpu_count()), "opencv_js_loader"]) + #=================================================================================================== @@ -221,6 +224,7 @@ if __name__ == "__main__": parser.add_argument('--build_test', action="store_true", help="Build tests") parser.add_argument('--build_perf', action="store_true", help="Build performance tests") parser.add_argument('--build_doc', action="store_true", help="Build tutorials") + parser.add_argument('--build_loader', action="store_true", help="Build OpenCV.js loader") parser.add_argument('--clean_build_dir', action="store_true", help="Clean build dir") parser.add_argument('--skip_config', action="store_true", help="Skip cmake config") parser.add_argument('--config_only', action="store_true", help="Only do cmake config") @@ -292,6 +296,11 @@ if __name__ == "__main__": log.info("=====") builder.build_doc() + if args.build_loader: + log.info("=====") + log.info("===== Building OpenCV.js loader") + log.info("=====") + builder.build_loader() log.info("=====") log.info("===== Build finished") @@ -316,3 +325,8 @@ if __name__ == "__main__": opencvjs_tutorial_path = find_file("tutorial_js_root.html", os.path.join(builder.build_dir, "doc", "doxygen", "html")) if check_file(opencvjs_tutorial_path): log.info("OpenCV.js tutorials location: %s", opencvjs_tutorial_path) + + if args.build_loader: 
+ opencvjs_loader_path = os.path.join(builder.build_dir, "bin", "loader.js") + if check_file(opencvjs_loader_path): + log.info("OpenCV.js loader location: %s", opencvjs_loader_path) From 5ac0712cf1f25af2224afd1776ca9476e39f85d8 Mon Sep 17 00:00:00 2001 From: masa-iwm <37230118+masa-iwm@users.noreply.github.com> Date: Mon, 19 Oct 2020 06:22:06 +0900 Subject: [PATCH 033/152] Merge pull request #18593 from masa-iwm:master Add support thread-local directx (OpenCL interop) initialization * support thread-local directx (OpenCL interop) initialization * reflect reviews * Remove verbose function prototype declarations * Countermeasures for VC warnings. (declaration of 'platform' hides class member) * core(directx): remove internal stuff from public headers --- modules/core/src/directx.cpp | 423 ++++++++++++++++++++--------------- modules/core/src/directx.hpp | 23 ++ modules/core/src/ocl.cpp | 34 +++ 3 files changed, 295 insertions(+), 185 deletions(-) create mode 100644 modules/core/src/directx.hpp diff --git a/modules/core/src/directx.cpp b/modules/core/src/directx.cpp index c9bd1a4fa1..f028702d7f 100644 --- a/modules/core/src/directx.cpp +++ b/modules/core/src/directx.cpp @@ -49,6 +49,7 @@ #ifdef HAVE_DIRECTX #include #include "directx.inc.hpp" +#include "directx.hpp" #else // HAVE_DIRECTX #define NO_DIRECTX_SUPPORT_ERROR CV_Error(cv::Error::StsBadFunc, "OpenCV was build without DirectX support") #endif @@ -234,11 +235,191 @@ int getTypeFromD3DFORMAT(const int iD3DFORMAT) #endif } -namespace ocl { - #if defined(HAVE_DIRECTX) && defined(HAVE_OPENCL) -static bool g_isDirect3DDevice9Ex = false; // Direct3DDevice9Ex or Direct3DDevice9 was used +namespace internal { +struct OpenCLDirectXImpl +{ + cl_platform_id platform_; + + cl_platform_id initializedPlatform9 = NULL; + cl_platform_id initializedPlatform10 = NULL; + cl_platform_id initializedPlatform11 = NULL; +public: + OpenCLDirectXImpl() + : platform_(0) + { + } + + bool isDirect3DDevice9Ex = false; // Direct3DDevice9Ex 
or Direct3DDevice9 was used + +#ifdef HAVE_OPENCL_D3D11_NV + clCreateFromD3D11Texture2DNV_fn clCreateFromD3D11Texture2DNV = NULL; + clEnqueueAcquireD3D11ObjectsNV_fn clEnqueueAcquireD3D11ObjectsNV = NULL; + clEnqueueReleaseD3D11ObjectsNV_fn clEnqueueReleaseD3D11ObjectsNV = NULL; #endif + clCreateFromD3D11Texture2DKHR_fn clCreateFromD3D11Texture2DKHR = NULL; + clEnqueueAcquireD3D11ObjectsKHR_fn clEnqueueAcquireD3D11ObjectsKHR = NULL; + clEnqueueReleaseD3D11ObjectsKHR_fn clEnqueueReleaseD3D11ObjectsKHR = NULL; + + clCreateFromD3D10Texture2DKHR_fn clCreateFromD3D10Texture2DKHR = NULL; + clEnqueueAcquireD3D10ObjectsKHR_fn clEnqueueAcquireD3D10ObjectsKHR = NULL; + clEnqueueReleaseD3D10ObjectsKHR_fn clEnqueueReleaseD3D10ObjectsKHR = NULL; + + clCreateFromDX9MediaSurfaceKHR_fn clCreateFromDX9MediaSurfaceKHR = NULL; + clEnqueueAcquireDX9MediaSurfacesKHR_fn clEnqueueAcquireDX9MediaSurfacesKHR = NULL; + clEnqueueReleaseDX9MediaSurfacesKHR_fn clEnqueueReleaseDX9MediaSurfacesKHR = NULL; + + cl_platform_id getPlatform() + { + if (!platform_) + { + CV_Assert(cv::ocl::haveOpenCL()); + + cl_device_id device = (cl_device_id)ocl::Device::getDefault().ptr(); + CV_Assert(device); + cl_int status = clGetDeviceInfo(device, CL_DEVICE_PLATFORM, sizeof(platform_), &platform_, NULL); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get platform corresponding to device"); + } + + return platform_; + } + + + bool initializeD3D11() + { + using namespace cv::ocl; + cl_platform_id platform = getPlatform(); + + bool useCLNVEXT = false; + size_t exts_len; + cl_int status = clGetPlatformInfo(platform, CL_PLATFORM_EXTENSIONS, 0, NULL, &exts_len); + if (status != CL_SUCCESS) + CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get length of CL_PLATFORM_EXTENSIONS"); + cv::AutoBuffer extensions(exts_len); + status = clGetPlatformInfo(platform, CL_PLATFORM_EXTENSIONS, exts_len, static_cast(extensions.data()), NULL); + if (status != CL_SUCCESS) + 
CV_Error(cv::Error::OpenCLInitError, "OpenCL: No available CL_PLATFORM_EXTENSIONS"); + bool is_support_cl_khr_d3d11_sharing = false; + if (strstr(extensions.data(), "cl_khr_d3d11_sharing")) + is_support_cl_khr_d3d11_sharing = true; +#ifdef HAVE_OPENCL_D3D11_NV + bool is_support_cl_nv_d3d11_sharing = false; + if (strstr(extensions.data(), "cl_nv_d3d11_sharing")) + is_support_cl_nv_d3d11_sharing = true; + if (!is_support_cl_nv_d3d11_sharing && !is_support_cl_khr_d3d11_sharing) + CV_Error(cv::Error::OpenCLInitError, "OpenCL: No supported extensions"); +#else + if (!is_support_cl_khr_d3d11_sharing) + CV_Error(cv::Error::OpenCLInitError, "OpenCL: No supported extensions"); +#endif + +#ifdef HAVE_OPENCL_D3D11_NV + if (is_support_cl_nv_d3d11_sharing) + { + if (initializedPlatform11 != platform) + { + clCreateFromD3D11Texture2DNV = (clCreateFromD3D11Texture2DNV_fn) + clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromD3D11Texture2DNV"); + clEnqueueAcquireD3D11ObjectsNV = (clEnqueueAcquireD3D11ObjectsNV_fn) + clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D11ObjectsNV"); + clEnqueueReleaseD3D11ObjectsNV = (clEnqueueReleaseD3D11ObjectsNV_fn) + clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D11ObjectsNV"); + initializedPlatform11 = platform; + } + if (clCreateFromD3D11Texture2DNV && clEnqueueAcquireD3D11ObjectsNV && clEnqueueReleaseD3D11ObjectsNV) + { + useCLNVEXT = true; + } + } + else +#endif + { + if (is_support_cl_khr_d3d11_sharing) + { + if (initializedPlatform11 != platform) + { + clCreateFromD3D11Texture2DKHR = (clCreateFromD3D11Texture2DKHR_fn) + clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromD3D11Texture2DKHR"); + clEnqueueAcquireD3D11ObjectsKHR = (clEnqueueAcquireD3D11ObjectsKHR_fn) + clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D11ObjectsKHR"); + clEnqueueReleaseD3D11ObjectsKHR = (clEnqueueReleaseD3D11ObjectsKHR_fn) + 
clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D11ObjectsKHR"); + initializedPlatform11 = platform; + } + if (!clCreateFromD3D11Texture2DKHR || !clEnqueueAcquireD3D11ObjectsKHR || !clEnqueueReleaseD3D11ObjectsKHR) + { + CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D11"); + } + } + } + return useCLNVEXT; + } + + void initializeD3D9() + { + using namespace cv::ocl; + cl_platform_id platform = getPlatform(); + if (initializedPlatform9 != platform) + { + clCreateFromDX9MediaSurfaceKHR = (clCreateFromDX9MediaSurfaceKHR_fn) + clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromDX9MediaSurfaceKHR"); + clEnqueueAcquireDX9MediaSurfacesKHR = (clEnqueueAcquireDX9MediaSurfacesKHR_fn) + clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireDX9MediaSurfacesKHR"); + clEnqueueReleaseDX9MediaSurfacesKHR = (clEnqueueReleaseDX9MediaSurfacesKHR_fn) + clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseDX9MediaSurfacesKHR"); + initializedPlatform9 = platform; + } + if (!clCreateFromDX9MediaSurfaceKHR || !clEnqueueAcquireDX9MediaSurfacesKHR || !clEnqueueReleaseDX9MediaSurfacesKHR) + { + CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D9"); + } + } + + void initializeD3D10() + { + using namespace cv::ocl; + cl_platform_id platform = getPlatform(); + if (initializedPlatform10 != platform) + { + clCreateFromD3D10Texture2DKHR = (clCreateFromD3D10Texture2DKHR_fn) + clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromD3D10Texture2DKHR"); + clEnqueueAcquireD3D10ObjectsKHR = (clEnqueueAcquireD3D10ObjectsKHR_fn) + clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D10ObjectsKHR"); + clEnqueueReleaseD3D10ObjectsKHR = (clEnqueueReleaseD3D10ObjectsKHR_fn) + clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D10ObjectsKHR"); + initializedPlatform10 = platform; + } + if (!clCreateFromD3D10Texture2DKHR || 
!clEnqueueAcquireD3D10ObjectsKHR || !clEnqueueReleaseD3D10ObjectsKHR) + { + CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D10"); + } + } +}; + +OpenCLDirectXImpl* createDirectXImpl() +{ + return new OpenCLDirectXImpl(); +} +void deleteDirectXImpl(OpenCLDirectXImpl** p) +{ + if (*p) + { + delete (*p); + *p = NULL; + } +} +OpenCLDirectXImpl& getImpl() +{ + OpenCLDirectXImpl* i = getDirectXImpl(ocl::Context::getDefault()); + CV_Assert(i); + return *i; +} +} +using namespace internal; +#endif + +namespace ocl { Context& initializeContextFromD3D11Device(ID3D11Device* pD3D11Device) { @@ -715,7 +896,7 @@ Context& initializeContextFromDirect3DDevice9Ex(IDirect3DDevice9Ex* pDirect3DDev throw; } clExecCtx.bind(); - g_isDirect3DDevice9Ex = true; + getImpl().isDirect3DDevice9Ex = true; return const_cast(clExecCtx.getContext()); #endif } @@ -838,96 +1019,13 @@ Context& initializeContextFromDirect3DDevice9(IDirect3DDevice9* pDirect3DDevice9 throw; } clExecCtx.bind(); - g_isDirect3DDevice9Ex = false; + getImpl().isDirect3DDevice9Ex = false; return const_cast(clExecCtx.getContext()); #endif } } // namespace cv::ocl -#if defined(HAVE_DIRECTX) && defined(HAVE_OPENCL) - -#ifdef HAVE_OPENCL_D3D11_NV -clCreateFromD3D11Texture2DNV_fn clCreateFromD3D11Texture2DNV = NULL; -clEnqueueAcquireD3D11ObjectsNV_fn clEnqueueAcquireD3D11ObjectsNV = NULL; -clEnqueueReleaseD3D11ObjectsNV_fn clEnqueueReleaseD3D11ObjectsNV = NULL; -#endif -clCreateFromD3D11Texture2DKHR_fn clCreateFromD3D11Texture2DKHR = NULL; -clEnqueueAcquireD3D11ObjectsKHR_fn clEnqueueAcquireD3D11ObjectsKHR = NULL; -clEnqueueReleaseD3D11ObjectsKHR_fn clEnqueueReleaseD3D11ObjectsKHR = NULL; - -static bool __OpenCLinitializeD3D11() -{ - using namespace cv::ocl; - static cl_platform_id initializedPlatform = NULL; - cl_platform_id platform = (cl_platform_id)Platform::getDefault().ptr(); - - bool useCLNVEXT = false; - size_t exts_len; - cl_int status = clGetPlatformInfo(platform, CL_PLATFORM_EXTENSIONS, 0, NULL, 
&exts_len); - if (status != CL_SUCCESS) - CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get length of CL_PLATFORM_EXTENSIONS"); - cv::AutoBuffer extensions(exts_len); - status = clGetPlatformInfo(platform, CL_PLATFORM_EXTENSIONS, exts_len, static_cast(extensions.data()), NULL); - if (status != CL_SUCCESS) - CV_Error(cv::Error::OpenCLInitError, "OpenCL: No available CL_PLATFORM_EXTENSIONS"); - bool is_support_cl_khr_d3d11_sharing = false; - if (strstr(extensions.data(), "cl_khr_d3d11_sharing")) - is_support_cl_khr_d3d11_sharing = true; -#ifdef HAVE_OPENCL_D3D11_NV - bool is_support_cl_nv_d3d11_sharing = false; - if (strstr(extensions.data(), "cl_nv_d3d11_sharing")) - is_support_cl_nv_d3d11_sharing = true; - if (!is_support_cl_nv_d3d11_sharing && !is_support_cl_khr_d3d11_sharing) - CV_Error(cv::Error::OpenCLInitError, "OpenCL: No supported extensions"); -#else - if (!is_support_cl_khr_d3d11_sharing) - CV_Error(cv::Error::OpenCLInitError, "OpenCL: No supported extensions"); -#endif - -#ifdef HAVE_OPENCL_D3D11_NV - if (is_support_cl_nv_d3d11_sharing) - { - if (initializedPlatform != platform) - { - clCreateFromD3D11Texture2DNV = (clCreateFromD3D11Texture2DNV_fn) - clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromD3D11Texture2DNV"); - clEnqueueAcquireD3D11ObjectsNV = (clEnqueueAcquireD3D11ObjectsNV_fn) - clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D11ObjectsNV"); - clEnqueueReleaseD3D11ObjectsNV = (clEnqueueReleaseD3D11ObjectsNV_fn) - clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D11ObjectsNV"); - initializedPlatform = platform; - } - if (clCreateFromD3D11Texture2DNV && clEnqueueAcquireD3D11ObjectsNV && clEnqueueReleaseD3D11ObjectsNV) - { - useCLNVEXT = true; - } - } - else -#endif - { - if (is_support_cl_khr_d3d11_sharing) - { - if (initializedPlatform != platform) - { - clCreateFromD3D11Texture2DKHR = (clCreateFromD3D11Texture2DKHR_fn) - clGetExtensionFunctionAddressForPlatform(platform, 
"clCreateFromD3D11Texture2DKHR"); - clEnqueueAcquireD3D11ObjectsKHR = (clEnqueueAcquireD3D11ObjectsKHR_fn) - clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D11ObjectsKHR"); - clEnqueueReleaseD3D11ObjectsKHR = (clEnqueueReleaseD3D11ObjectsKHR_fn) - clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D11ObjectsKHR"); - initializedPlatform = platform; - } - if (!clCreateFromD3D11Texture2DKHR || !clEnqueueAcquireD3D11ObjectsKHR || !clEnqueueReleaseD3D11ObjectsKHR) - { - CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D11"); - } - } - } - return useCLNVEXT; -} -#endif // defined(HAVE_DIRECTX) && defined(HAVE_OPENCL) - } // namespace directx @@ -1009,20 +1107,21 @@ static void __convertToD3D11Texture2DKHR(InputArray src, ID3D11Texture2D* pD3D11 using namespace cv::ocl; Context& ctx = Context::getDefault(); cl_context context = (cl_context)ctx.ptr(); + OpenCLDirectXImpl& impl = getImpl(); cl_int status = 0; cl_mem clImage = 0; #ifdef HAVE_DIRECTX_NV12 cl_mem clImageUV = 0; #endif - clImage = clCreateFromD3D11Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 0, &status); + clImage = impl.clCreateFromD3D11Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 0, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DKHR failed"); #ifdef HAVE_DIRECTX_NV12 if(DXGI_FORMAT_NV12 == desc.Format) { - clImageUV = clCreateFromD3D11Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 1, &status); + clImageUV = impl.clCreateFromD3D11Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 1, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DKHR failed"); } @@ -1030,21 +1129,21 @@ static void __convertToD3D11Texture2DKHR(InputArray src, ID3D11Texture2D* pD3D11 cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - status = clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImage, 0, 
NULL, NULL); + status = impl.clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsKHR failed"); #ifdef HAVE_DIRECTX_NV12 if(DXGI_FORMAT_NV12 == desc.Format) { - status = clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL); + status = impl.clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsKHR failed"); if(!ocl::ocl_convert_bgr_to_nv12(clBuffer, (int)u.step[0], u.cols, u.rows, clImage, clImageUV)) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: ocl_convert_bgr_to_nv12 failed"); - status = clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL); + status = impl.clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsKHR failed"); } @@ -1060,7 +1159,7 @@ static void __convertToD3D11Texture2DKHR(InputArray src, ID3D11Texture2D* pD3D11 CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed"); } - status = clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsKHR failed"); @@ -1107,40 +1206,41 @@ static void __convertToD3D11Texture2DNV(InputArray src, ID3D11Texture2D* pD3D11T using namespace cv::ocl; Context& ctx = Context::getDefault(); cl_context context = (cl_context)ctx.ptr(); + OpenCLDirectXImpl& impl = getImpl(); cl_int status = 0; cl_mem clImage = 0; #ifdef HAVE_DIRECTX_NV12 cl_mem clImageUV = 0; #endif - clImage = clCreateFromD3D11Texture2DNV(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 0, &status); + clImage = impl.clCreateFromD3D11Texture2DNV(context, CL_MEM_WRITE_ONLY, 
pD3D11Texture2D, 0, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DNV failed"); #ifdef HAVE_DIRECTX_NV12 if (DXGI_FORMAT_NV12 == desc.Format) { - clImageUV = clCreateFromD3D11Texture2DNV(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 1, &status); + clImageUV = impl.clCreateFromD3D11Texture2DNV(context, CL_MEM_WRITE_ONLY, pD3D11Texture2D, 1, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DNV failed"); } #endif cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - status = clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsNV failed"); #ifdef HAVE_DIRECTX_NV12 if(DXGI_FORMAT_NV12 == desc.Format) { - status = clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL); + status = impl.clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsNV failed"); if(!ocl::ocl_convert_bgr_to_nv12(clBuffer, (int)u.step[0], u.cols, u.rows, clImage, clImageUV)) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: ocl_convert_bgr_to_nv12 failed"); - status = clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL); + status = impl.clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsNV failed"); } @@ -1156,7 +1256,7 @@ static void __convertToD3D11Texture2DNV(InputArray src, ID3D11Texture2D* pD3D11T CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed"); } - status = clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImage, 0, NULL, 
NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsNV failed"); @@ -1201,11 +1301,12 @@ static void __convertFromD3D11Texture2DKHR(ID3D11Texture2D* pD3D11Texture2D, Out using namespace cv::ocl; Context& ctx = Context::getDefault(); cl_context context = (cl_context)ctx.ptr(); + OpenCLDirectXImpl& impl = getImpl(); cl_int status = 0; cl_mem clImage = 0; - clImage = clCreateFromD3D11Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 0, &status); + clImage = impl.clCreateFromD3D11Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 0, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DKHR failed"); @@ -1213,7 +1314,7 @@ static void __convertFromD3D11Texture2DKHR(ID3D11Texture2D* pD3D11Texture2D, Out cl_mem clImageUV = 0; if(DXGI_FORMAT_NV12 == desc.Format) { - clImageUV = clCreateFromD3D11Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 1, &status); + clImageUV = impl.clCreateFromD3D11Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 1, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DKHR failed"); } @@ -1221,21 +1322,21 @@ static void __convertFromD3D11Texture2DKHR(ID3D11Texture2D* pD3D11Texture2D, Out cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - status = clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsKHR failed"); #ifdef HAVE_DIRECTX_NV12 if(DXGI_FORMAT_NV12 == desc.Format) { - status = clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL); + status = impl.clEnqueueAcquireD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsKHR failed"); 
if(!ocl::ocl_convert_nv12_to_bgr(clImage, clImageUV, clBuffer, (int)u.step[0], u.cols, u.rows)) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: ocl_convert_nv12_to_bgr failed"); - status = clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL); + status = impl.clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImageUV, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsKHR failed"); } @@ -1251,7 +1352,7 @@ static void __convertFromD3D11Texture2DKHR(ID3D11Texture2D* pD3D11Texture2D, Out CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed"); } - status = clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueReleaseD3D11ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsKHR failed"); @@ -1296,11 +1397,12 @@ static void __convertFromD3D11Texture2DNV(ID3D11Texture2D* pD3D11Texture2D, Outp using namespace cv::ocl; Context& ctx = Context::getDefault(); cl_context context = (cl_context)ctx.ptr(); + OpenCLDirectXImpl& impl = getImpl(); cl_int status = 0; cl_mem clImage = 0; - clImage = clCreateFromD3D11Texture2DNV(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 0, &status); + clImage = impl.clCreateFromD3D11Texture2DNV(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 0, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DNV failed"); @@ -1308,28 +1410,28 @@ static void __convertFromD3D11Texture2DNV(ID3D11Texture2D* pD3D11Texture2D, Outp cl_mem clImageUV = 0; if(DXGI_FORMAT_NV12 == desc.Format) { - clImageUV = clCreateFromD3D11Texture2DNV(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 1, &status); + clImageUV = impl.clCreateFromD3D11Texture2DNV(context, CL_MEM_READ_ONLY, pD3D11Texture2D, 1, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D11Texture2DNV 
failed"); } #endif cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - status = clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsNV failed"); #ifdef HAVE_DIRECTX_NV12 if (DXGI_FORMAT::DXGI_FORMAT_NV12 == desc.Format) { - status = clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL); + status = impl.clEnqueueAcquireD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D11ObjectsNV failed"); if (!ocl::ocl_convert_nv12_to_bgr(clImage, clImageUV, clBuffer, (int)u.step[0], u.cols, u.rows)) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: ocl_convert_nv12_to_bgr failed"); - status = clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL); + status = impl.clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImageUV, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsNV failed"); } @@ -1345,7 +1447,7 @@ static void __convertFromD3D11Texture2DNV(ID3D11Texture2D* pD3D11Texture2D, Outp CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed"); } - status = clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueReleaseD3D11ObjectsNV(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D11ObjectsNV failed"); @@ -1377,7 +1479,7 @@ void convertToD3D11Texture2D(InputArray src, ID3D11Texture2D* pD3D11Texture2D) NO_OPENCL_SUPPORT_ERROR; #else - bool useCLNVEXT = __OpenCLinitializeD3D11(); + bool useCLNVEXT = getImpl().initializeD3D11(); if(!useCLNVEXT){ __convertToD3D11Texture2DKHR(src,pD3D11Texture2D); } @@ -1399,7 +1501,7 @@ void convertFromD3D11Texture2D(ID3D11Texture2D* 
pD3D11Texture2D, OutputArray dst NO_OPENCL_SUPPORT_ERROR; #else - bool useCLNVEXT = __OpenCLinitializeD3D11(); + bool useCLNVEXT = getImpl().initializeD3D11(); if(!useCLNVEXT){ __convertFromD3D11Texture2DKHR(pD3D11Texture2D,dst); } @@ -1412,40 +1514,14 @@ void convertFromD3D11Texture2D(ID3D11Texture2D* pD3D11Texture2D, OutputArray dst #endif } -#if defined(HAVE_DIRECTX) && defined(HAVE_OPENCL) -clCreateFromD3D10Texture2DKHR_fn clCreateFromD3D10Texture2DKHR = NULL; -clEnqueueAcquireD3D10ObjectsKHR_fn clEnqueueAcquireD3D10ObjectsKHR = NULL; -clEnqueueReleaseD3D10ObjectsKHR_fn clEnqueueReleaseD3D10ObjectsKHR = NULL; - -static void __OpenCLinitializeD3D10() -{ - using namespace cv::ocl; - static cl_platform_id initializedPlatform = NULL; - cl_platform_id platform = (cl_platform_id)Platform::getDefault().ptr(); - if (initializedPlatform != platform) - { - clCreateFromD3D10Texture2DKHR = (clCreateFromD3D10Texture2DKHR_fn) - clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromD3D10Texture2DKHR"); - clEnqueueAcquireD3D10ObjectsKHR = (clEnqueueAcquireD3D10ObjectsKHR_fn) - clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireD3D10ObjectsKHR"); - clEnqueueReleaseD3D10ObjectsKHR = (clEnqueueReleaseD3D10ObjectsKHR_fn) - clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseD3D10ObjectsKHR"); - initializedPlatform = platform; - } - if (!clCreateFromD3D10Texture2DKHR || !clEnqueueAcquireD3D10ObjectsKHR || !clEnqueueReleaseD3D10ObjectsKHR) - { - CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D10"); - } -} -#endif // defined(HAVE_DIRECTX) && defined(HAVE_OPENCL) - void convertToD3D10Texture2D(InputArray src, ID3D10Texture2D* pD3D10Texture2D) { CV_UNUSED(src); CV_UNUSED(pD3D10Texture2D); #if !defined(HAVE_DIRECTX) NO_DIRECTX_SUPPORT_ERROR; #elif defined(HAVE_OPENCL) - __OpenCLinitializeD3D10(); + OpenCLDirectXImpl& impl = getImpl(); + impl.initializeD3D10(); D3D10_TEXTURE2D_DESC desc = { 0 }; 
pD3D10Texture2D->GetDesc(&desc); @@ -1468,14 +1544,14 @@ void convertToD3D10Texture2D(InputArray src, ID3D10Texture2D* pD3D10Texture2D) CV_Assert(u.isContinuous()); cl_int status = 0; - cl_mem clImage = clCreateFromD3D10Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D10Texture2D, 0, &status); + cl_mem clImage = impl.clCreateFromD3D10Texture2DKHR(context, CL_MEM_WRITE_ONLY, pD3D10Texture2D, 0, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D10Texture2DKHR failed"); cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ); cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - status = clEnqueueAcquireD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueAcquireD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D10ObjectsKHR failed"); size_t offset = 0; // TODO @@ -1484,7 +1560,7 @@ void convertToD3D10Texture2D(InputArray src, ID3D10Texture2D* pD3D10Texture2D) status = clEnqueueCopyBufferToImage(q, clBuffer, clImage, offset, dst_origin, region, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed"); - status = clEnqueueReleaseD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueReleaseD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D10ObjectsKHR failed"); @@ -1506,7 +1582,8 @@ void convertFromD3D10Texture2D(ID3D10Texture2D* pD3D10Texture2D, OutputArray dst #if !defined(HAVE_DIRECTX) NO_DIRECTX_SUPPORT_ERROR; #elif defined(HAVE_OPENCL) - __OpenCLinitializeD3D10(); + OpenCLDirectXImpl& impl = getImpl(); + impl.initializeD3D10(); D3D10_TEXTURE2D_DESC desc = { 0 }; pD3D10Texture2D->GetDesc(&desc); @@ -1527,14 +1604,14 @@ void convertFromD3D10Texture2D(ID3D10Texture2D* pD3D10Texture2D, OutputArray dst CV_Assert(u.isContinuous()); 
cl_int status = 0; - cl_mem clImage = clCreateFromD3D10Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D10Texture2D, 0, &status); + cl_mem clImage = impl.clCreateFromD3D10Texture2DKHR(context, CL_MEM_READ_ONLY, pD3D10Texture2D, 0, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromD3D10Texture2DKHR failed"); cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ); cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - status = clEnqueueAcquireD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueAcquireD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireD3D10ObjectsKHR failed"); size_t offset = 0; // TODO @@ -1543,7 +1620,7 @@ void convertFromD3D10Texture2D(ID3D10Texture2D* pD3D10Texture2D, OutputArray dst status = clEnqueueCopyImageToBuffer(q, clImage, clBuffer, src_origin, region, offset, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed"); - status = clEnqueueReleaseD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueReleaseD3D10ObjectsKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseD3D10ObjectsKHR failed"); @@ -1560,32 +1637,6 @@ void convertFromD3D10Texture2D(ID3D10Texture2D* pD3D10Texture2D, OutputArray dst #endif } -#if defined(HAVE_DIRECTX) && defined(HAVE_OPENCL) -clCreateFromDX9MediaSurfaceKHR_fn clCreateFromDX9MediaSurfaceKHR = NULL; -clEnqueueAcquireDX9MediaSurfacesKHR_fn clEnqueueAcquireDX9MediaSurfacesKHR = NULL; -clEnqueueReleaseDX9MediaSurfacesKHR_fn clEnqueueReleaseDX9MediaSurfacesKHR = NULL; - -static void __OpenCLinitializeD3D9() -{ - using namespace cv::ocl; - static cl_platform_id initializedPlatform = NULL; - cl_platform_id platform = (cl_platform_id)Platform::getDefault().ptr(); - if (initializedPlatform != platform) - { 
- clCreateFromDX9MediaSurfaceKHR = (clCreateFromDX9MediaSurfaceKHR_fn) - clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromDX9MediaSurfaceKHR"); - clEnqueueAcquireDX9MediaSurfacesKHR = (clEnqueueAcquireDX9MediaSurfacesKHR_fn) - clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireDX9MediaSurfacesKHR"); - clEnqueueReleaseDX9MediaSurfacesKHR = (clEnqueueReleaseDX9MediaSurfacesKHR_fn) - clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseDX9MediaSurfacesKHR"); - initializedPlatform = platform; - } - if (!clCreateFromDX9MediaSurfaceKHR || !clEnqueueAcquireDX9MediaSurfacesKHR || !clEnqueueReleaseDX9MediaSurfacesKHR) - { - CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't find functions for D3D9"); - } -} -#endif // defined(HAVE_DIRECTX) && defined(HAVE_OPENCL) void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurface9, void* surfaceSharedHandle) { @@ -1593,7 +1644,8 @@ void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurfa #if !defined(HAVE_DIRECTX) NO_DIRECTX_SUPPORT_ERROR; #elif defined(HAVE_OPENCL) - __OpenCLinitializeD3D9(); + OpenCLDirectXImpl& impl = getImpl(); + impl.initializeD3D9(); D3DSURFACE_DESC desc; if (FAILED(pDirect3DSurface9->GetDesc(&desc))) @@ -1620,8 +1672,8 @@ void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurfa cl_int status = 0; cl_dx9_surface_info_khr surfaceInfo = {pDirect3DSurface9, (HANDLE)surfaceSharedHandle}; - cl_mem clImage = clCreateFromDX9MediaSurfaceKHR(context, CL_MEM_WRITE_ONLY, - ocl::g_isDirect3DDevice9Ex ? CL_ADAPTER_D3D9EX_KHR : CL_ADAPTER_D3D9_KHR, + cl_mem clImage = impl.clCreateFromDX9MediaSurfaceKHR(context, CL_MEM_WRITE_ONLY, + impl.isDirect3DDevice9Ex ? 
CL_ADAPTER_D3D9EX_KHR : CL_ADAPTER_D3D9_KHR, &surfaceInfo, 0, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromDX9MediaSurfaceKHR failed"); @@ -1629,7 +1681,7 @@ void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurfa cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ); cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - status = clEnqueueAcquireDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueAcquireDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireDX9MediaSurfacesKHR failed"); size_t offset = 0; // TODO @@ -1638,7 +1690,7 @@ void convertToDirect3DSurface9(InputArray src, IDirect3DSurface9* pDirect3DSurfa status = clEnqueueCopyBufferToImage(q, clBuffer, clImage, offset, dst_origin, region, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyBufferToImage failed"); - status = clEnqueueReleaseDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueReleaseDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseDX9MediaSurfacesKHR failed"); @@ -1661,7 +1713,8 @@ void convertFromDirect3DSurface9(IDirect3DSurface9* pDirect3DSurface9, OutputArr #if !defined(HAVE_DIRECTX) NO_DIRECTX_SUPPORT_ERROR; #elif defined(HAVE_OPENCL) - __OpenCLinitializeD3D9(); + OpenCLDirectXImpl& impl = getImpl(); + impl.initializeD3D9(); D3DSURFACE_DESC desc; if (FAILED(pDirect3DSurface9->GetDesc(&desc))) @@ -1686,8 +1739,8 @@ void convertFromDirect3DSurface9(IDirect3DSurface9* pDirect3DSurface9, OutputArr cl_int status = 0; cl_dx9_surface_info_khr surfaceInfo = {pDirect3DSurface9, (HANDLE)surfaceSharedHandle}; - cl_mem clImage = clCreateFromDX9MediaSurfaceKHR(context, CL_MEM_READ_ONLY, - ocl::g_isDirect3DDevice9Ex ? 
CL_ADAPTER_D3D9EX_KHR : CL_ADAPTER_D3D9_KHR, + cl_mem clImage = impl.clCreateFromDX9MediaSurfaceKHR(context, CL_MEM_READ_ONLY, + impl.isDirect3DDevice9Ex ? CL_ADAPTER_D3D9EX_KHR : CL_ADAPTER_D3D9_KHR, &surfaceInfo, 0, &status); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromDX9MediaSurfaceKHR failed"); @@ -1695,7 +1748,7 @@ void convertFromDirect3DSurface9(IDirect3DSurface9* pDirect3DSurface9, OutputArr cl_mem clBuffer = (cl_mem)u.handle(ACCESS_WRITE); cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr(); - status = clEnqueueAcquireDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueAcquireDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireDX9MediaSurfacesKHR failed"); size_t offset = 0; // TODO @@ -1704,7 +1757,7 @@ void convertFromDirect3DSurface9(IDirect3DSurface9* pDirect3DSurface9, OutputArr status = clEnqueueCopyImageToBuffer(q, clImage, clBuffer, src_origin, region, offset, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueCopyImageToBuffer failed"); - status = clEnqueueReleaseDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL); + status = impl.clEnqueueReleaseDX9MediaSurfacesKHR(q, 1, &clImage, 0, NULL, NULL); if (status != CL_SUCCESS) CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseDX9MediaSurfacesKHR failed"); diff --git a/modules/core/src/directx.hpp b/modules/core/src/directx.hpp new file mode 100644 index 0000000000..9f23352d4d --- /dev/null +++ b/modules/core/src/directx.hpp @@ -0,0 +1,23 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+ +#ifndef OPENCV_CORE_SRC_DIRECTX_HPP +#define OPENCV_CORE_SRC_DIRECTX_HPP + +#ifndef HAVE_DIRECTX +#error Invalid build configuration +#endif + +namespace cv { +namespace directx { +namespace internal { + +struct OpenCLDirectXImpl; +OpenCLDirectXImpl* createDirectXImpl(); +void deleteDirectXImpl(OpenCLDirectXImpl**); +OpenCLDirectXImpl* getDirectXImpl(ocl::Context& ctx); + +}}} // namespace internal + +#endif // OPENCV_CORE_SRC_DIRECTX_HPP diff --git a/modules/core/src/ocl.cpp b/modules/core/src/ocl.cpp index 0a82424ba1..a9bd974b9a 100644 --- a/modules/core/src/ocl.cpp +++ b/modules/core/src/ocl.cpp @@ -113,6 +113,10 @@ #include "opencv2/core/opencl/runtime/opencl_core.hpp" +#ifdef HAVE_DIRECTX +#include "directx.hpp" +#endif + #ifdef HAVE_OPENCL_SVM #include "opencv2/core/opencl/runtime/opencl_svm_20.hpp" #include "opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp" @@ -2327,6 +2331,9 @@ protected: , contextId(CV_XADD(&g_contextId, 1)) , configuration(configuration_) , handle(0) +#ifdef HAVE_DIRECTX + , p_directx_impl(0) +#endif #ifdef HAVE_OPENCL_SVM , svmInitialized(false) #endif @@ -2352,6 +2359,9 @@ protected: handle = NULL; } devices.clear(); +#ifdef HAVE_DIRECTX + directx::internal::deleteDirectXImpl(&p_directx_impl); +#endif } { @@ -2658,6 +2668,19 @@ public: return *bufferPoolHostPtr_.get(); } +#ifdef HAVE_DIRECTX + directx::internal::OpenCLDirectXImpl* p_directx_impl; + + directx::internal::OpenCLDirectXImpl* getDirectXImpl() + { + if (!p_directx_impl) + { + p_directx_impl = directx::internal::createDirectXImpl(); + } + return p_directx_impl; + } +#endif + #ifdef HAVE_OPENCL_SVM bool svmInitialized; bool svmAvailable; @@ -7286,4 +7309,15 @@ uint64 Timer::durationNS() const }} // namespace +#ifdef HAVE_DIRECTX +namespace cv { namespace directx { namespace internal { +OpenCLDirectXImpl* getDirectXImpl(ocl::Context& ctx) +{ + ocl::Context::Impl* i = ctx.getImpl(); + CV_Assert(i); + return i->getDirectXImpl(); +} +}}} // namespace 
cv::directx::internal +#endif + #endif // HAVE_OPENCL From dcfa23d5d2c3f158191b13c9004f33f87d3c397c Mon Sep 17 00:00:00 2001 From: Dmitry Budnikov Date: Mon, 19 Oct 2020 13:46:23 +0300 Subject: [PATCH 034/152] KW fixes --- modules/gapi/include/opencv2/gapi/media.hpp | 1 + modules/gapi/test/common/gapi_core_tests_inl.hpp | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/gapi/include/opencv2/gapi/media.hpp b/modules/gapi/include/opencv2/gapi/media.hpp index a7fe258757..f27cb80913 100644 --- a/modules/gapi/include/opencv2/gapi/media.hpp +++ b/modules/gapi/include/opencv2/gapi/media.hpp @@ -51,6 +51,7 @@ public: View(Ptrs&& ptrs, Strides&& strs, Callback &&cb = [](){}); View(const View&) = delete; View(View&&) = default; + View& operator = (const View&) = delete; ~View(); Ptrs ptr; diff --git a/modules/gapi/test/common/gapi_core_tests_inl.hpp b/modules/gapi/test/common/gapi_core_tests_inl.hpp index e350a14e65..e11324f070 100644 --- a/modules/gapi/test/common/gapi_core_tests_inl.hpp +++ b/modules/gapi/test/common/gapi_core_tests_inl.hpp @@ -618,7 +618,8 @@ TEST_P(SumTest, AccuracyTest) #undef countNonZero TEST_P(CountNonZeroTest, AccuracyTest) { - int out_cnz_gapi, out_cnz_ocv; + int out_cnz_gapi = -1; + int out_cnz_ocv = -2; // G-API code ////////////////////////////////////////////////////////////// cv::GMat in; From 49d5960a3275f58a7f98afecb2065c6368e914c2 Mon Sep 17 00:00:00 2001 From: Anatoliy Talamanov Date: Mon, 19 Oct 2020 14:19:17 +0300 Subject: [PATCH 035/152] Fix namespace for OCVCallHelper --- modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp index ef67930909..741fbe18f0 100644 --- a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp +++ b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp @@ -443,7 +443,7 @@ struct OCVStCallHelper, std::tuple> : 
template class GCPUKernelImpl: public cv::detail::KernelTag { - using CallHelper = detail::OCVCallHelper; + using CallHelper = cv::detail::OCVCallHelper; public: using API = K; @@ -497,7 +497,7 @@ private: template gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(Callable& c) { - using P = detail::OCVCallHelper; + using P = cv::detail::OCVCallHelper; return GOCVFunctor{ K::id() , &K::getOutMeta , std::bind(&P::callFunctor, std::placeholders::_1, std::ref(c)) @@ -507,7 +507,7 @@ gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(Callable& c) template gapi::cpu::GOCVFunctor gapi::cpu::ocv_kernel(const Callable& c) { - using P = detail::OCVCallHelper; + using P = cv::detail::OCVCallHelper; return GOCVFunctor{ K::id() , &K::getOutMeta , std::bind(&P::callFunctor, std::placeholders::_1, c) From bf49149c97fff299384e48831767d7d2ef7badd8 Mon Sep 17 00:00:00 2001 From: Daniel Motilla Date: Mon, 19 Oct 2020 14:35:03 +0200 Subject: [PATCH 036/152] Enable imshow to take GpuMat inputs in Python --- modules/python/src2/hdr_parser.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/python/src2/hdr_parser.py b/modules/python/src2/hdr_parser.py index a486e0b71a..3a977e8605 100755 --- a/modules/python/src2/hdr_parser.py +++ b/modules/python/src2/hdr_parser.py @@ -958,7 +958,9 @@ class CppHeaderParser(object): else: decls.append(decl) - if self._generate_gpumat_decls and "cv.cuda" in decl[0]: + if self._generate_gpumat_decls and ("cv.cuda" in decl[0] or decl[0] in [ + "cv.imshow", # https://github.com/opencv/opencv/issues/18553 + ]): # If function takes as one of arguments Mat or vector - we want to create the # same declaration working with GpuMat args = decl[3] From b87f7a625eb0cd5239a27f1d15c4ce791d0951f4 Mon Sep 17 00:00:00 2001 From: Nikolai Date: Mon, 19 Oct 2020 20:39:19 +0300 Subject: [PATCH 037/152] Merge pull request #18426 from Varvrar:add-HEVC-codec-iOS-Mac * add HEVC(H.265) codec to iOS,Mac VideoWriter * Update cap_avfoundation_mac.mm add 
CV_FOURCC('h','v','c','1') for HEVC codec * Update cap_avfoundation.mm add CV_FOURCC('h','v','c','1') for HEVC codec * feat: add availability check for HEVC codec on iOS and OS X Co-authored-by: Vadim Levin --- modules/videoio/src/cap_avfoundation.mm | 16 +++++++++++++--- modules/videoio/src/cap_avfoundation_mac.mm | 14 ++++++++++++-- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/modules/videoio/src/cap_avfoundation.mm b/modules/videoio/src/cap_avfoundation.mm index 8ac8d85d8d..19f54be8c3 100644 --- a/modules/videoio/src/cap_avfoundation.mm +++ b/modules/videoio/src/cap_avfoundation.mm @@ -36,6 +36,7 @@ #include "opencv2/imgproc.hpp" #include "cap_interface.hpp" #include +#include #import #import @@ -1255,16 +1256,25 @@ CvVideoWriter_AVFoundation::CvVideoWriter_AVFoundation(const char* filename, int //exception; } - // Two codec supported AVVideoCodecH264 AVVideoCodecJPEG + // Three codec supported AVVideoCodecH264 AVVideoCodecJPEG AVVideoCodecTypeHEVC // On iPhone 3G H264 is not supported. if (fourcc == CV_FOURCC('J','P','E','G') || fourcc == CV_FOURCC('j','p','e','g') || - fourcc == CV_FOURCC('M','J','P','G') || fourcc == CV_FOURCC('m','j','p','g') ){ + fourcc == CV_FOURCC('M','J','P','G') || fourcc == CV_FOURCC('m','j','p','g')){ codec = [AVVideoCodecJPEG copy]; // Use JPEG codec if specified, otherwise H264 }else if(fourcc == CV_FOURCC('H','2','6','4') || fourcc == CV_FOURCC('a','v','c','1')){ codec = [AVVideoCodecH264 copy]; +// Available since iOS 11 +#if defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 110000 + }else if(fourcc == CV_FOURCC('H','2','6','5') || fourcc == CV_FOURCC('h','v','c','1') || + fourcc == CV_FOURCC('H','E','V','C') || fourcc == CV_FOURCC('h','e','v','c')){ + if (@available(iOS 11, *)) { + codec = [AVVideoCodecTypeHEVC copy]; + } else { + codec = [AVVideoCodecH264 copy]; + } +#endif }else{ codec = [AVVideoCodecH264 copy]; // default canonical H264. 
- } //NSLog(@"Path: %@", path); diff --git a/modules/videoio/src/cap_avfoundation_mac.mm b/modules/videoio/src/cap_avfoundation_mac.mm index 011bc08466..ed966ceffa 100644 --- a/modules/videoio/src/cap_avfoundation_mac.mm +++ b/modules/videoio/src/cap_avfoundation_mac.mm @@ -1199,13 +1199,23 @@ CvVideoWriter_AVFoundation::CvVideoWriter_AVFoundation(const std::string &filena is_good = false; } - // Two codec supported AVVideoCodecH264 AVVideoCodecJPEG + // Three codec supported AVVideoCodecH264 AVVideoCodecJPEG AVVideoCodecTypeHEVC // On iPhone 3G H264 is not supported. if (fourcc == CV_FOURCC('J','P','E','G') || fourcc == CV_FOURCC('j','p','e','g') || - fourcc == CV_FOURCC('M','J','P','G') || fourcc == CV_FOURCC('m','j','p','g') ){ + fourcc == CV_FOURCC('M','J','P','G') || fourcc == CV_FOURCC('m','j','p','g')){ codec = [AVVideoCodecJPEG copy]; // Use JPEG codec if specified, otherwise H264 }else if(fourcc == CV_FOURCC('H','2','6','4') || fourcc == CV_FOURCC('a','v','c','1')){ codec = [AVVideoCodecH264 copy]; + // Available since macOS 10.13 +#if defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101300 + }else if(fourcc == CV_FOURCC('H','2','6','5') || fourcc == CV_FOURCC('h','v','c','1') || + fourcc == CV_FOURCC('H','E','V','C') || fourcc == CV_FOURCC('h','e','v','c')){ + if (@available(macOS 10.13, *)) { + codec = [AVVideoCodecTypeHEVC copy]; + } else { + is_good = false; + } +#endif }else{ is_good = false; } From 456af21d8bbe29341141b149360593811f608a59 Mon Sep 17 00:00:00 2001 From: Zhiyuan Chen Date: Sun, 18 Oct 2020 22:33:45 +0800 Subject: [PATCH 038/152] fixes #18613 --- samples/dnn/siamrpnpp.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/samples/dnn/siamrpnpp.py b/samples/dnn/siamrpnpp.py index bb126b71e5..c7c49b1b85 100644 --- a/samples/dnn/siamrpnpp.py +++ b/samples/dnn/siamrpnpp.py @@ -234,10 +234,10 @@ class SiamRPNTracker: """ Args: img(np.ndarray): bgr based input image frame - bbox: (x,y,w,h): 
bounding box + bbox: (x, y, w, h): bounding box """ - x,y,h,w = bbox - self.center_pos = np.array([x + (h - 1) / 2, y + (w - 1) / 2]) + x, y, w, h = bbox + self.center_pos = np.array([x + (w - 1) / 2, y + (h - 1) / 2]) self.h = h self.w = w w_z = self.w + self.track_context_amount * np.add(h, w) From 331b73c8e4cbbade998ab24ac45f3fd8f7e306db Mon Sep 17 00:00:00 2001 From: lizz Date: Tue, 20 Oct 2020 12:09:03 +0800 Subject: [PATCH 039/152] Typo in docstring of distanceTransform --- modules/imgproc/include/opencv2/imgproc.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index d75f3bcffc..2739d28ff9 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -3500,7 +3500,7 @@ but also identifies the nearest connected component consisting of zero pixels (labelType==#DIST_LABEL_CCOMP) or the nearest zero pixel (labelType==#DIST_LABEL_PIXEL). Index of the component/pixel is stored in `labels(x, y)`. When labelType==#DIST_LABEL_CCOMP, the function automatically finds connected components of zero pixels in the input image and marks them with -distinct labels. When labelType==#DIST_LABEL_CCOMP, the function scans through the input image and +distinct labels. When labelType==#DIST_LABEL_PIXEL, the function scans through the input image and marks all the zero pixels with distinct labels. In this mode, the complexity is still linear. 
That is, the function provides a very fast way to From 2669d8ce73e1ab82c5edd107a5179a54daed7997 Mon Sep 17 00:00:00 2001 From: Alexey Smirnov Date: Tue, 20 Oct 2020 23:58:54 +0300 Subject: [PATCH 040/152] Merge pull request #18584 from smirnov-alexey:as/rmat_s11n [G-API]: Introduce RMat serialization API * Introduce RMat serialization API * Fix RunArgs deserialization * Address review comments * Export operators for GRunArg serialization * Fix warning and add handling for RMat in bind() * Update CMakeLists.txt * G-API: RMat S11N -- probably fix the Windows warning --- modules/gapi/include/opencv2/gapi/rmat.hpp | 20 ++++ modules/gapi/include/opencv2/gapi/s11n.hpp | 112 +++++++++++++++++- modules/gapi/src/api/s11n.cpp | 6 + .../src/backends/common/serialization.cpp | 6 +- .../src/backends/common/serialization.hpp | 60 ---------- modules/gapi/test/s11n/gapi_s11n_tests.cpp | 63 ++++++++++ 6 files changed, 203 insertions(+), 64 deletions(-) diff --git a/modules/gapi/include/opencv2/gapi/rmat.hpp b/modules/gapi/include/opencv2/gapi/rmat.hpp index 626e67e9ee..ff834b46b1 100644 --- a/modules/gapi/include/opencv2/gapi/rmat.hpp +++ b/modules/gapi/include/opencv2/gapi/rmat.hpp @@ -10,6 +10,16 @@ #include #include +// Forward declaration +namespace cv { +namespace gapi { +namespace s11n { + struct IOStream; + struct IIStream; +} // namespace s11n +} // namespace gapi +} // namespace cv + namespace cv { // "Remote Mat", a general class which provides an abstraction layer over the data @@ -90,6 +100,12 @@ public: // the view when accessed for writing, to ensure that the data from the view // is transferred to the device when the view is destroyed virtual View access(Access) = 0; + virtual void serialize(cv::gapi::s11n::IOStream&) { + GAPI_Assert(false && "Generic serialize method should never be called for RMat adapter"); + } + virtual void deserialize(cv::gapi::s11n::IIStream&) { + GAPI_Assert(false && "Generic deserialize method should never be called for RMat adapter"); + } 
}; using AdapterP = std::shared_ptr; @@ -113,6 +129,10 @@ public: return dynamic_cast(m_adapter.get()); } + void serialize(cv::gapi::s11n::IOStream& os) const { + m_adapter->serialize(os); + } + private: AdapterP m_adapter = nullptr; }; diff --git a/modules/gapi/include/opencv2/gapi/s11n.hpp b/modules/gapi/include/opencv2/gapi/s11n.hpp index e8a8dbcab4..2fa4e51176 100644 --- a/modules/gapi/include/opencv2/gapi/s11n.hpp +++ b/modules/gapi/include/opencv2/gapi/s11n.hpp @@ -12,6 +12,7 @@ #include #include #include +#include namespace cv { namespace gapi { @@ -25,6 +26,9 @@ namespace detail { template cv::GCompileArgs getCompileArgs(const std::vector &p); + + template + cv::GRunArgs getRunArgsWithRMats(const std::vector &p); } // namespace detail GAPI_EXPORTS std::vector serialize(const cv::GComputation &c); @@ -59,6 +63,12 @@ typename std::enable_if::value, GCompileArgs>:: type deserialize(const std::vector &p) { return detail::getCompileArgs(p); } + +template inline +typename std::enable_if::value, GRunArgs>:: +type deserialize(const std::vector &p) { + return detail::getRunArgsWithRMats(p); +} } // namespace gapi } // namespace cv @@ -123,6 +133,27 @@ GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Scalar &s); GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::Mat &m); GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::Mat &m); +// FIXME: for GRunArgs serailization +#if !defined(GAPI_STANDALONE) +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::UMat &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::UMat &); +#endif // !defined(GAPI_STANDALONE) + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::RMat &r); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::RMat &r); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::IStreamSource::Ptr &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::IStreamSource::Ptr &); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const 
cv::detail::VectorRef &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::VectorRef &); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::detail::OpaqueRef &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::OpaqueRef &); + +GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::MediaFrame &); +GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::MediaFrame &); + // Generic STL types //////////////////////////////////////////////////////////////// template IOStream& operator<< (IOStream& os, const std::map &m) { @@ -184,6 +215,52 @@ IIStream& operator>> (IIStream& is, std::vector &ts) { } return is; } + +// Generic: variant serialization +namespace detail { +template +IOStream& put_v(IOStream&, const V&, std::size_t) { + GAPI_Assert(false && "variant>>: requested index is invalid"); +}; +template +IOStream& put_v(IOStream& os, const V& v, std::size_t x) { + return (x == 0u) + ? os << cv::util::get(v) + : put_v(os, v, x-1); +} +template +IIStream& get_v(IIStream&, V&, std::size_t, std::size_t) { + GAPI_Assert(false && "variant<<: requested index is invalid"); +} +template +IIStream& get_v(IIStream& is, V& v, std::size_t i, std::size_t gi) { + if (i == gi) { + X x{}; + is >> x; + v = V{std::move(x)}; + return is; + } else return get_v(is, v, i+1, gi); +} +} // namespace detail + +template +IOStream& operator<< (IOStream& os, const cv::util::variant &v) { + os << static_cast(v.index()); + return detail::put_v, Ts...>(os, v, v.index()); +} +template +IIStream& operator>> (IIStream& is, cv::util::variant &v) { + int idx = -1; + is >> idx; + GAPI_Assert(idx >= 0 && idx < (int)sizeof...(Ts)); + return detail::get_v, Ts...>(is, v, 0u, idx); +} + +// FIXME: consider a better solution +template +void getRunArgByIdx (IIStream& is, cv::util::variant &v, uint32_t idx) { + is = detail::get_v, Ts...>(is, v, 0u, idx); +} } // namespace s11n namespace detail @@ -204,11 +281,27 @@ static GCompileArg exec(cv::gapi::s11n::IIStream& is, const 
std::string& tag) { cv::gapi::s11n::detail::S11N::deserialize(is) }; } - return deserialize_arg>::exec(is, tag); } }; +template struct deserialize_runarg; + +template +struct deserialize_runarg { +static GRunArg exec(cv::gapi::s11n::IIStream& is, uint32_t idx) { + if (idx == GRunArg::index_of()) { + auto ptr = std::make_shared(); + ptr->deserialize(is); + return GRunArg { RMat(std::move(ptr)) }; + } else { // non-RMat arg - use default deserialization + GRunArg arg; + getRunArgByIdx(is, arg, idx); + return arg; + } +} +}; + template cv::GCompileArgs getCompileArgs(const std::vector &p) { std::unique_ptr pIs = cv::gapi::s11n::detail::getInStream(p); @@ -225,6 +318,23 @@ cv::GCompileArgs getCompileArgs(const std::vector &p) { return args; } + +template +cv::GRunArgs getRunArgsWithRMats(const std::vector &p) { + std::unique_ptr pIs = cv::gapi::s11n::detail::getInStream(p); + cv::gapi::s11n::IIStream& is = *pIs; + cv::GRunArgs args; + + uint32_t sz = 0; + is >> sz; + for (uint32_t i = 0; i < sz; ++i) { + uint32_t idx = 0; + is >> idx; + args.push_back(cv::gapi::detail::deserialize_runarg::exec(is, idx)); + } + + return args; +} } // namespace detail } // namespace gapi } // namespace cv diff --git a/modules/gapi/src/api/s11n.cpp b/modules/gapi/src/api/s11n.cpp index 52c276fd5d..b6acf28ea4 100644 --- a/modules/gapi/src/api/s11n.cpp +++ b/modules/gapi/src/api/s11n.cpp @@ -79,6 +79,9 @@ cv::GRunArgsP cv::gapi::bind(cv::GRunArgs &results) case T::index_of() : outputs.emplace_back(cv::util::get(res_obj)); break; + case cv::GRunArg::index_of() : + outputs.emplace_back((cv::RMat*)(&(cv::util::get(res_obj)))); + break; default: GAPI_Assert(false && "This value type is not supported!"); // ...maybe because of STANDALONE mode. 
break; @@ -112,6 +115,9 @@ cv::GRunArg cv::gapi::bind(cv::GRunArgP &out) case T::index_of() : return cv::GRunArg(*cv::util::get(out)); + case T::index_of() : + return cv::GRunArg(*cv::util::get(out)); + default: // ...maybe our types were extended GAPI_Assert(false && "This value type is UNKNOWN!"); diff --git a/modules/gapi/src/backends/common/serialization.cpp b/modules/gapi/src/backends/common/serialization.cpp index ca73d29ffb..2b23b33cc8 100644 --- a/modules/gapi/src/backends/common/serialization.cpp +++ b/modules/gapi/src/backends/common/serialization.cpp @@ -165,12 +165,12 @@ IOStream& operator<< (IOStream& os, const cv::Scalar &s) { IIStream& operator>> (IIStream& is, cv::Scalar& s) { return is >> s.val[0] >> s.val[1] >> s.val[2] >> s.val[3]; } -IOStream& operator<< (IOStream& os, const cv::RMat&) { - util::throw_error(std::logic_error("Serialization of RMat is not supported")); +IOStream& operator<< (IOStream& os, const cv::RMat& mat) { + mat.serialize(os); return os; } IIStream& operator>> (IIStream& is, cv::RMat&) { - util::throw_error(std::logic_error("Serialization of RMat is not supported")); + util::throw_error(std::logic_error("operator>> for RMat should never be called")); return is; } diff --git a/modules/gapi/src/backends/common/serialization.hpp b/modules/gapi/src/backends/common/serialization.hpp index e2aa56c45b..a3134d84d2 100644 --- a/modules/gapi/src/backends/common/serialization.hpp +++ b/modules/gapi/src/backends/common/serialization.hpp @@ -88,26 +88,6 @@ GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GArrayDesc &); GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::GFrameDesc &); GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::GFrameDesc &); -#if !defined(GAPI_STANDALONE) -GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::UMat &); -GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::UMat &); -#endif // !defined(GAPI_STANDALONE) - -GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::RMat &r); 
-GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::RMat &r); - -GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gapi::wip::IStreamSource::Ptr &); -GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gapi::wip::IStreamSource::Ptr &); - -GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::detail::VectorRef &); -GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::VectorRef &); - -GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::detail::OpaqueRef &); -GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::detail::OpaqueRef &); - -GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::MediaFrame &); -GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::MediaFrame &); - GAPI_EXPORTS IOStream& operator<< (IOStream& os, const cv::gimpl::RcDesc &rc); GAPI_EXPORTS IIStream& operator>> (IIStream& is, cv::gimpl::RcDesc &rc); @@ -178,46 +158,6 @@ GAPI_EXPORTS void serialize( IOStream& os GAPI_EXPORTS GSerialized deserialize(IIStream& is); GAPI_EXPORTS void reconstruct(const GSerialized &s, ade::Graph &g); -// Generic: variant serialization ////////////////////////////////////////////// -namespace detail { // FIXME: breaks old code -template -IOStream& put_v(IOStream&, const V&, std::size_t) { - GAPI_Assert(false && "variant>>: requested index is invalid"); -}; -template -IOStream& put_v(IOStream& os, const V& v, std::size_t x) { - return (x == 0u) - ? 
os << cv::util::get(v) - : put_v(os, v, x-1); -} -template -IIStream& get_v(IIStream&, V&, std::size_t, std::size_t) { - GAPI_Assert(false && "variant<<: requested index is invalid"); -} -template -IIStream& get_v(IIStream& is, V& v, std::size_t i, std::size_t gi) { - if (i == gi) { - X x{}; - is >> x; - v = std::move(x); - return is; - } else return get_v(is, v, i+1, gi); -} -} // namespace detail FIXME: breaks old code - -template -IOStream& operator<< (IOStream& os, const cv::util::variant &v) { - os << (uint32_t)v.index(); - return detail::put_v, Ts...>(os, v, v.index()); -} -template -IIStream& operator>> (IIStream& is, cv::util::variant &v) { - int idx = -1; - is >> idx; - GAPI_Assert(idx >= 0 && idx < (int)sizeof...(Ts)); - return detail::get_v, Ts...>(is, v, 0u, idx); -} - // FIXME: Basic Stream implementaions ////////////////////////////////////////// // Basic in-memory stream implementations. diff --git a/modules/gapi/test/s11n/gapi_s11n_tests.cpp b/modules/gapi/test/s11n/gapi_s11n_tests.cpp index 1a4faec12c..3fe632e449 100644 --- a/modules/gapi/test/s11n/gapi_s11n_tests.cpp +++ b/modules/gapi/test/s11n/gapi_s11n_tests.cpp @@ -1,6 +1,7 @@ #include "../test_precomp.hpp" #include "backends/common/serialization.hpp" +#include namespace { struct MyCustomType { @@ -45,6 +46,35 @@ template<> struct CompileArgTag { } // namespace detail } // namespace cv +namespace { +class MyRMatAdapter : public cv::RMat::Adapter { + cv::Mat m_mat; + int m_value; + std::string m_str; +public: + MyRMatAdapter() = default; + MyRMatAdapter(cv::Mat m, int value, const std::string& str) + : m_mat(m), m_value(value), m_str(str) + {} + virtual cv::RMat::View access(cv::RMat::Access access) override { + if (access == cv::RMat::Access::W) { + return cv::RMat::View(cv::descr_of(m_mat), m_mat.data, m_mat.step); + } else { + return cv::RMat::View(cv::descr_of(m_mat), m_mat.data, m_mat.step); + } + } + virtual cv::GMatDesc desc() const override { return cv::descr_of(m_mat); } + virtual void 
serialize(cv::gapi::s11n::IOStream& os) override { + os << m_value << m_str; + } + virtual void deserialize(cv::gapi::s11n::IIStream& is) override { + is >> m_value >> m_str; + } + int getVal() { return m_value; } + std::string getStr() { return m_str; } +}; +} + namespace opencv_test { struct S11N_Basic: public ::testing::Test { @@ -460,6 +490,39 @@ TEST_F(S11N_Basic, Test_Bind_RunArgs_MatScalar) { } } +TEST_F(S11N_Basic, Test_RunArg_RMat) { + cv::Mat mat = cv::Mat::eye(cv::Size(128, 64), CV_8UC3); + cv::RMat rmat = cv::make_rmat(mat, 42, "It actually works"); + auto v = cv::GRunArgs{ cv::GRunArg{ rmat } }; + + const std::vector sargsin = cv::gapi::serialize(v); + cv::GRunArgs out = cv::gapi::deserialize(sargsin); + cv::RMat out_mat = cv::util::get(out[0]); + auto adapter = out_mat.get(); + EXPECT_EQ(42, adapter->getVal()); + EXPECT_EQ("It actually works", adapter->getStr()); +} + +TEST_F(S11N_Basic, Test_RunArg_RMat_Scalar_Mat) { + cv::Mat mat = cv::Mat::eye(cv::Size(128, 64), CV_8UC3); + cv::RMat rmat = cv::make_rmat(mat, 42, "It actually works"); + cv::Scalar sc(111); + auto v = cv::GRunArgs{ cv::GRunArg{ rmat }, cv::GRunArg{ sc }, cv::GRunArg{ mat } }; + + const std::vector sargsin = cv::gapi::serialize(v); + cv::GRunArgs out = cv::gapi::deserialize(sargsin); + cv::RMat out_rmat = cv::util::get(out[0]); + auto adapter = out_rmat.get(); + EXPECT_EQ(42, adapter->getVal()); + EXPECT_EQ("It actually works", adapter->getStr()); + + cv::Scalar out_sc = cv::util::get(out[1]); + EXPECT_EQ(sc, out_sc); + + cv::Mat out_mat = cv::util::get(out[2]); + EXPECT_EQ(0, cv::norm(mat, out_mat)); +} + namespace { template bool verifyOpaqueKind(T&& in) { From 510dc17c2e3f408de1349e342be4cda5cd6873f8 Mon Sep 17 00:00:00 2001 From: Anna Khakimova Date: Wed, 21 Oct 2020 13:52:03 +0300 Subject: [PATCH 041/152] Merge pull request #18338 from anna-khakimova:ak/opt_arithm_kernel Univ Intrinsics implementation of Add, Sub, Absdiff kernels * Add, sub, absdiff kernels optimization * avoid 
unused conditions * add conditions for tail processing --- .../gapi/src/backends/fluid/gfluidcore.cpp | 423 ++++++++++++++++-- 1 file changed, 398 insertions(+), 25 deletions(-) diff --git a/modules/gapi/src/backends/fluid/gfluidcore.cpp b/modules/gapi/src/backends/fluid/gfluidcore.cpp index a6f8d56e4c..edc91f0179 100644 --- a/modules/gapi/src/backends/fluid/gfluidcore.cpp +++ b/modules/gapi/src/backends/fluid/gfluidcore.cpp @@ -151,6 +151,348 @@ GAPI_FLUID_KERNEL(GFluidAddW, cv::gapi::core::GAddW, false) enum Arithm { ARITHM_ABSDIFF, ARITHM_ADD, ARITHM_SUBTRACT, ARITHM_MULTIPLY, ARITHM_DIVIDE }; +#if CV_SIMD +CV_ALWAYS_INLINE void absdiff_store(short out[], const v_int16& a, const v_int16& b, int x) +{ + vx_store(&out[x], v_absdiffs(a, b)); +} + +CV_ALWAYS_INLINE void absdiff_store(ushort out[], const v_uint16& a, const v_uint16& b, int x) +{ + vx_store(&out[x], v_absdiff(a, b)); +} + +CV_ALWAYS_INLINE void absdiff_store(uchar out[], const v_uint8& a, const v_uint8& b, int x) +{ + vx_store(&out[x], v_absdiff(a, b)); +} + +CV_ALWAYS_INLINE void absdiff_store(float out[], const v_float32& a, const v_float32& b, int x) +{ + vx_store(&out[x], v_absdiff(a, b)); +} + +template +CV_ALWAYS_INLINE int absdiff_impl(const T in1[], const T in2[], T out[], int length) +{ + constexpr int nlanes = static_cast(VT::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + VT a = vx_load(&in1[x]); + VT b = vx_load(&in2[x]); + absdiff_store(out, a, b, x); + } + + if (x < length && (in1 != out) && (in2 != out)) + { + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; +} + +template +CV_ALWAYS_INLINE int absdiff_simd(const T in1[], const T in2[], T out[], int length) +{ + if (std::is_same::value) + { + return absdiff_impl(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return 
absdiff_impl(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return absdiff_impl(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return absdiff_impl(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + + return 0; +} + +template +CV_ALWAYS_INLINE int add_simd_sametype(const T in1[], const T in2[], T out[], int length) +{ + constexpr int nlanes = static_cast(VT::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + VT a = vx_load(&in1[x]); + VT b = vx_load(&in2[x]); + vx_store(&out[x], a + b); + } + + if (x < length && (in1 != out) && (in2 != out)) + { + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; +} + +template +CV_ALWAYS_INLINE int add_simd(const SRC in1[], const SRC in2[], DST out[], int length) +{ + if (std::is_same::value && !std::is_same::value) + return 0; + + if (std::is_same::value) + { + if (std::is_same::value) + { + return add_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return add_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return add_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + } + else if (std::is_same::value && std::is_same::value) + { + constexpr int nlanes = static_cast(v_uint8::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + v_int16 a1 = vx_load(reinterpret_cast(&in1[x])); + v_int16 a2 = vx_load(reinterpret_cast(&in1[x + nlanes / 2])); + v_int16 b1 = vx_load(reinterpret_cast(&in2[x])); + v_int16 b2 = 
vx_load(reinterpret_cast(&in2[x + nlanes / 2])); + + vx_store(reinterpret_cast(&out[x]), v_pack_u(a1 + b1, a2 + b2)); + } + + if (x < length) + { + CV_DbgAssert((reinterpret_cast(in1) != reinterpret_cast(out)) && + (reinterpret_cast(in2) != reinterpret_cast(out))); + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; + } + else if (std::is_same::value && std::is_same::value) + { + constexpr int nlanes = static_cast(v_uint8::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + v_float32 a1 = vx_load(reinterpret_cast(&in1[x])); + v_float32 a2 = vx_load(reinterpret_cast(&in1[x + nlanes / 4])); + v_float32 a3 = vx_load(reinterpret_cast(&in1[x + 2 * nlanes / 4])); + v_float32 a4 = vx_load(reinterpret_cast(&in1[x + 3 * nlanes / 4])); + + v_float32 b1 = vx_load(reinterpret_cast(&in2[x])); + v_float32 b2 = vx_load(reinterpret_cast(&in2[x + nlanes / 4])); + v_float32 b3 = vx_load(reinterpret_cast(&in2[x + 2 * nlanes / 4])); + v_float32 b4 = vx_load(reinterpret_cast(&in2[x + 3 * nlanes / 4])); + + vx_store(reinterpret_cast(&out[x]), v_pack_u(v_pack(v_round(a1 + b1), v_round(a2 + b2)), + v_pack(v_round(a3 + b3), v_round(a4 + b4)))); + } + + if (x < length) + { + CV_DbgAssert((reinterpret_cast(in1) != reinterpret_cast(out)) && + (reinterpret_cast(in2) != reinterpret_cast(out))); + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; + } + + return 0; +} + +template +CV_ALWAYS_INLINE int sub_simd_sametype(const T in1[], const T in2[], T out[], int length) +{ + constexpr int nlanes = static_cast(VT::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + VT a = vx_load(&in1[x]); + VT b = vx_load(&in2[x]); + vx_store(&out[x], a - b); + } + + if (x < length && (in1 != out) && (in2 != out)) + { + x = length - nlanes; + continue; // 
process one more time (unaligned tail) + } + break; + } + + return x; +} + +template +CV_ALWAYS_INLINE int sub_simd(const SRC in1[], const SRC in2[], DST out[], int length) +{ + if (std::is_same::value && !std::is_same::value) + return 0; + + if (std::is_same::value) + { + if (std::is_same::value) + { + return sub_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return sub_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + else if (std::is_same::value) + { + return sub_simd_sametype(reinterpret_cast(in1), + reinterpret_cast(in2), + reinterpret_cast(out), length); + } + } + else if (std::is_same::value && std::is_same::value) + { + constexpr int nlanes = static_cast(v_uint8::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + v_int16 a1 = vx_load(reinterpret_cast(&in1[x])); + v_int16 a2 = vx_load(reinterpret_cast(&in1[x + nlanes / 2])); + v_int16 b1 = vx_load(reinterpret_cast(&in2[x])); + v_int16 b2 = vx_load(reinterpret_cast(&in2[x + nlanes / 2])); + + vx_store(reinterpret_cast(&out[x]), v_pack_u(a1 - b1, a2 - b2)); + } + + if (x < length) + { + CV_DbgAssert((reinterpret_cast(in1) != reinterpret_cast(out)) && + (reinterpret_cast(in2) != reinterpret_cast(out))); + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; + } + else if (std::is_same::value && std::is_same::value) + { + constexpr int nlanes = static_cast(v_uint8::nlanes); + + if (length < nlanes) + return 0; + + int x = 0; + for (;;) + { + for (; x <= length - nlanes; x += nlanes) + { + v_float32 a1 = vx_load(reinterpret_cast(&in1[x])); + v_float32 a2 = vx_load(reinterpret_cast(&in1[x + nlanes / 4])); + v_float32 a3 = vx_load(reinterpret_cast(&in1[x + 2 * nlanes / 4])); + v_float32 a4 = vx_load(reinterpret_cast(&in1[x + 3 * nlanes / 4])); + + 
v_float32 b1 = vx_load(reinterpret_cast(&in2[x])); + v_float32 b2 = vx_load(reinterpret_cast(&in2[x + nlanes / 4])); + v_float32 b3 = vx_load(reinterpret_cast(&in2[x + 2 * nlanes / 4])); + v_float32 b4 = vx_load(reinterpret_cast(&in2[x + 3 * nlanes / 4])); + + vx_store(reinterpret_cast(&out[x]), v_pack_u(v_pack(v_round(a1 - b1), v_round(a2 - b2)), + v_pack(v_round(a3 - b3), v_round(a4 - b4)))); + } + + if (x < length) + { + CV_DbgAssert((reinterpret_cast(in1) != reinterpret_cast(out)) && + (reinterpret_cast(in2) != reinterpret_cast(out))); + x = length - nlanes; + continue; // process one more time (unaligned tail) + } + break; + } + + return x; + } + + return 0; +} +#endif + template static void run_arithm(Buffer &dst, const View &src1, const View &src2, Arithm arithm, double scale=1) @@ -168,29 +510,37 @@ static void run_arithm(Buffer &dst, const View &src1, const View &src2, Arithm a // NB: assume in/out types are not 64-bits float _scale = static_cast( scale ); + int x = 0; + switch (arithm) { - case ARITHM_ABSDIFF: - for (int l=0; l < length; l++) - out[l] = absdiff(in1[l], in2[l]); - break; - case ARITHM_ADD: - for (int l=0; l < length; l++) - out[l] = add(in1[l], in2[l]); - break; - case ARITHM_SUBTRACT: - for (int l=0; l < length; l++) - out[l] = sub(in1[l], in2[l]); - break; - case ARITHM_MULTIPLY: - for (int l=0; l < length; l++) - out[l] = mul(in1[l], in2[l], _scale); - break; - case ARITHM_DIVIDE: - for (int l=0; l < length; l++) - out[l] = div(in1[l], in2[l], _scale); - break; - default: CV_Error(cv::Error::StsBadArg, "unsupported arithmetic operation"); + case ARITHM_ADD: + { +#if CV_SIMD + x = add_simd(in1, in2, out, length); +#endif + for (; x < length; ++x) + out[x] = add(in1[x], in2[x]); + break; + } + case ARITHM_SUBTRACT: + { +#if CV_SIMD + x = sub_simd(in1, in2, out, length); +#endif + for (; x < length; ++x) + out[x] = sub(in1[x], in2[x]); + break; + } + case ARITHM_MULTIPLY: + for (; x < length; ++x) + out[x] = mul(in1[x], in2[x], _scale); + 
break; + case ARITHM_DIVIDE: + for (; x < length; ++x) + out[x] = div(in1[x], in2[x], _scale); + break; + default: CV_Error(cv::Error::StsBadArg, "unsupported arithmetic operation"); } } @@ -270,6 +620,29 @@ GAPI_FLUID_KERNEL(GFluidDiv, cv::gapi::core::GDiv, false) } }; +template +static void run_absdiff(Buffer &dst, const View &src1, const View &src2) +{ + static_assert(std::is_same::value, "wrong types"); + static_assert(std::is_same::value, "wrong types"); + + const auto *in1 = src1.InLine(0); + const auto *in2 = src2.InLine(0); + auto *out = dst.OutLine(); + + int width = dst.length(); + int chan = dst.meta().chan; + int length = width * chan; + + int x = 0; + +#if CV_SIMD + x = absdiff_simd(in1, in2, out, length); +#endif + for (; x < length; ++x) + out[x] = absdiff(in1[x], in2[x]); +} + GAPI_FLUID_KERNEL(GFluidAbsDiff, cv::gapi::core::GAbsDiff, false) { static const int Window = 1; @@ -277,10 +650,10 @@ GAPI_FLUID_KERNEL(GFluidAbsDiff, cv::gapi::core::GAbsDiff, false) static void run(const View &src1, const View &src2, Buffer &dst) { // DST SRC1 SRC2 OP __VA_ARGS__ - BINARY_(uchar , uchar , uchar , run_arithm, dst, src1, src2, ARITHM_ABSDIFF); - BINARY_(ushort, ushort, ushort, run_arithm, dst, src1, src2, ARITHM_ABSDIFF); - BINARY_( short, short, short, run_arithm, dst, src1, src2, ARITHM_ABSDIFF); - BINARY_( float, float, float, run_arithm, dst, src1, src2, ARITHM_ABSDIFF); + BINARY_(uchar , uchar , uchar , run_absdiff, dst, src1, src2); + BINARY_(ushort, ushort, ushort, run_absdiff, dst, src1, src2); + BINARY_( short, short, short, run_absdiff, dst, src1, src2); + BINARY_( float, float, float, run_absdiff, dst, src1, src2); CV_Error(cv::Error::StsBadArg, "unsupported combination of types"); } From 95ff9282286a55bcbb5a241dc30629406b50dd88 Mon Sep 17 00:00:00 2001 From: Dmitry Matveev Date: Thu, 1 Oct 2020 00:18:04 +0300 Subject: [PATCH 042/152] G-API: Introduced a Text Detection sample This sample models the Text Detection demo from OMZ: 
https://github.com/openvinotoolkit/open_model_zoo/tree/2020.4/demos/text_detection_demo Also: renamed cv::gapi::size() to cv::gapi::streaming::size() --- modules/gapi/include/opencv2/gapi/core.hpp | 32 +- .../gapi/include/opencv2/gapi/gstreaming.hpp | 16 + .../include/opencv2/gapi/infer/parsers.hpp | 12 + .../perf/common/gapi_core_perf_tests_inl.hpp | 4 +- modules/gapi/samples/text_detection.cpp | 698 ++++++++++++++++++ modules/gapi/src/api/kernels_core.cpp | 8 +- modules/gapi/src/backends/cpu/gcpucore.cpp | 4 +- .../gapi/test/common/gapi_core_tests_inl.hpp | 4 +- 8 files changed, 755 insertions(+), 23 deletions(-) create mode 100644 modules/gapi/samples/text_detection.cpp diff --git a/modules/gapi/include/opencv2/gapi/core.hpp b/modules/gapi/include/opencv2/gapi/core.hpp index 2c01328f09..8825585696 100644 --- a/modules/gapi/include/opencv2/gapi/core.hpp +++ b/modules/gapi/include/opencv2/gapi/core.hpp @@ -508,19 +508,23 @@ namespace core { return in.withType(in.depth, in.chan).withSize(dsize); } }; +} // namespace core - G_TYPED_KERNEL(GSize, (GMat)>, "org.opencv.core.size") { - static GOpaqueDesc outMeta(const GMatDesc&) { - return empty_gopaque_desc(); - } - }; +namespace streaming { - G_TYPED_KERNEL(GSizeR, (GOpaque)>, "org.opencv.core.sizeR") { - static GOpaqueDesc outMeta(const GOpaqueDesc&) { - return empty_gopaque_desc(); - } - }; -} +// Operations for Streaming (declared in this header for convenience) +G_TYPED_KERNEL(GSize, (GMat)>, "org.opencv.streaming.size") { + static GOpaqueDesc outMeta(const GMatDesc&) { + return empty_gopaque_desc(); + } +}; + +G_TYPED_KERNEL(GSizeR, (GOpaque)>, "org.opencv.streaming.sizeR") { + static GOpaqueDesc outMeta(const GOpaqueDesc&) { + return empty_gopaque_desc(); + } +}; +} // namespace streaming //! @addtogroup gapi_math //! @{ @@ -1753,9 +1757,10 @@ GAPI_EXPORTS GMat warpAffine(const GMat& src, const Mat& M, const Size& dsize, i int borderMode = cv::BORDER_CONSTANT, const Scalar& borderValue = Scalar()); //! 
@} gapi_transform +namespace streaming { /** @brief Gets dimensions from Mat. -@note Function textual ID is "org.opencv.core.size" +@note Function textual ID is "org.opencv.streaming.size" @param src Input tensor @return Size (tensor dimensions). @@ -1765,12 +1770,13 @@ GAPI_EXPORTS GOpaque size(const GMat& src); /** @overload Gets dimensions from rectangle. -@note Function textual ID is "org.opencv.core.sizeR" +@note Function textual ID is "org.opencv.streaming.sizeR" @param r Input rectangle. @return Size (rectangle dimensions). */ GAPI_EXPORTS GOpaque size(const GOpaque& r); +} //namespace streaming } //namespace gapi } //namespace cv diff --git a/modules/gapi/include/opencv2/gapi/gstreaming.hpp b/modules/gapi/include/opencv2/gapi/gstreaming.hpp index f45c30bdae..037fa94452 100644 --- a/modules/gapi/include/opencv2/gapi/gstreaming.hpp +++ b/modules/gapi/include/opencv2/gapi/gstreaming.hpp @@ -111,6 +111,22 @@ public: */ GAPI_WRAP void setSource(const gapi::wip::IStreamSource::Ptr& s); + /** + * @brief Constructs and specifies an input video stream for a + * single-input computation pipeline with the given parameters. + * + * Throws if pipeline is already running. Use stop() and then + * setSource() to run the graph on a new video stream. + * + * @overload + * @param args arguments used to contruct and initialize a stream + * source. + */ + template + void setSource(Args&&... args) { + setSource(cv::gapi::wip::make_src(std::forward(args)...)); + } + /** * @brief Start the pipeline execution. 
* diff --git a/modules/gapi/include/opencv2/gapi/infer/parsers.hpp b/modules/gapi/include/opencv2/gapi/infer/parsers.hpp index c3488f5799..15742c6e55 100644 --- a/modules/gapi/include/opencv2/gapi/infer/parsers.hpp +++ b/modules/gapi/include/opencv2/gapi/infer/parsers.hpp @@ -122,4 +122,16 @@ GAPI_EXPORTS std::tuple, GArray> parseYolo(const GMat& in, } // namespace gapi } // namespace cv +// Reimport parseSSD & parseYolo under their initial namespace +namespace cv { +namespace gapi { +namespace streaming { + +using cv::gapi::parseSSD; +using cv::gapi::parseYolo; + +} // namespace streaming +} // namespace gapi +} // namespace cv + #endif // OPENCV_GAPI_PARSERS_HPP diff --git a/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp b/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp index 91d08bba06..ac90181184 100644 --- a/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp +++ b/modules/gapi/perf/common/gapi_core_perf_tests_inl.hpp @@ -2124,7 +2124,7 @@ PERF_TEST_P_(SizePerfTest, TestPerformance) // G-API code ////////////////////////////////////////////////////////////// cv::GMat in; - auto out = cv::gapi::size(in); + auto out = cv::gapi::streaming::size(in); cv::GComputation c(cv::GIn(in), cv::GOut(out)); cv::Size out_sz; @@ -2156,7 +2156,7 @@ PERF_TEST_P_(SizeRPerfTest, TestPerformance) // G-API code ////////////////////////////////////////////////////////////// cv::GOpaque op_rect; - auto out = cv::gapi::size(op_rect); + auto out = cv::gapi::streaming::size(op_rect); cv::GComputation c(cv::GIn(op_rect), cv::GOut(out)); cv::Size out_sz; diff --git a/modules/gapi/samples/text_detection.cpp b/modules/gapi/samples/text_detection.cpp new file mode 100644 index 0000000000..da1bab6ca9 --- /dev/null +++ b/modules/gapi/samples/text_detection.cpp @@ -0,0 +1,698 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include + +const std::string about = + 
"This is an OpenCV-based version of OMZ Text Detection example"; +const std::string keys = + "{ h help | | Print this help message }" + "{ input | | Path to the input video file }" + "{ tdm | text-detection-0004.xml | Path to OpenVINO text detection model (.xml), versions 0003 and 0004 work }" + "{ tdd | CPU | Target device for the text detector (e.g. CPU, GPU, VPU, ...) }" + "{ trm | text-recognition-0012.xml | Path to OpenVINO text recognition model (.xml) }" + "{ trd | CPU | Target device for the text recognition (e.g. CPU, GPU, VPU, ...) }" + "{ bw | 0 | CTC beam search decoder bandwidth, if 0, a CTC greedy decoder is used}" + "{ sset | 0123456789abcdefghijklmnopqrstuvwxyz | Symbol set to use with text recognition decoder. Shouldn't contain symbol #. }" + "{ thr | 0.2 | Text recognition confidence threshold}" + ; + +namespace { +std::string weights_path(const std::string &model_path) { + const auto EXT_LEN = 4u; + const auto sz = model_path.size(); + CV_Assert(sz > EXT_LEN); + + const auto ext = model_path.substr(sz - EXT_LEN); + CV_Assert(cv::toLowerCase(ext) == ".xml"); + return model_path.substr(0u, sz - EXT_LEN) + ".bin"; +} + +////////////////////////////////////////////////////////////////////// +// Taken from OMZ samples as-is +template +void softmax_and_choose(Iter begin, Iter end, int *argmax, float *prob) { + auto max_element = std::max_element(begin, end); + *argmax = static_cast(std::distance(begin, max_element)); + float max_val = *max_element; + double sum = 0; + for (auto i = begin; i != end; i++) { + sum += std::exp((*i) - max_val); + } + if (std::fabs(sum) < std::numeric_limits::epsilon()) { + throw std::logic_error("sum can't be equal to zero"); + } + *prob = 1.0f / static_cast(sum); +} + +template +std::vector softmax(Iter begin, Iter end) { + std::vector prob(end - begin, 0.f); + std::transform(begin, end, prob.begin(), [](float x) { return std::exp(x); }); + float sum = std::accumulate(prob.begin(), prob.end(), 0.0f); + for (int i = 0; i < 
static_cast(prob.size()); i++) + prob[i] /= sum; + return prob; +} + +struct BeamElement { + std::vector sentence; //!< The sequence of chars that will be a result of the beam element + + float prob_blank; //!< The probability that the last char in CTC sequence + //!< for the beam element is the special blank char + + float prob_not_blank; //!< The probability that the last char in CTC sequence + //!< for the beam element is NOT the special blank char + + float prob() const { //!< The probability of the beam element. + return prob_blank + prob_not_blank; + } +}; + +std::string CTCGreedyDecoder(const float *data, + const std::size_t sz, + const std::string &alphabet, + const char pad_symbol, + double *conf) { + std::string res = ""; + bool prev_pad = false; + *conf = 1; + + const auto num_classes = alphabet.length(); + for (auto it = data; it != (data+sz); it += num_classes) { + int argmax = 0; + float prob = 0.f; + + softmax_and_choose(it, it + num_classes, &argmax, &prob); + (*conf) *= prob; + + auto symbol = alphabet[argmax]; + if (symbol != pad_symbol) { + if (res.empty() || prev_pad || (!res.empty() && symbol != res.back())) { + prev_pad = false; + res += symbol; + } + } else { + prev_pad = true; + } + } + return res; +} + +std::string CTCBeamSearchDecoder(const float *data, + const std::size_t sz, + const std::string &alphabet, + double *conf, + int bandwidth) { + const auto num_classes = alphabet.length(); + + std::vector curr; + std::vector last; + + last.push_back(BeamElement{std::vector(), 1.f, 0.f}); + + for (auto it = data; it != (data+sz); it += num_classes) { + curr.clear(); + + std::vector prob = softmax(it, it + num_classes); + + for(const auto& candidate: last) { + float prob_not_blank = 0.f; + const std::vector& candidate_sentence = candidate.sentence; + if (!candidate_sentence.empty()) { + int n = candidate_sentence.back(); + prob_not_blank = candidate.prob_not_blank * prob[n]; + } + float prob_blank = candidate.prob() * prob[num_classes - 1]; + + 
auto check_res = std::find_if(curr.begin(), + curr.end(), + [&candidate_sentence](const BeamElement& n) { + return n.sentence == candidate_sentence; + }); + if (check_res == std::end(curr)) { + curr.push_back(BeamElement{candidate.sentence, prob_blank, prob_not_blank}); + } else { + check_res->prob_not_blank += prob_not_blank; + if (check_res->prob_blank != 0.f) { + throw std::logic_error("Probability that the last char in CTC-sequence " + "is the special blank char must be zero here"); + } + check_res->prob_blank = prob_blank; + } + + for (int i = 0; i < static_cast(num_classes) - 1; i++) { + auto extend = candidate_sentence; + extend.push_back(i); + + if (candidate_sentence.size() > 0 && candidate.sentence.back() == i) { + prob_not_blank = prob[i] * candidate.prob_blank; + } else { + prob_not_blank = prob[i] * candidate.prob(); + } + + auto check_res2 = std::find_if(curr.begin(), + curr.end(), + [&extend](const BeamElement &n) { + return n.sentence == extend; + }); + if (check_res2 == std::end(curr)) { + curr.push_back(BeamElement{extend, 0.f, prob_not_blank}); + } else { + check_res2->prob_not_blank += prob_not_blank; + } + } + } + + sort(curr.begin(), curr.end(), [](const BeamElement &a, const BeamElement &b) -> bool { + return a.prob() > b.prob(); + }); + + last.clear(); + int num_to_copy = std::min(bandwidth, static_cast(curr.size())); + for (int b = 0; b < num_to_copy; b++) { + last.push_back(curr[b]); + } + } + + *conf = last[0].prob(); + std::string res=""; + for (const auto& idx: last[0].sentence) { + res += alphabet[idx]; + } + + return res; +} + +////////////////////////////////////////////////////////////////////// +} // anonymous namespace + +namespace custom { +namespace { + +////////////////////////////////////////////////////////////////////// +// Define networks for this sample +using GMat2 = std::tuple; +G_API_NET(TextDetection, + , + "sample.custom.text_detect"); + +G_API_NET(TextRecognition, + , + "sample.custom.text_recogn"); + +// Define 
custom operations +using GSize = cv::GOpaque; +using GRRects = cv::GArray; +G_API_OP(PostProcess, + , + "sample.custom.text.post_proc") { + static cv::GArrayDesc outMeta(const cv::GMatDesc &, + const cv::GMatDesc &, + const cv::GOpaqueDesc &, + float, + float) { + return cv::empty_array_desc(); + } +}; + +using GMats = cv::GArray; +G_API_OP(CropLabels, + , + "sample.custom.text.crop") { + static cv::GArrayDesc outMeta(const cv::GMatDesc &, + const cv::GArrayDesc &, + const cv::GOpaqueDesc &) { + return cv::empty_array_desc(); + } +}; + +////////////////////////////////////////////////////////////////////// +// Implement custom operations +GAPI_OCV_KERNEL(OCVPostProcess, PostProcess) { + static void run(const cv::Mat &link, + const cv::Mat &segm, + const cv::Size &img_size, + const float link_threshold, + const float segm_threshold, + std::vector &out) { + // NOTE: Taken from the OMZ text detection sample almost as-is + const int kMinArea = 300; + const int kMinHeight = 10; + + const float *link_data_pointer = link.ptr(); + std::vector link_data(link_data_pointer, link_data_pointer + link.total()); + link_data = transpose4d(link_data, dimsToShape(link.size), {0, 2, 3, 1}); + softmax(link_data); + link_data = sliceAndGetSecondChannel(link_data); + std::vector new_link_data_shape = { + link.size[0], + link.size[2], + link.size[3], + link.size[1]/2, + }; + + const float *cls_data_pointer = segm.ptr(); + std::vector cls_data(cls_data_pointer, cls_data_pointer + segm.total()); + cls_data = transpose4d(cls_data, dimsToShape(segm.size), {0, 2, 3, 1}); + softmax(cls_data); + cls_data = sliceAndGetSecondChannel(cls_data); + std::vector new_cls_data_shape = { + segm.size[0], + segm.size[2], + segm.size[3], + segm.size[1]/2, + }; + + out = maskToBoxes(decodeImageByJoin(cls_data, new_cls_data_shape, + link_data, new_link_data_shape, + segm_threshold, link_threshold), + static_cast(kMinArea), + static_cast(kMinHeight), + img_size); + } + + static std::vector dimsToShape(const 
cv::MatSize &sz) { + const int n_dims = sz.dims(); + std::vector result; + result.reserve(n_dims); + + // cv::MatSize is not iterable... + for (int i = 0; i < n_dims; i++) { + result.emplace_back(static_cast(sz[i])); + } + return result; + } + + static void softmax(std::vector &rdata) { + // NOTE: Taken from the OMZ text detection sample almost as-is + const size_t last_dim = 2; + for (size_t i = 0 ; i < rdata.size(); i+=last_dim) { + float m = std::max(rdata[i], rdata[i+1]); + rdata[i] = std::exp(rdata[i] - m); + rdata[i + 1] = std::exp(rdata[i + 1] - m); + float s = rdata[i] + rdata[i + 1]; + rdata[i] /= s; + rdata[i + 1] /= s; + } + } + + static std::vector transpose4d(const std::vector &data, + const std::vector &shape, + const std::vector &axes) { + // NOTE: Taken from the OMZ text detection sample almost as-is + if (shape.size() != axes.size()) + throw std::runtime_error("Shape and axes must have the same dimension."); + + for (size_t a : axes) { + if (a >= shape.size()) + throw std::runtime_error("Axis must be less than dimension of shape."); + } + size_t total_size = shape[0]*shape[1]*shape[2]*shape[3]; + std::vector steps { + shape[axes[1]]*shape[axes[2]]*shape[axes[3]], + shape[axes[2]]*shape[axes[3]], + shape[axes[3]], + 1 + }; + + size_t source_data_idx = 0; + std::vector new_data(total_size, 0); + std::vector ids(shape.size()); + for (ids[0] = 0; ids[0] < shape[0]; ids[0]++) { + for (ids[1] = 0; ids[1] < shape[1]; ids[1]++) { + for (ids[2] = 0; ids[2] < shape[2]; ids[2]++) { + for (ids[3]= 0; ids[3] < shape[3]; ids[3]++) { + size_t new_data_idx = ids[axes[0]]*steps[0] + ids[axes[1]]*steps[1] + + ids[axes[2]]*steps[2] + ids[axes[3]]*steps[3]; + new_data[new_data_idx] = data[source_data_idx++]; + } + } + } + } + return new_data; + } + + static std::vector sliceAndGetSecondChannel(const std::vector &data) { + // NOTE: Taken from the OMZ text detection sample almost as-is + std::vector new_data(data.size() / 2, 0); + for (size_t i = 0; i < data.size() / 2; 
i++) { + new_data[i] = data[2 * i + 1]; + } + return new_data; + } + + static void join(const int p1, + const int p2, + std::unordered_map &group_mask) { + // NOTE: Taken from the OMZ text detection sample almost as-is + const int root1 = findRoot(p1, group_mask); + const int root2 = findRoot(p2, group_mask); + if (root1 != root2) { + group_mask[root1] = root2; + } + } + + static cv::Mat decodeImageByJoin(const std::vector &cls_data, + const std::vector &cls_data_shape, + const std::vector &link_data, + const std::vector &link_data_shape, + float cls_conf_threshold, + float link_conf_threshold) { + // NOTE: Taken from the OMZ text detection sample almost as-is + const int h = cls_data_shape[1]; + const int w = cls_data_shape[2]; + + std::vector pixel_mask(h * w, 0); + std::unordered_map group_mask; + std::vector points; + for (int i = 0; i < static_cast(pixel_mask.size()); i++) { + pixel_mask[i] = cls_data[i] >= cls_conf_threshold; + if (pixel_mask[i]) { + points.emplace_back(i % w, i / w); + group_mask[i] = -1; + } + } + std::vector link_mask(link_data.size(), 0); + for (size_t i = 0; i < link_mask.size(); i++) { + link_mask[i] = link_data[i] >= link_conf_threshold; + } + size_t neighbours = size_t(link_data_shape[3]); + for (const auto &point : points) { + size_t neighbour = 0; + for (int ny = point.y - 1; ny <= point.y + 1; ny++) { + for (int nx = point.x - 1; nx <= point.x + 1; nx++) { + if (nx == point.x && ny == point.y) + continue; + if (nx >= 0 && nx < w && ny >= 0 && ny < h) { + uchar pixel_value = pixel_mask[size_t(ny) * size_t(w) + size_t(nx)]; + uchar link_value = link_mask[(size_t(point.y) * size_t(w) + size_t(point.x)) + *neighbours + neighbour]; + if (pixel_value && link_value) { + join(point.x + point.y * w, nx + ny * w, group_mask); + } + } + neighbour++; + } + } + } + return get_all(points, w, h, group_mask); + } + + static cv::Mat get_all(const std::vector &points, + const int w, + const int h, + std::unordered_map &group_mask) { + // NOTE: Taken 
from the OMZ text detection sample almost as-is + std::unordered_map root_map; + cv::Mat mask(h, w, CV_32S, cv::Scalar(0)); + for (const auto &point : points) { + int point_root = findRoot(point.x + point.y * w, group_mask); + if (root_map.find(point_root) == root_map.end()) { + root_map.emplace(point_root, static_cast(root_map.size() + 1)); + } + mask.at(point.x + point.y * w) = root_map[point_root]; + } + return mask; + } + + static int findRoot(const int point, + std::unordered_map &group_mask) { + // NOTE: Taken from the OMZ text detection sample almost as-is + int root = point; + bool update_parent = false; + while (group_mask.at(root) != -1) { + root = group_mask.at(root); + update_parent = true; + } + if (update_parent) { + group_mask[point] = root; + } + return root; + } + + static std::vector maskToBoxes(const cv::Mat &mask, + const float min_area, + const float min_height, + const cv::Size &image_size) { + // NOTE: Taken from the OMZ text detection sample almost as-is + std::vector bboxes; + double min_val = 0.; + double max_val = 0.; + cv::minMaxLoc(mask, &min_val, &max_val); + int max_bbox_idx = static_cast(max_val); + cv::Mat resized_mask; + cv::resize(mask, resized_mask, image_size, 0, 0, cv::INTER_NEAREST); + + for (int i = 1; i <= max_bbox_idx; i++) { + cv::Mat bbox_mask = resized_mask == i; + std::vector> contours; + + cv::findContours(bbox_mask, contours, cv::RETR_CCOMP, cv::CHAIN_APPROX_SIMPLE); + if (contours.empty()) + continue; + cv::RotatedRect r = cv::minAreaRect(contours[0]); + if (std::min(r.size.width, r.size.height) < min_height) + continue; + if (r.size.area() < min_area) + continue; + bboxes.emplace_back(r); + } + return bboxes; + } +}; // GAPI_OCV_KERNEL(PostProcess) + +GAPI_OCV_KERNEL(OCVCropLabels, CropLabels) { + static void run(const cv::Mat &image, + const std::vector &detections, + const cv::Size &outSize, + std::vector &out) { + out.clear(); + out.reserve(detections.size()); + cv::Mat crop(outSize, CV_8UC3, cv::Scalar(0)); + 
cv::Mat gray(outSize, CV_8UC1, cv::Scalar(0)); + std::vector blob_shape = {1,1,outSize.height,outSize.width}; + + for (auto &&rr : detections) { + std::vector points(4); + rr.points(points.data()); + + const auto top_left_point_idx = topLeftPointIdx(points); + cv::Point2f point0 = points[static_cast(top_left_point_idx)]; + cv::Point2f point1 = points[(top_left_point_idx + 1) % 4]; + cv::Point2f point2 = points[(top_left_point_idx + 2) % 4]; + + std::vector from{point0, point1, point2}; + std::vector to{ + cv::Point2f(0.0f, 0.0f), + cv::Point2f(static_cast(outSize.width-1), 0.0f), + cv::Point2f(static_cast(outSize.width-1), + static_cast(outSize.height-1)) + }; + cv::Mat M = cv::getAffineTransform(from, to); + cv::warpAffine(image, crop, M, outSize); + cv::cvtColor(crop, gray, cv::COLOR_BGR2GRAY); + + cv::Mat blob; + gray.convertTo(blob, CV_32F); + out.push_back(blob.reshape(1, blob_shape)); // pass as 1,1,H,W instead of H,W + } + } + + static int topLeftPointIdx(const std::vector &points) { + // NOTE: Taken from the OMZ text detection sample almost as-is + cv::Point2f most_left(std::numeric_limits::max(), + std::numeric_limits::max()); + cv::Point2f almost_most_left(std::numeric_limits::max(), + std::numeric_limits::max()); + int most_left_idx = -1; + int almost_most_left_idx = -1; + + for (size_t i = 0; i < points.size() ; i++) { + if (most_left.x > points[i].x) { + if (most_left.x < std::numeric_limits::max()) { + almost_most_left = most_left; + almost_most_left_idx = most_left_idx; + } + most_left = points[i]; + most_left_idx = static_cast(i); + } + if (almost_most_left.x > points[i].x && points[i] != most_left) { + almost_most_left = points[i]; + almost_most_left_idx = static_cast(i); + } + } + + if (almost_most_left.y < most_left.y) { + most_left = almost_most_left; + most_left_idx = almost_most_left_idx; + } + return most_left_idx; + } + +}; // GAPI_OCV_KERNEL(CropLabels) + +} // anonymous namespace +} // namespace custom + +namespace vis { +namespace { + 
+void drawRotatedRect(cv::Mat &m, const cv::RotatedRect &rc) { + std::vector tmp_points(5); + rc.points(tmp_points.data()); + tmp_points[4] = tmp_points[0]; + auto prev = tmp_points.begin(), it = prev+1; + for (; it != tmp_points.end(); ++it) { + cv::line(m, *prev, *it, cv::Scalar(50, 205, 50), 2); + prev = it; + } +} + +void drawText(cv::Mat &m, const cv::RotatedRect &rc, const std::string &str) { + const int fface = cv::FONT_HERSHEY_SIMPLEX; + const double scale = 0.7; + const int thick = 1; + int base = 0; + const auto text_size = cv::getTextSize(str, fface, scale, thick, &base); + + std::vector tmp_points(4); + rc.points(tmp_points.data()); + const auto tl_point_idx = custom::OCVCropLabels::topLeftPointIdx(tmp_points); + cv::Point text_pos = tmp_points[tl_point_idx]; + text_pos.x = std::max(0, text_pos.x); + text_pos.y = std::max(text_size.height, text_pos.y); + + cv::rectangle(m, + text_pos + cv::Point{0, base}, + text_pos + cv::Point{text_size.width, -text_size.height}, + CV_RGB(50, 205, 50), + cv::FILLED); + const auto white = CV_RGB(255, 255, 255); + cv::putText(m, str, text_pos, fface, scale, white, thick, 8); +} + +} // anonymous namespace +} // namespace vis + +int main(int argc, char *argv[]) +{ + cv::CommandLineParser cmd(argc, argv, keys); + cmd.about(about); + if (cmd.has("help")) { + cmd.printMessage(); + return 0; + } + const auto input_file_name = cmd.get("input"); + const auto tdet_model_path = cmd.get("tdm"); + const auto trec_model_path = cmd.get("trm"); + const auto tdet_target_dev = cmd.get("tdd"); + const auto trec_target_dev = cmd.get("trd"); + const auto ctc_beam_dec_bw = cmd.get("bw"); + const auto dec_conf_thresh = cmd.get("thr"); + + const auto pad_symbol = '#'; + const auto symbol_set = cmd.get("sset") + pad_symbol; + + cv::GMat in; + cv::GOpaque in_rec_sz; + cv::GMat link, segm; + std::tie(link, segm) = cv::gapi::infer(in); + cv::GOpaque size = cv::gapi::streaming::size(in); + cv::GArray rrs = custom::PostProcess::on(link, segm, size, 
0.8f, 0.8f); + cv::GArray labels = custom::CropLabels::on(in, rrs, in_rec_sz); + cv::GArray text = cv::gapi::infer2(in, labels); + + cv::GComputation graph(cv::GIn(in, in_rec_sz), + cv::GOut(cv::gapi::copy(in), rrs, text)); + + // Text detection network + auto tdet_net = cv::gapi::ie::Params { + tdet_model_path, // path to topology IR + weights_path(tdet_model_path), // path to weights + tdet_target_dev, // device specifier + }.cfgOutputLayers({"model/link_logits_/add", "model/segm_logits/add"}); + + auto trec_net = cv::gapi::ie::Params { + trec_model_path, // path to topology IR + weights_path(trec_model_path), // path to weights + trec_target_dev, // device specifier + }; + auto networks = cv::gapi::networks(tdet_net, trec_net); + + auto kernels = cv::gapi::kernels< custom::OCVPostProcess + , custom::OCVCropLabels + >(); + auto pipeline = graph.compileStreaming(cv::compile_args(kernels, networks)); + + std::cout << "Reading " << input_file_name << std::endl; + + // Input stream + auto in_src = cv::gapi::wip::make_src(input_file_name); + + // Text recognition input size (also an input parameter to the graph) + auto in_rsz = cv::Size{ 120, 32 }; + + // Set the pipeline source & start the pipeline + pipeline.setSource(cv::gin(in_src, in_rsz)); + pipeline.start(); + + // Declare the output data & run the processing loop + cv::TickMeter tm; + cv::Mat image; + std::vector out_rcs; + std::vector out_text; + + tm.start(); + int frames = 0; + while (pipeline.pull(cv::gout(image, out_rcs, out_text))) { + frames++; + + CV_Assert(out_rcs.size() == out_text.size()); + const auto num_labels = out_rcs.size(); + + std::vector tmp_points(4); + for (std::size_t l = 0; l < num_labels; l++) { + // Decode the recognized text in the rectangle + const auto &blob = out_text[l]; + const float *data = blob.ptr(); + const auto sz = blob.total(); + double conf = 1.0; + const std::string res = ctc_beam_dec_bw == 0 + ? 
CTCGreedyDecoder(data, sz, symbol_set, pad_symbol, &conf) + : CTCBeamSearchDecoder(data, sz, symbol_set, &conf, ctc_beam_dec_bw); + + // Draw a bounding box for this rotated rectangle + const auto &rc = out_rcs[l]; + vis::drawRotatedRect(image, rc); + + // Draw text, if decoded + if (conf >= dec_conf_thresh) { + vis::drawText(image, rc, res); + } + } + tm.stop(); + cv::imshow("Out", image); + cv::waitKey(1); + tm.start(); + } + tm.stop(); + std::cout << "Processed " << frames << " frames" + << " (" << frames / tm.getTimeSec() << " FPS)" << std::endl; + return 0; +} diff --git a/modules/gapi/src/api/kernels_core.cpp b/modules/gapi/src/api/kernels_core.cpp index 55c43594af..82aceb1f26 100644 --- a/modules/gapi/src/api/kernels_core.cpp +++ b/modules/gapi/src/api/kernels_core.cpp @@ -388,14 +388,14 @@ GMat warpAffine(const GMat& src, const Mat& M, const Size& dsize, int flags, return core::GWarpAffine::on(src, M, dsize, flags, borderMode, borderValue); } -GOpaque size(const GMat& src) +GOpaque streaming::size(const GMat& src) { - return core::GSize::on(src); + return streaming::GSize::on(src); } -GOpaque size(const GOpaque& r) +GOpaque streaming::size(const GOpaque& r) { - return core::GSizeR::on(r); + return streaming::GSizeR::on(r); } } //namespace gapi diff --git a/modules/gapi/src/backends/cpu/gcpucore.cpp b/modules/gapi/src/backends/cpu/gcpucore.cpp index f2b8f7077d..fc460149c6 100644 --- a/modules/gapi/src/backends/cpu/gcpucore.cpp +++ b/modules/gapi/src/backends/cpu/gcpucore.cpp @@ -625,7 +625,7 @@ GAPI_OCV_KERNEL(GCPUParseYolo, cv::gapi::nn::parsers::GParseYolo) } }; -GAPI_OCV_KERNEL(GCPUSize, cv::gapi::core::GSize) +GAPI_OCV_KERNEL(GCPUSize, cv::gapi::streaming::GSize) { static void run(const cv::Mat& in, cv::Size& out) { @@ -634,7 +634,7 @@ GAPI_OCV_KERNEL(GCPUSize, cv::gapi::core::GSize) } }; -GAPI_OCV_KERNEL(GCPUSizeR, cv::gapi::core::GSizeR) +GAPI_OCV_KERNEL(GCPUSizeR, cv::gapi::streaming::GSizeR) { static void run(const cv::Rect& in, cv::Size& out) { diff 
--git a/modules/gapi/test/common/gapi_core_tests_inl.hpp b/modules/gapi/test/common/gapi_core_tests_inl.hpp index e11324f070..1a167ad5ea 100644 --- a/modules/gapi/test/common/gapi_core_tests_inl.hpp +++ b/modules/gapi/test/common/gapi_core_tests_inl.hpp @@ -1691,7 +1691,7 @@ TEST_P(SizeTest, ParseTest) cv::GMat in; cv::Size out_sz; - auto out = cv::gapi::size(in); + auto out = cv::gapi::streaming::size(in); cv::GComputation c(cv::GIn(in), cv::GOut(out)); c.apply(cv::gin(in_mat1), cv::gout(out_sz), getCompileArgs()); @@ -1704,7 +1704,7 @@ TEST_P(SizeRTest, ParseTest) cv::Size out_sz; cv::GOpaque op_rect; - auto out = cv::gapi::size(op_rect); + auto out = cv::gapi::streaming::size(op_rect); cv::GComputation c(cv::GIn(op_rect), cv::GOut(out)); c.apply(cv::gin(rect), cv::gout(out_sz), getCompileArgs()); From d31b6c3480c601305fa1e153a819aad0bd247fe2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20=C5=A0tefa=C5=88=C3=A1k?= Date: Fri, 16 Oct 2020 00:28:15 +0200 Subject: [PATCH 043/152] stitching: add warpPointBackward to warpers test by projecting and reprojecting back random points --- .../opencv2/stitching/detail/warpers.hpp | 31 +++++ .../opencv2/stitching/detail/warpers_inl.hpp | 8 ++ .../include/opencv2/stitching/warpers.hpp | 16 +++ modules/stitching/src/warpers.cpp | 28 ++++ modules/stitching/test/test_reprojection.cpp | 131 ++++++++++++++++++ 5 files changed, 214 insertions(+) create mode 100644 modules/stitching/test/test_reprojection.cpp diff --git a/modules/stitching/include/opencv2/stitching/detail/warpers.hpp b/modules/stitching/include/opencv2/stitching/detail/warpers.hpp index bc2c6e3546..ff005e8da2 100644 --- a/modules/stitching/include/opencv2/stitching/detail/warpers.hpp +++ b/modules/stitching/include/opencv2/stitching/detail/warpers.hpp @@ -70,6 +70,23 @@ public: */ virtual Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) = 0; + /** @brief Projects the image point backward. 
+ + @param pt Projected point + @param K Camera intrinsic parameters + @param R Camera rotation matrix + @return Backward-projected point + */ +#if CV_VERSION_MAJOR == 4 + virtual Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R) + { + CV_UNUSED(pt); CV_UNUSED(K); CV_UNUSED(R); + CV_Error(Error::StsNotImplemented, ""); + } +#else + virtual Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R) = 0; +#endif + /** @brief Builds the projection maps according to the given camera data. @param src_size Source image size @@ -143,6 +160,8 @@ class CV_EXPORTS_TEMPLATE RotationWarperBase : public RotationWarper public: Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE; + Point2f warpPointBackward(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE; + Rect buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) CV_OVERRIDE; Point warp(InputArray src, InputArray K, InputArray R, int interp_mode, int border_mode, @@ -189,6 +208,9 @@ public: Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R) CV_OVERRIDE; Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R, InputArray T); + Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R) CV_OVERRIDE; + Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R, InputArray T); + virtual Rect buildMaps(Size src_size, InputArray K, InputArray R, InputArray T, CV_OUT OutputArray xmap, CV_OUT OutputArray ymap); Rect buildMaps(Size src_size, InputArray K, InputArray R, CV_OUT OutputArray xmap, CV_OUT OutputArray ymap) CV_OVERRIDE; @@ -228,6 +250,15 @@ public: */ Point2f warpPoint(const Point2f &pt, InputArray K, InputArray H) CV_OVERRIDE; + /** @brief Projects the image point backward. 
+ + @param pt Projected point + @param K Camera intrinsic parameters + @param H Camera extrinsic parameters + @return Backward-projected point + */ + Point2f warpPointBackward(const Point2f &pt, InputArray K, InputArray H) CV_OVERRIDE; + /** @brief Builds the projection maps according to the given camera data. @param src_size Source image size diff --git a/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp b/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp index f4a19d9c24..5e2375621e 100644 --- a/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp +++ b/modules/stitching/include/opencv2/stitching/detail/warpers_inl.hpp @@ -61,6 +61,14 @@ Point2f RotationWarperBase

::warpPoint(const Point2f &pt, InputArray K, InputA return uv; } +template +Point2f RotationWarperBase

::warpPointBackward(const Point2f& pt, InputArray K, InputArray R) +{ + projector_.setCameraParams(K, R); + Point2f xy; + projector_.mapBackward(pt.x, pt.y, xy.x, xy.y); + return xy; +} template Rect RotationWarperBase

::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray _xmap, OutputArray _ymap) diff --git a/modules/stitching/include/opencv2/stitching/warpers.hpp b/modules/stitching/include/opencv2/stitching/warpers.hpp index ff43386107..aa1ce5a6a7 100644 --- a/modules/stitching/include/opencv2/stitching/warpers.hpp +++ b/modules/stitching/include/opencv2/stitching/warpers.hpp @@ -65,6 +65,22 @@ namespace cv { */ CV_WRAP Point2f warpPoint(const Point2f &pt, InputArray K, InputArray R); + /** @brief Projects the image point backward. + + @param pt Projected point + @param K Camera intrinsic parameters + @param R Camera rotation matrix + @return Backward-projected point + */ +#if CV_VERSION_MAJOR == 4 + CV_WRAP Point2f warpPointBackward(const Point2f& pt, InputArray K, InputArray R) + { + CV_UNUSED(pt); CV_UNUSED(K); CV_UNUSED(R); + CV_Error(Error::StsNotImplemented, ""); + } +#else + CV_WRAP Point2f warpPointBackward(const Point2f &pt, InputArray K, InputArray R); +#endif /** @brief Builds the projection maps according to the given camera data. 
@param src_size Source image size diff --git a/modules/stitching/src/warpers.cpp b/modules/stitching/src/warpers.cpp index 4360590c94..85ac939074 100644 --- a/modules/stitching/src/warpers.cpp +++ b/modules/stitching/src/warpers.cpp @@ -92,6 +92,14 @@ Point2f PyRotationWarper::warpPoint(const Point2f &pt, InputArray K, InputArray { return rw.get()->warpPoint(pt, K, R); } + +#if CV_VERSION_MAJOR != 4 +Point2f PyRotationWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R) +{ + return rw.get()->warpPointBackward(pt, K, R); +} +#endif + Rect PyRotationWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) { return rw.get()->buildMaps(src_size, K, R, xmap, ymap); @@ -164,6 +172,20 @@ Point2f PlaneWarper::warpPoint(const Point2f &pt, InputArray K, InputArray R) Mat_ T(3, 1, tz); return warpPoint(pt, K, R, T); } +Point2f PlaneWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R, InputArray T) +{ + projector_.setCameraParams(K, R, T); + Point2f xy; + projector_.mapBackward(pt.x, pt.y, xy.x, xy.y); + return xy; +} + +Point2f PlaneWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray R) +{ + float tz[] = { 0.f, 0.f, 0.f }; + Mat_ T(3, 1, tz); + return warpPointBackward(pt, K, R, T); +} Rect PlaneWarper::buildMaps(Size src_size, InputArray K, InputArray R, OutputArray xmap, OutputArray ymap) { @@ -299,6 +321,12 @@ Point2f AffineWarper::warpPoint(const Point2f &pt, InputArray K, InputArray H) return PlaneWarper::warpPoint(pt, K, R, T); } +Point2f AffineWarper::warpPointBackward(const Point2f& pt, InputArray K, InputArray H) +{ + Mat R, T; + getRTfromHomogeneous(H, R, T); + return PlaneWarper::warpPointBackward(pt, K, R, T); +} Rect AffineWarper::buildMaps(Size src_size, InputArray K, InputArray H, OutputArray xmap, OutputArray ymap) { diff --git a/modules/stitching/test/test_reprojection.cpp b/modules/stitching/test/test_reprojection.cpp new file mode 100644 index 
0000000000..076bbb769d --- /dev/null +++ b/modules/stitching/test/test_reprojection.cpp @@ -0,0 +1,131 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "test_precomp.hpp" +#include "opencv2/stitching/warpers.hpp" + +namespace opencv_test { namespace { +class ReprojectionTest : public ::testing::Test { + +protected: + const size_t TEST_COUNT = 15; + Mat K, R; + RNG rng = RNG(0); + ReprojectionTest() + { + K = Mat::eye(3, 3, CV_32FC1); + float angle = (float)(30.0 * CV_PI / 180.0); + float rotationMatrix[9] = { + (float)cos(angle), (float)sin(angle), 0, + (float)-sin(angle), (float)cos(angle), 0, + 0, 0, 1 + }; + Mat(3, 3, CV_32FC1, rotationMatrix).copyTo(R); + } + void TestReprojection(Ptr warper, Point2f pt) { + Point2f projected_pt = warper->warpPoint(pt, K, R); + Point2f reprojected_pt = warper->warpPointBackward(projected_pt, K, R); + EXPECT_NEAR(pt.x, reprojected_pt.x, float( 1e-5)); + EXPECT_NEAR(pt.y, reprojected_pt.y, float( 1e-5)); + } +}; + + +TEST_F(ReprojectionTest, PlaneWarper) +{ + Ptr creator = makePtr(); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f))); + } +} + +TEST_F(ReprojectionTest, AffineWarper) +{ + Ptr creator = makePtr(); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f))); + } +} + +TEST_F(ReprojectionTest, CylindricalWarper) +{ + Ptr creator = makePtr(); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f))); + } +} + +TEST_F(ReprojectionTest, SphericalWarper) +{ + Ptr creator = makePtr(); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 
1.f))); + } +} + +TEST_F(ReprojectionTest, FisheyeWarper) +{ + Ptr creator = makePtr(); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f))); + } +} + +TEST_F(ReprojectionTest, StereographicWarper) +{ + Ptr creator = makePtr(); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f))); + } +} + +TEST_F(ReprojectionTest, CompressedRectilinearWarper) +{ + Ptr creator = makePtr(1.5f, 1.0f); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f))); + } +} + +TEST_F(ReprojectionTest, CompressedRectilinearPortraitWarper) +{ + Ptr creator = makePtr(1.5f, 1.0f); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f))); + } +} + +TEST_F(ReprojectionTest, PaniniWarper) +{ + Ptr creator = makePtr(1.5f, 1.0f); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f))); + } +} + +TEST_F(ReprojectionTest, PaniniPortraitWarper) +{ + Ptr creator = makePtr(1.5f, 1.0f); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f))); + } +} + +TEST_F(ReprojectionTest, MercatorWarper) +{ + Ptr creator = makePtr(); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f))); + } +} + +TEST_F(ReprojectionTest, TransverseMercatorWarper) +{ + Ptr creator = makePtr(); + for (size_t i = 0; i < TEST_COUNT; ++i) { + TestReprojection(creator->create(1), Point2f(rng.uniform(-1.f, 1.f), rng.uniform(-1.f, 1.f))); + } +} + +}} // namespace From 22ee5c0c4db9111780c6d34eb2a3c85e0f4046ff Mon Sep 17 00:00:00 2001 From: Rob Timpe Date: Wed, 
21 Oct 2020 15:51:46 -0700 Subject: [PATCH 044/152] Fix errors when building with cuda stubs Fixes two errors when building with the options WITH_CUDA=ON and BUILD_CUDA_STUBS=ON on a machine without CUDA. In the cudaarithm module, make sure cuda_runtime.h only gets included when CUDA is installed. In the stitching module, don't assume that cuda is present just because cudaarithm and cudawarping are present (as is the case when building with the above options). --- modules/cudaarithm/src/lut.cpp | 5 +++-- modules/stitching/src/blenders.cpp | 16 ++++++++-------- 2 files changed, 11 insertions(+), 10 deletions(-) diff --git a/modules/cudaarithm/src/lut.cpp b/modules/cudaarithm/src/lut.cpp index a4b4e02650..5ef2836017 100644 --- a/modules/cudaarithm/src/lut.cpp +++ b/modules/cudaarithm/src/lut.cpp @@ -4,8 +4,6 @@ #include "precomp.hpp" -#include "lut.hpp" - using namespace cv; using namespace cv::cuda; @@ -15,6 +13,9 @@ Ptr cv::cuda::createLookUpTable(InputArray) { throw_no_cuda(); retu #else /* !defined (HAVE_CUDA) || defined (CUDA_DISABLER) */ +// lut.hpp includes cuda_runtime.h and can only be included when we have CUDA +#include "lut.hpp" + Ptr cv::cuda::createLookUpTable(InputArray lut) { return makePtr(lut); diff --git a/modules/stitching/src/blenders.cpp b/modules/stitching/src/blenders.cpp index aeddc142dc..05e7ca85e4 100644 --- a/modules/stitching/src/blenders.cpp +++ b/modules/stitching/src/blenders.cpp @@ -219,7 +219,7 @@ MultiBandBlender::MultiBandBlender(int try_gpu, int num_bands, int weight_type) num_bands_ = 0; setNumBands(num_bands); -#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) +#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) can_use_gpu_ = try_gpu && cuda::getCudaEnabledDeviceCount(); gpu_feed_idx_ = 0; #else @@ -246,7 +246,7 @@ void MultiBandBlender::prepare(Rect dst_roi) Blender::prepare(dst_roi); -#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) +#if 
defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) if (can_use_gpu_) { gpu_initialized_ = false; @@ -332,7 +332,7 @@ void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl) UMat img; -#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) +#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) // If using gpu save the top left coordinate when running first time after prepare if (can_use_gpu_) { @@ -353,7 +353,7 @@ void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl) { img = _img.getUMat(); } -#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) +#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) else { gpu_img_ = _img.getGpuMat(); @@ -394,7 +394,7 @@ void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl) int bottom = br_new.y - tl.y - img.rows; int right = br_new.x - tl.x - img.cols; -#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) +#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) if (can_use_gpu_) { if (!gpu_initialized_) @@ -603,7 +603,7 @@ void MultiBandBlender::feed(InputArray _img, InputArray mask, Point tl) void MultiBandBlender::blend(InputOutputArray dst, InputOutputArray dst_mask) { Rect dst_rc(0, 0, dst_roi_final_.width, dst_roi_final_.height); -#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) +#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) if (can_use_gpu_) { if (!gpu_initialized_) @@ -850,7 +850,7 @@ void createLaplacePyr(InputArray img, int num_levels, std::vector &pyr) void createLaplacePyrGpu(InputArray img, int num_levels, std::vector &pyr) { -#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) +#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) 
pyr.resize(num_levels + 1); std::vector gpu_pyr(num_levels + 1); @@ -891,7 +891,7 @@ void restoreImageFromLaplacePyr(std::vector &pyr) void restoreImageFromLaplacePyrGpu(std::vector &pyr) { -#if defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) +#if defined(HAVE_CUDA) && defined(HAVE_OPENCV_CUDAARITHM) && defined(HAVE_OPENCV_CUDAWARPING) if (pyr.empty()) return; From aac7c5465ba6ccfe0dc665ab0bae87f765e616ba Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Wed, 21 Oct 2020 22:47:56 +0000 Subject: [PATCH 045/152] core: move inline code from mat.inl.hpp --- modules/core/include/opencv2/core/mat.inl.hpp | 562 ------------------ modules/core/src/matrix.cpp | 280 ++++++++- modules/core/src/matrix_sparse.cpp | 88 +++ modules/core/src/umatrix.cpp | 146 +++++ 4 files changed, 513 insertions(+), 563 deletions(-) diff --git a/modules/core/include/opencv2/core/mat.inl.hpp b/modules/core/include/opencv2/core/mat.inl.hpp index 9b7df87d8b..b6ffd81795 100644 --- a/modules/core/include/opencv2/core/mat.inl.hpp +++ b/modules/core/include/opencv2/core/mat.inl.hpp @@ -489,158 +489,6 @@ CV__DEBUG_NS_END //////////////////////////////////////////// Mat ////////////////////////////////////////// -inline -Mat::Mat() - : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), - datalimit(0), allocator(0), u(0), size(&rows), step(0) -{} - -inline -Mat::Mat(int _rows, int _cols, int _type) - : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), - datalimit(0), allocator(0), u(0), size(&rows), step(0) -{ - create(_rows, _cols, _type); -} - -inline -Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s) - : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), - datalimit(0), allocator(0), u(0), size(&rows), step(0) -{ - create(_rows, _cols, _type); - *this = _s; -} - -inline -Mat::Mat(Size _sz, int _type) - : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), 
- datalimit(0), allocator(0), u(0), size(&rows), step(0) -{ - create( _sz.height, _sz.width, _type ); -} - -inline -Mat::Mat(Size _sz, int _type, const Scalar& _s) - : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), - datalimit(0), allocator(0), u(0), size(&rows), step(0) -{ - create(_sz.height, _sz.width, _type); - *this = _s; -} - -inline -Mat::Mat(int _dims, const int* _sz, int _type) - : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), - datalimit(0), allocator(0), u(0), size(&rows), step(0) -{ - create(_dims, _sz, _type); -} - -inline -Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s) - : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), - datalimit(0), allocator(0), u(0), size(&rows), step(0) -{ - create(_dims, _sz, _type); - *this = _s; -} - -inline -Mat::Mat(const std::vector& _sz, int _type) - : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), - datalimit(0), allocator(0), u(0), size(&rows), step(0) -{ - create(_sz, _type); -} - -inline -Mat::Mat(const std::vector& _sz, int _type, const Scalar& _s) - : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), - datalimit(0), allocator(0), u(0), size(&rows), step(0) -{ - create(_sz, _type); - *this = _s; -} - -inline -Mat::Mat(const Mat& m) - : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), - datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator), - u(m.u), size(&rows), step(0) -{ - if( u ) - CV_XADD(&u->refcount, 1); - if( m.dims <= 2 ) - { - step[0] = m.step[0]; step[1] = m.step[1]; - } - else - { - dims = 0; - copySize(m); - } -} - -inline -Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step) - : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols), - data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0), - allocator(0), u(0), 
size(&rows) -{ - CV_Assert(total() == 0 || data != NULL); - - size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type); - size_t minstep = cols * esz; - if( _step == AUTO_STEP ) - { - _step = minstep; - } - else - { - CV_DbgAssert( _step >= minstep ); - if (_step % esz1 != 0) - { - CV_Error(Error::BadStep, "Step must be a multiple of esz1"); - } - } - step[0] = _step; - step[1] = esz; - datalimit = datastart + _step * rows; - dataend = datalimit - _step + minstep; - updateContinuityFlag(); -} - -inline -Mat::Mat(Size _sz, int _type, void* _data, size_t _step) - : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width), - data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0), - allocator(0), u(0), size(&rows) -{ - CV_Assert(total() == 0 || data != NULL); - - size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type); - size_t minstep = cols*esz; - if( _step == AUTO_STEP ) - { - _step = minstep; - } - else - { - CV_DbgAssert( _step >= minstep ); - - if (_step % esz1 != 0) - { - CV_Error(Error::BadStep, "Step must be a multiple of esz1"); - } - } - step[0] = _step; - step[1] = esz; - datalimit = datastart + _step*rows; - dataend = datalimit - _step + minstep; - updateContinuityFlag(); -} - template inline Mat::Mat(const std::vector<_Tp>& vec, bool copyData) : flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()), @@ -778,43 +626,6 @@ Mat::Mat(const MatCommaInitializer_<_Tp>& commaInitializer) *this = commaInitializer.operator Mat_<_Tp>(); } -inline -Mat::~Mat() -{ - release(); - if( step.p != step.buf ) - fastFree(step.p); -} - -inline -Mat& Mat::operator = (const Mat& m) -{ - if( this != &m ) - { - if( m.u ) - CV_XADD(&m.u->refcount, 1); - release(); - flags = m.flags; - if( dims <= 2 && m.dims <= 2 ) - { - dims = m.dims; - rows = m.rows; - cols = m.cols; - step[0] = m.step[0]; - step[1] = m.step[1]; - } - else - copySize(m); - data = m.data; - datastart = m.datastart; - 
dataend = m.dataend; - datalimit = m.datalimit; - allocator = m.allocator; - u = m.u; - } - return *this; -} - inline Mat Mat::row(int y) const { @@ -851,67 +662,6 @@ Mat Mat::colRange(const Range& r) const return Mat(*this, Range::all(), r); } -inline -Mat Mat::clone() const -{ - Mat m; - copyTo(m); - return m; -} - -inline -void Mat::assignTo( Mat& m, int _type ) const -{ - if( _type < 0 ) - m = *this; - else - convertTo(m, _type); -} - -inline -void Mat::create(int _rows, int _cols, int _type) -{ - _type &= TYPE_MASK; - if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data ) - return; - int sz[] = {_rows, _cols}; - create(2, sz, _type); -} - -inline -void Mat::create(Size _sz, int _type) -{ - create(_sz.height, _sz.width, _type); -} - -inline -void Mat::addref() -{ - if( u ) - CV_XADD(&u->refcount, 1); -} - -inline -void Mat::release() -{ - if( u && CV_XADD(&u->refcount, -1) == 1 ) - deallocate(); - u = NULL; - datastart = dataend = datalimit = data = 0; - for(int i = 0; i < dims; i++) - size.p[i] = 0; -#ifdef _DEBUG - flags = MAGIC_VAL; - dims = rows = cols = 0; - if(step.p != step.buf) - { - fastFree(step.p); - step.p = step.buf; - size.p = &rows; - } -#endif -} - inline Mat Mat::operator()( Range _rowRange, Range _colRange ) const { @@ -980,40 +730,6 @@ int Mat::channels() const return CV_MAT_CN(flags); } -inline -size_t Mat::step1(int i) const -{ - return step.p[i] / elemSize1(); -} - -inline -bool Mat::empty() const -{ - return data == 0 || total() == 0 || dims == 0; -} - -inline -size_t Mat::total() const -{ - if( dims <= 2 ) - return (size_t)rows * cols; - size_t p = 1; - for( int i = 0; i < dims; i++ ) - p *= size[i]; - return p; -} - -inline -size_t Mat::total(int startDim, int endDim) const -{ - CV_Assert( 0 <= startDim && startDim <= endDim); - size_t p = 1; - int endDim_ = endDim <= dims ? 
endDim : dims; - for( int i = startDim; i < endDim_; i++ ) - p *= size[i]; - return p; -} - inline uchar* Mat::ptr(int y) { @@ -1544,22 +1260,6 @@ MatSize::operator const int*() const return p; } -inline -bool MatSize::operator == (const MatSize& sz) const -{ - int d = dims(); - int dsz = sz.dims(); - if( d != dsz ) - return false; - if( d == 2 ) - return p[0] == sz.p[0] && p[1] == sz.p[1]; - - for( int i = 0; i < d; i++ ) - if( p[i] != sz.p[i] ) - return false; - return true; -} - inline bool MatSize::operator != (const MatSize& sz) const { @@ -1820,9 +1520,7 @@ template inline void Mat_<_Tp>::release() { Mat::release(); -#ifdef _DEBUG flags = (flags & ~CV_MAT_TYPE_MASK) | traits::Type<_Tp>::value; -#endif } template inline @@ -2182,51 +1880,6 @@ Mat_<_Tp>::Mat_(MatExpr&& e) ///////////////////////////// SparseMat ///////////////////////////// -inline -SparseMat::SparseMat() - : flags(MAGIC_VAL), hdr(0) -{} - -inline -SparseMat::SparseMat(int _dims, const int* _sizes, int _type) - : flags(MAGIC_VAL), hdr(0) -{ - create(_dims, _sizes, _type); -} - -inline -SparseMat::SparseMat(const SparseMat& m) - : flags(m.flags), hdr(m.hdr) -{ - addref(); -} - -inline -SparseMat::~SparseMat() -{ - release(); -} - -inline -SparseMat& SparseMat::operator = (const SparseMat& m) -{ - if( this != &m ) - { - if( m.hdr ) - CV_XADD(&m.hdr->refcount, 1); - release(); - flags = m.flags; - hdr = m.hdr; - } - return *this; -} - -inline -SparseMat& SparseMat::operator = (const Mat& m) -{ - return (*this = SparseMat(m)); -} - inline SparseMat SparseMat::clone() const { @@ -2235,30 +1888,6 @@ SparseMat SparseMat::clone() const return temp; } -inline -void SparseMat::assignTo( SparseMat& m, int _type ) const -{ - if( _type < 0 ) - m = *this; - else - convertTo(m, _type); -} - -inline -void SparseMat::addref() -{ - if( hdr ) - CV_XADD(&hdr->refcount, 1); -} - -inline -void SparseMat::release() -{ - if( hdr && CV_XADD(&hdr->refcount, -1) == 1 ) - delete hdr; - hdr = 0; -} - inline size_t 
SparseMat::elemSize() const { @@ -2318,36 +1947,6 @@ size_t SparseMat::nzcount() const return hdr ? hdr->nodeCount : 0; } -inline -size_t SparseMat::hash(int i0) const -{ - return (size_t)i0; -} - -inline -size_t SparseMat::hash(int i0, int i1) const -{ - return (size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1; -} - -inline -size_t SparseMat::hash(int i0, int i1, int i2) const -{ - return ((size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1) * HASH_SCALE + (unsigned)i2; -} - -inline -size_t SparseMat::hash(const int* idx) const -{ - size_t h = (unsigned)idx[0]; - if( !hdr ) - return 0; - int d = hdr->dims; - for(int i = 1; i < d; i++ ) - h = h * HASH_SCALE + (unsigned)idx[i]; - return h; -} - template inline _Tp& SparseMat::ref(int i0, size_t* hashval) { @@ -3667,74 +3266,6 @@ const Mat_<_Tp>& operator /= (const Mat_<_Tp>& a, const MatExpr& b) //////////////////////////////// UMat //////////////////////////////// -inline -UMat::UMat(UMatUsageFlags _usageFlags) -: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) -{} - -inline -UMat::UMat(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags) -: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) -{ - create(_rows, _cols, _type); -} - -inline -UMat::UMat(int _rows, int _cols, int _type, const Scalar& _s, UMatUsageFlags _usageFlags) -: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) -{ - create(_rows, _cols, _type); - *this = _s; -} - -inline -UMat::UMat(Size _sz, int _type, UMatUsageFlags _usageFlags) -: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) -{ - create( _sz.height, _sz.width, _type ); -} - -inline -UMat::UMat(Size _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags) -: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), 
usageFlags(_usageFlags), u(0), offset(0), size(&rows) -{ - create(_sz.height, _sz.width, _type); - *this = _s; -} - -inline -UMat::UMat(int _dims, const int* _sz, int _type, UMatUsageFlags _usageFlags) -: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) -{ - create(_dims, _sz, _type); -} - -inline -UMat::UMat(int _dims, const int* _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags) -: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) -{ - create(_dims, _sz, _type); - *this = _s; -} - -inline -UMat::UMat(const UMat& m) -: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator), - usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows) -{ - addref(); - if( m.dims <= 2 ) - { - step[0] = m.step[0]; step[1] = m.step[1]; - } - else - { - dims = 0; - copySize(m); - } -} - - template inline UMat::UMat(const std::vector<_Tp>& vec, bool copyData) : flags(MAGIC_VAL | traits::Type<_Tp>::value | CV_MAT_CONT_FLAG), dims(2), rows((int)vec.size()), @@ -3751,33 +3282,6 @@ cols(1), allocator(0), usageFlags(USAGE_DEFAULT), u(0), offset(0), size(&rows) Mat((int)vec.size(), 1, traits::Type<_Tp>::value, (uchar*)&vec[0]).copyTo(*this); } -inline -UMat& UMat::operator = (const UMat& m) -{ - if( this != &m ) - { - const_cast(m).addref(); - release(); - flags = m.flags; - if( dims <= 2 && m.dims <= 2 ) - { - dims = m.dims; - rows = m.rows; - cols = m.cols; - step[0] = m.step[0]; - step[1] = m.step[1]; - } - else - copySize(m); - allocator = m.allocator; - if (usageFlags == USAGE_DEFAULT) - usageFlags = m.usageFlags; - u = m.u; - offset = m.offset; - } - return *this; -} - inline UMat UMat::row(int y) const { @@ -3814,55 +3318,6 @@ UMat UMat::colRange(const Range& r) const return UMat(*this, Range::all(), r); } -inline -UMat UMat::clone() const -{ - UMat m; - copyTo(m); - return m; -} - -inline -void UMat::assignTo( UMat& m, 
int _type ) const -{ - if( _type < 0 ) - m = *this; - else - convertTo(m, _type); -} - -inline -void UMat::create(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags) -{ - _type &= TYPE_MASK; - if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && u ) - return; - int sz[] = {_rows, _cols}; - create(2, sz, _type, _usageFlags); -} - -inline -void UMat::create(Size _sz, int _type, UMatUsageFlags _usageFlags) -{ - create(_sz.height, _sz.width, _type, _usageFlags); -} - -inline -void UMat::addref() -{ - if( u ) - CV_XADD(&(u->urefcount), 1); -} - -inline void UMat::release() -{ - if( u && CV_XADD(&(u->urefcount), -1) == 1 ) - deallocate(); - for(int i = 0; i < dims; i++) - size.p[i] = 0; - u = 0; -} - inline UMat UMat::operator()( Range _rowRange, Range _colRange ) const { @@ -3937,23 +3392,6 @@ size_t UMat::step1(int i) const return step.p[i] / elemSize1(); } -inline -bool UMat::empty() const -{ - return u == 0 || total() == 0 || dims == 0; -} - -inline -size_t UMat::total() const -{ - if( dims <= 2 ) - return (size_t)rows * cols; - size_t p = 1; - for( int i = 0; i < dims; i++ ) - p *= size[i]; - return p; -} - #ifdef CV_CXX_MOVE_SEMANTICS inline diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp index fc9e4c69b2..178e291d3f 100644 --- a/modules/core/src/matrix.cpp +++ b/modules/core/src/matrix.cpp @@ -204,6 +204,21 @@ MatAllocator* Mat::getStdAllocator() //================================================================================================== +bool MatSize::operator==(const MatSize& sz) const +{ + int d = dims(); + int dsz = sz.dims(); + if( d != dsz ) + return false; + if( d == 2 ) + return p[0] == sz.p[0] && p[1] == sz.p[1]; + + for( int i = 0; i < d; i++ ) + if( p[i] != sz.p[i] ) + return false; + return true; +} + void setSize( Mat& m, int _dims, const int* _sz, const size_t* _steps, bool autoSteps) { CV_Assert( 0 <= _dims && _dims <= CV_MAX_DIM ); @@ -320,7 +335,270 @@ void finalizeHdr(Mat& m) m.dataend = 
m.datalimit = 0; } -//================================================================================================== +//======================================= Mat ====================================================== + +Mat::Mat() + : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), + datalimit(0), allocator(0), u(0), size(&rows), step(0) +{} + +Mat::Mat(int _rows, int _cols, int _type) + : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), + datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + create(_rows, _cols, _type); +} + +Mat::Mat(int _rows, int _cols, int _type, const Scalar& _s) + : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), + datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + create(_rows, _cols, _type); + *this = _s; +} + +Mat::Mat(Size _sz, int _type) + : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), + datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + create( _sz.height, _sz.width, _type ); +} + +Mat::Mat(Size _sz, int _type, const Scalar& _s) + : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), + datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + create(_sz.height, _sz.width, _type); + *this = _s; +} + +Mat::Mat(int _dims, const int* _sz, int _type) + : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), + datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + create(_dims, _sz, _type); +} + +Mat::Mat(int _dims, const int* _sz, int _type, const Scalar& _s) + : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), + datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + create(_dims, _sz, _type); + *this = _s; +} + +Mat::Mat(const std::vector& _sz, int _type) + : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), + datalimit(0), allocator(0), u(0), size(&rows), 
step(0) +{ + create(_sz, _type); +} + +Mat::Mat(const std::vector& _sz, int _type, const Scalar& _s) + : flags(MAGIC_VAL), dims(0), rows(0), cols(0), data(0), datastart(0), dataend(0), + datalimit(0), allocator(0), u(0), size(&rows), step(0) +{ + create(_sz, _type); + *this = _s; +} + +Mat::Mat(const Mat& m) + : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), + datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator), + u(m.u), size(&rows), step(0) +{ + if( u ) + CV_XADD(&u->refcount, 1); + if( m.dims <= 2 ) + { + step[0] = m.step[0]; step[1] = m.step[1]; + } + else + { + dims = 0; + copySize(m); + } +} + +Mat::Mat(int _rows, int _cols, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_rows), cols(_cols), + data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0), + allocator(0), u(0), size(&rows) +{ + CV_Assert(total() == 0 || data != NULL); + + size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type); + size_t minstep = cols * esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + } + else + { + CV_Assert( _step >= minstep ); + if (_step % esz1 != 0) + { + CV_Error(Error::BadStep, "Step must be a multiple of esz1"); + } + } + step[0] = _step; + step[1] = esz; + datalimit = datastart + _step * rows; + dataend = datalimit - _step + minstep; + updateContinuityFlag(); +} + +Mat::Mat(Size _sz, int _type, void* _data, size_t _step) + : flags(MAGIC_VAL + (_type & TYPE_MASK)), dims(2), rows(_sz.height), cols(_sz.width), + data((uchar*)_data), datastart((uchar*)_data), dataend(0), datalimit(0), + allocator(0), u(0), size(&rows) +{ + CV_Assert(total() == 0 || data != NULL); + + size_t esz = CV_ELEM_SIZE(_type), esz1 = CV_ELEM_SIZE1(_type); + size_t minstep = cols*esz; + if( _step == AUTO_STEP ) + { + _step = minstep; + } + else + { + CV_Assert(_step >= minstep); + + if (_step % esz1 != 0) + { + CV_Error(Error::BadStep, "Step must be a multiple of 
esz1"); + } + } + step[0] = _step; + step[1] = esz; + datalimit = datastart + _step*rows; + dataend = datalimit - _step + minstep; + updateContinuityFlag(); +} + + +Mat::~Mat() +{ + release(); + if( step.p != step.buf ) + fastFree(step.p); +} + +Mat& Mat::operator=(const Mat& m) +{ + if( this != &m ) + { + if( m.u ) + CV_XADD(&m.u->refcount, 1); + release(); + flags = m.flags; + if( dims <= 2 && m.dims <= 2 ) + { + dims = m.dims; + rows = m.rows; + cols = m.cols; + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + copySize(m); + data = m.data; + datastart = m.datastart; + dataend = m.dataend; + datalimit = m.datalimit; + allocator = m.allocator; + u = m.u; + } + return *this; +} + +Mat Mat::clone() const +{ + Mat m; + copyTo(m); + return m; +} + +void Mat::assignTo( Mat& m, int _type ) const +{ + if( _type < 0 ) + m = *this; + else + convertTo(m, _type); +} + +void Mat::create(int _rows, int _cols, int _type) +{ + _type &= TYPE_MASK; + if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && data ) + return; + int sz[] = {_rows, _cols}; + create(2, sz, _type); +} + +void Mat::create(Size _sz, int _type) +{ + create(_sz.height, _sz.width, _type); +} + +void Mat::addref() +{ + if( u ) + CV_XADD(&u->refcount, 1); +} + +void Mat::release() +{ + if( u && CV_XADD(&u->refcount, -1) == 1 ) + deallocate(); + u = NULL; + datastart = dataend = datalimit = data = 0; + for(int i = 0; i < dims; i++) + size.p[i] = 0; +#ifdef _DEBUG + flags = MAGIC_VAL; + dims = rows = cols = 0; + if(step.p != step.buf) + { + fastFree(step.p); + step.p = step.buf; + size.p = &rows; + } +#endif +} + +size_t Mat::step1(int i) const +{ + return step.p[i] / elemSize1(); +} + +bool Mat::empty() const +{ + return data == 0 || total() == 0 || dims == 0; +} + +size_t Mat::total() const +{ + if( dims <= 2 ) + return (size_t)rows * cols; + size_t p = 1; + for( int i = 0; i < dims; i++ ) + p *= size[i]; + return p; +} + +size_t Mat::total(int startDim, int endDim) const +{ + CV_Assert( 0 
<= startDim && startDim <= endDim); + size_t p = 1; + int endDim_ = endDim <= dims ? endDim : dims; + for( int i = startDim; i < endDim_; i++ ) + p *= size[i]; + return p; +} + + void Mat::create(int d, const int* _sizes, int _type) { diff --git a/modules/core/src/matrix_sparse.cpp b/modules/core/src/matrix_sparse.cpp index 61e7e90a56..05d16d706e 100644 --- a/modules/core/src/matrix_sparse.cpp +++ b/modules/core/src/matrix_sparse.cpp @@ -176,6 +176,94 @@ void SparseMat::Hdr::clear() nodeCount = freeList = 0; } +///////////////////////////// SparseMat ///////////////////////////// + +SparseMat::SparseMat() + : flags(MAGIC_VAL), hdr(0) +{} + +SparseMat::SparseMat(int _dims, const int* _sizes, int _type) + : flags(MAGIC_VAL), hdr(0) +{ + create(_dims, _sizes, _type); +} + +SparseMat::SparseMat(const SparseMat& m) + : flags(m.flags), hdr(m.hdr) +{ + addref(); +} + +SparseMat::~SparseMat() +{ + release(); +} + +SparseMat& SparseMat::operator = (const SparseMat& m) +{ + if( this != &m ) + { + if( m.hdr ) + CV_XADD(&m.hdr->refcount, 1); + release(); + flags = m.flags; + hdr = m.hdr; + } + return *this; +} + +SparseMat& SparseMat::operator=(const Mat& m) +{ + return (*this = SparseMat(m)); +} + +void SparseMat::assignTo(SparseMat& m, int _type) const +{ + if( _type < 0 ) + m = *this; + else + convertTo(m, _type); +} + +void SparseMat::addref() +{ + if( hdr ) + CV_XADD(&hdr->refcount, 1); +} + +void SparseMat::release() +{ + if( hdr && CV_XADD(&hdr->refcount, -1) == 1 ) + delete hdr; + hdr = 0; +} + +size_t SparseMat::hash(int i0) const +{ + return (size_t)i0; +} + +size_t SparseMat::hash(int i0, int i1) const +{ + return (size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1; +} + +size_t SparseMat::hash(int i0, int i1, int i2) const +{ + return ((size_t)(unsigned)i0 * HASH_SCALE + (unsigned)i1) * HASH_SCALE + (unsigned)i2; +} + +size_t SparseMat::hash(const int* idx) const +{ + size_t h = (unsigned)idx[0]; + if( !hdr ) + return 0; + int d = hdr->dims; + for(int i = 1; i < d; 
i++ ) + h = h * HASH_SCALE + (unsigned)idx[i]; + return h; +} + SparseMat::SparseMat(const Mat& m) : flags(MAGIC_VAL), hdr(0) diff --git a/modules/core/src/umatrix.cpp b/modules/core/src/umatrix.cpp index 9fe8122d22..f21cf7b7e2 100644 --- a/modules/core/src/umatrix.cpp +++ b/modules/core/src/umatrix.cpp @@ -228,6 +228,152 @@ UMatDataAutoLock::~UMatDataAutoLock() getUMatDataAutoLocker().release(u1, u2); } +//////////////////////////////// UMat //////////////////////////////// + +UMat::UMat(UMatUsageFlags _usageFlags) +: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) +{} + +UMat::UMat(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags) +: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) +{ + create(_rows, _cols, _type); +} + +UMat::UMat(int _rows, int _cols, int _type, const Scalar& _s, UMatUsageFlags _usageFlags) +: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) +{ + create(_rows, _cols, _type); + *this = _s; +} + +UMat::UMat(Size _sz, int _type, UMatUsageFlags _usageFlags) +: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) +{ + create( _sz.height, _sz.width, _type ); +} + +UMat::UMat(Size _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags) +: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) +{ + create(_sz.height, _sz.width, _type); + *this = _s; +} + +UMat::UMat(int _dims, const int* _sz, int _type, UMatUsageFlags _usageFlags) +: flags(MAGIC_VAL), dims(0), rows(0), cols(0), allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) +{ + create(_dims, _sz, _type); +} + +UMat::UMat(int _dims, const int* _sz, int _type, const Scalar& _s, UMatUsageFlags _usageFlags) +: flags(MAGIC_VAL), dims(0), rows(0), cols(0), 
allocator(0), usageFlags(_usageFlags), u(0), offset(0), size(&rows) +{ + create(_dims, _sz, _type); + *this = _s; +} + +UMat::UMat(const UMat& m) +: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator), + usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows) +{ + addref(); + if( m.dims <= 2 ) + { + step[0] = m.step[0]; step[1] = m.step[1]; + } + else + { + dims = 0; + copySize(m); + } +} + +UMat& UMat::operator=(const UMat& m) +{ + if( this != &m ) + { + const_cast(m).addref(); + release(); + flags = m.flags; + if( dims <= 2 && m.dims <= 2 ) + { + dims = m.dims; + rows = m.rows; + cols = m.cols; + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + copySize(m); + allocator = m.allocator; + if (usageFlags == USAGE_DEFAULT) + usageFlags = m.usageFlags; + u = m.u; + offset = m.offset; + } + return *this; +} + +UMat UMat::clone() const +{ + UMat m; + copyTo(m); + return m; +} + +void UMat::assignTo(UMat& m, int _type) const +{ + if( _type < 0 ) + m = *this; + else + convertTo(m, _type); +} + +void UMat::create(int _rows, int _cols, int _type, UMatUsageFlags _usageFlags) +{ + _type &= TYPE_MASK; + if( dims <= 2 && rows == _rows && cols == _cols && type() == _type && u ) + return; + int sz[] = {_rows, _cols}; + create(2, sz, _type, _usageFlags); +} + +void UMat::create(Size _sz, int _type, UMatUsageFlags _usageFlags) +{ + create(_sz.height, _sz.width, _type, _usageFlags); +} + +void UMat::addref() +{ + if( u ) + CV_XADD(&(u->urefcount), 1); +} + +void UMat::release() +{ + if( u && CV_XADD(&(u->urefcount), -1) == 1 ) + deallocate(); + for(int i = 0; i < dims; i++) + size.p[i] = 0; + u = 0; +} + +bool UMat::empty() const +{ + return u == 0 || total() == 0 || dims == 0; +} + +size_t UMat::total() const +{ + if( dims <= 2 ) + return (size_t)rows * cols; + size_t p = 1; + for( int i = 0; i < dims; i++ ) + p *= size[i]; + return p; +} + MatAllocator* UMat::getStdAllocator() { From ea1e3fb90d060939ee4618824529cc0ae0bf018a Mon Sep 17 
00:00:00 2001 From: Quentin Chateau Date: Thu, 22 Oct 2020 14:24:58 +0200 Subject: [PATCH 046/152] Merge pull request #18624 from qchateau:similarity-mask * support similarity masks * add test for similarity threshold * short license in test * use UMat in buildSimilarityMask * fix win32 warnings * fix test indentation * fix umat/mat sync * no in-place argument for erode/dilate --- .../stitching/detail/exposure_compensate.hpp | 21 +++- modules/stitching/src/exposure_compensate.cpp | 99 ++++++++++++++++++- .../test/test_exposure_compensate.cpp | 70 +++++++++++++ modules/stitching/test/test_precomp.hpp | 1 + 4 files changed, 187 insertions(+), 4 deletions(-) create mode 100644 modules/stitching/test/test_exposure_compensate.cpp diff --git a/modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp b/modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp index 2b76d0923d..074c9b6dfb 100644 --- a/modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp +++ b/modules/stitching/include/opencv2/stitching/detail/exposure_compensate.hpp @@ -115,7 +115,7 @@ public: CV_WRAP GainCompensator() : GainCompensator(1) {} CV_WRAP GainCompensator(int nr_feeds) - : nr_feeds_(nr_feeds) {} + : nr_feeds_(nr_feeds), similarity_threshold_(1) {} void feed(const std::vector &corners, const std::vector &images, const std::vector > &masks) CV_OVERRIDE; void singleFeed(const std::vector &corners, const std::vector &images, @@ -125,11 +125,18 @@ public: CV_WRAP void setMatGains(std::vector& umv) CV_OVERRIDE ; CV_WRAP void setNrFeeds(int nr_feeds) { nr_feeds_ = nr_feeds; } CV_WRAP int getNrFeeds() { return nr_feeds_; } + CV_WRAP void setSimilarityThreshold(double similarity_threshold) { similarity_threshold_ = similarity_threshold; } + CV_WRAP double getSimilarityThreshold() const { return similarity_threshold_; } + void prepareSimilarityMask(const std::vector &corners, const std::vector &images); std::vector gains() const; private: + UMat 
buildSimilarityMask(InputArray src_array1, InputArray src_array2); + Mat_ gains_; int nr_feeds_; + double similarity_threshold_; + std::vector similarities_; }; /** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image @@ -138,7 +145,8 @@ intensities on each channel independently. class CV_EXPORTS_W ChannelsCompensator : public ExposureCompensator { public: - CV_WRAP ChannelsCompensator(int nr_feeds=1) : nr_feeds_(nr_feeds) {} + CV_WRAP ChannelsCompensator(int nr_feeds=1) + : nr_feeds_(nr_feeds), similarity_threshold_(1) {} void feed(const std::vector &corners, const std::vector &images, const std::vector > &masks) CV_OVERRIDE; CV_WRAP void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE; @@ -146,11 +154,14 @@ public: CV_WRAP void setMatGains(std::vector& umv) CV_OVERRIDE; CV_WRAP void setNrFeeds(int nr_feeds) { nr_feeds_ = nr_feeds; } CV_WRAP int getNrFeeds() { return nr_feeds_; } + CV_WRAP void setSimilarityThreshold(double similarity_threshold) { similarity_threshold_ = similarity_threshold; } + CV_WRAP double getSimilarityThreshold() const { return similarity_threshold_; } std::vector gains() const { return gains_; } private: std::vector gains_; int nr_feeds_; + double similarity_threshold_; }; /** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image blocks. 
@@ -159,12 +170,15 @@ class CV_EXPORTS_W BlocksCompensator : public ExposureCompensator { public: BlocksCompensator(int bl_width=32, int bl_height=32, int nr_feeds=1) - : bl_width_(bl_width), bl_height_(bl_height), nr_feeds_(nr_feeds), nr_gain_filtering_iterations_(2) {} + : bl_width_(bl_width), bl_height_(bl_height), nr_feeds_(nr_feeds), nr_gain_filtering_iterations_(2), + similarity_threshold_(1) {} CV_WRAP void apply(int index, Point corner, InputOutputArray image, InputArray mask) CV_OVERRIDE; CV_WRAP void getMatGains(CV_OUT std::vector& umv) CV_OVERRIDE; CV_WRAP void setMatGains(std::vector& umv) CV_OVERRIDE; CV_WRAP void setNrFeeds(int nr_feeds) { nr_feeds_ = nr_feeds; } CV_WRAP int getNrFeeds() { return nr_feeds_; } + CV_WRAP void setSimilarityThreshold(double similarity_threshold) { similarity_threshold_ = similarity_threshold; } + CV_WRAP double getSimilarityThreshold() const { return similarity_threshold_; } CV_WRAP void setBlockSize(int width, int height) { bl_width_ = width; bl_height_ = height; } CV_WRAP void setBlockSize(Size size) { setBlockSize(size.width, size.height); } CV_WRAP Size getBlockSize() const { return Size(bl_width_, bl_height_); } @@ -184,6 +198,7 @@ private: std::vector gain_maps_; int nr_feeds_; int nr_gain_filtering_iterations_; + double similarity_threshold_; }; /** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image block diff --git a/modules/stitching/src/exposure_compensate.cpp b/modules/stitching/src/exposure_compensate.cpp index 7213349ccc..df2b8779bb 100644 --- a/modules/stitching/src/exposure_compensate.cpp +++ b/modules/stitching/src/exposure_compensate.cpp @@ -90,6 +90,7 @@ void GainCompensator::feed(const std::vector &corners, const std::vector< const int num_images = static_cast(images.size()); Mat accumulated_gains; + prepareSimilarityMask(corners, images); for (int n = 0; n < nr_feeds_; ++n) { @@ -133,6 +134,8 @@ void GainCompensator::singleFeed(const std::vector &corners, 
const std::v Mat subimg1, subimg2; Mat_ submask1, submask2, intersect; + std::vector::iterator similarity_it = similarities_.begin(); + for (int i = 0; i < num_images; ++i) { for (int j = i; j < num_images; ++j) @@ -147,6 +150,13 @@ void GainCompensator::singleFeed(const std::vector &corners, const std::v submask2 = masks[j].first(Rect(roi.tl() - corners[j], roi.br() - corners[j])).getMat(ACCESS_READ); intersect = (submask1 == masks[i].second) & (submask2 == masks[j].second); + if (!similarities_.empty()) + { + CV_Assert(similarity_it != similarities_.end()); + UMat similarity = *similarity_it++; + bitwise_and(intersect, similarity, intersect); + } + int intersect_count = countNonZero(intersect); N(i, j) = N(j, i) = std::max(1, intersect_count); @@ -298,6 +308,88 @@ void GainCompensator::setMatGains(std::vector& umv) } } +void GainCompensator::prepareSimilarityMask( + const std::vector &corners, const std::vector &images) +{ + if (similarity_threshold_ >= 1) + { + LOGLN(" skipping similarity mask: disabled"); + return; + } + if (!similarities_.empty()) + { + LOGLN(" skipping similarity mask: already set"); + return; + } + + LOGLN(" calculating similarity mask"); + const int num_images = static_cast(images.size()); + for (int i = 0; i < num_images; ++i) + { + for (int j = i; j < num_images; ++j) + { + Rect roi; + if (overlapRoi(corners[i], corners[j], images[i].size(), images[j].size(), roi)) + { + UMat subimg1 = images[i](Rect(roi.tl() - corners[i], roi.br() - corners[i])); + UMat subimg2 = images[j](Rect(roi.tl() - corners[j], roi.br() - corners[j])); + UMat similarity = buildSimilarityMask(subimg1, subimg2); + similarities_.push_back(similarity); + } + } + } +} + +UMat GainCompensator::buildSimilarityMask(InputArray src_array1, InputArray src_array2) +{ + CV_Assert(src_array1.rows() == src_array2.rows() && src_array1.cols() == src_array2.cols()); + CV_Assert(src_array1.type() == src_array2.type()); + CV_Assert(src_array1.type() == CV_8UC3 || src_array1.type() == 
CV_8UC1); + + Mat src1 = src_array1.getMat(); + Mat src2 = src_array2.getMat(); + + UMat umat_similarity(src1.rows, src1.cols, CV_8UC1); + Mat similarity = umat_similarity.getMat(ACCESS_WRITE); + + if (src1.channels() == 3) + { + for (int y = 0; y < similarity.rows; ++y) + { + for (int x = 0; x < similarity.cols; ++x) + { + Vec vec_diff = + Vec(*src1.ptr>(y, x)) + - Vec(*src2.ptr>(y, x)); + double diff = norm(vec_diff * (1.f / 255.f)); + + *similarity.ptr(y, x) = diff <= similarity_threshold_ ? 255 : 0; + } + } + } + else // if (src1.channels() == 1) + { + for (int y = 0; y < similarity.rows; ++y) + { + for (int x = 0; x < similarity.cols; ++x) + { + float diff = std::abs(static_cast(*src1.ptr(y, x)) + - static_cast(*src2.ptr(y, x))) / 255.f; + + *similarity.ptr(y, x) = diff <= similarity_threshold_ ? 255 : 0; + } + } + } + similarity.release(); + + Mat kernel = getStructuringElement(MORPH_RECT, Size(3,3)); + UMat umat_erode; + erode(umat_similarity, umat_erode, kernel); + dilate(umat_erode, umat_similarity, kernel); + + return umat_similarity; +} + void ChannelsCompensator::feed(const std::vector &corners, const std::vector &images, const std::vector > &masks) { @@ -317,11 +409,15 @@ void ChannelsCompensator::feed(const std::vector &corners, const std::vec // For each channel, feed the channel of each image in a GainCompensator gains_.clear(); gains_.resize(images.size()); + + GainCompensator compensator(getNrFeeds()); + compensator.setSimilarityThreshold(getSimilarityThreshold()); + compensator.prepareSimilarityMask(corners, images); + for (int c = 0; c < 3; ++c) { const std::vector& channels = images_channels[c]; - GainCompensator compensator(getNrFeeds()); compensator.feed(corners, channels, masks); std::vector gains = compensator.gains(); @@ -400,6 +496,7 @@ void BlocksCompensator::feed(const std::vector &corners, const std::vecto { Compensator compensator; compensator.setNrFeeds(getNrFeeds()); + compensator.setSimilarityThreshold(getSimilarityThreshold()); 
compensator.feed(block_corners, block_images, block_masks); gain_maps_.clear(); diff --git a/modules/stitching/test/test_exposure_compensate.cpp b/modules/stitching/test/test_exposure_compensate.cpp new file mode 100644 index 0000000000..3f34742095 --- /dev/null +++ b/modules/stitching/test/test_exposure_compensate.cpp @@ -0,0 +1,70 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "test_precomp.hpp" + +namespace opencv_test { +namespace { + +double minPSNR(UMat src1, UMat src2) +{ + std::vector src1_channels, src2_channels; + split(src1, src1_channels); + split(src2, src2_channels); + + double psnr = cvtest::PSNR(src1_channels[0], src2_channels[0]); + psnr = std::min(psnr, cvtest::PSNR(src1_channels[1], src2_channels[1])); + return std::min(psnr, cvtest::PSNR(src1_channels[2], src2_channels[2])); +} + +TEST(ExposureCompensate, SimilarityThreshold) +{ + UMat source; + imread(cvtest::TS::ptr()->get_data_path() + "stitching/s1.jpg").copyTo(source); + + UMat image1 = source.clone(); + UMat image2 = source.clone(); + + // Add a big artifact + image2(Rect(150, 150, 100, 100)).setTo(Scalar(0, 0, 255)); + + UMat mask(image1.size(), CV_8U); + mask.setTo(255); + + detail::BlocksChannelsCompensator compensator; + compensator.setNrGainsFilteringIterations(0); // makes it more clear + + // Feed the compensator, image 1 and 2 are perfectly + // identical, except for the red artifact in image 2 + // Apart from that artifact, there is no exposure to compensate + compensator.setSimilarityThreshold(1); + uchar xff = 255; + compensator.feed( + {{}, {}}, + {image1, image2}, + {{mask, xff}, {mask, xff}} + ); + // Verify that the artifact in image 2 did create + // an artifact in image1 during the exposure compensation + UMat image1_result = image1.clone(); + compensator.apply(0, {}, image1_result, mask); + double 
psnr_no_similarity_mask = minPSNR(image1, image1_result); + EXPECT_LT(psnr_no_similarity_mask, 45); + + // Add a similarity threshold and verify that + // the artifact in image1 is gone + compensator.setSimilarityThreshold(0.1); + compensator.feed( + {{}, {}}, + {image1, image2}, + {{mask, xff}, {mask, xff}} + ); + image1_result = image1.clone(); + compensator.apply(0, {}, image1_result, mask); + double psnr_similarity_mask = minPSNR(image1, image1_result); + EXPECT_GT(psnr_similarity_mask, 300); +} + +} // namespace +} // namespace opencv_test diff --git a/modules/stitching/test/test_precomp.hpp b/modules/stitching/test/test_precomp.hpp index f3ebc682c0..8e7709a7ec 100644 --- a/modules/stitching/test/test_precomp.hpp +++ b/modules/stitching/test/test_precomp.hpp @@ -8,6 +8,7 @@ #include "opencv2/stitching.hpp" #include "opencv2/stitching/detail/matchers.hpp" #include "opencv2/stitching/detail/blenders.hpp" +#include "opencv2/stitching/detail/exposure_compensate.hpp" #ifdef HAVE_OPENCV_XFEATURES2D #include "opencv2/xfeatures2d/nonfree.hpp" From 61a8cf8ba7ba540904db432e35690fb72cc683b2 Mon Sep 17 00:00:00 2001 From: Justin Frank Date: Tue, 20 Oct 2020 17:31:34 -0700 Subject: [PATCH 047/152] Fix TypeError when building for WebAssembly with Python 3 --- modules/js/src/make_umd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/js/src/make_umd.py b/modules/js/src/make_umd.py index 8e50da585d..08d9e39e13 100644 --- a/modules/js/src/make_umd.py +++ b/modules/js/src/make_umd.py @@ -103,7 +103,7 @@ def make_umd(opencvjs, cvjs): Module = {}; return cv(Module); })); - """ % (content)).lstrip()) + """ % (content)).lstrip().encode()) if __name__ == "__main__": if len(sys.argv) > 2: From 72dfd4846e184c480f27f137f3b078b538d1b017 Mon Sep 17 00:00:00 2001 From: Giles Payne Date: Fri, 23 Oct 2020 20:19:36 +0900 Subject: [PATCH 048/152] Merge pull request #18637 from komakai:build-for-distribution Add support for Swift version independence * Build for 
distribution (Swift version independence) when new Xcode build system is available * Add module map and set "Defines Modules" flag --- modules/objc/generator/gen_objc.py | 12 +++- .../generator/templates/cmakelists.template | 56 +++++++++++++------ 2 files changed, 49 insertions(+), 19 deletions(-) diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py index c7eabdfb0d..87e42e821d 100755 --- a/modules/objc/generator/gen_objc.py +++ b/modules/objc/generator/gen_objc.py @@ -1347,7 +1347,17 @@ typedef NS_ENUM(int, {2}) {{ def finalize(self, output_objc_path): opencv_header_file = os.path.join(output_objc_path, framework_name + ".h") - self.save(opencv_header_file, '\n'.join(['#import "%s"' % os.path.basename(f) for f in self.header_files])) + opencv_header = "#import \n\n" + opencv_header += "// ! Project version number\nFOUNDATION_EXPORT double " + framework_name + "VersionNumber;\n\n" + opencv_header += "// ! Project version string\nFOUNDATION_EXPORT const unsigned char " + framework_name + "VersionString[];\n\n" + opencv_header += "\n".join(["#import <" + framework_name + "/%s>" % os.path.basename(f) for f in self.header_files]) + self.save(opencv_header_file, opencv_header) + opencv_modulemap_file = os.path.join(output_objc_path, framework_name + ".modulemap") + opencv_modulemap = "framework module " + framework_name + " {\n" + opencv_modulemap += " umbrella header \"" + framework_name + ".h\"\n" + opencv_modulemap += "\n".join([" header \"%s\"" % os.path.basename(f) for f in self.header_files]) + opencv_modulemap += "\n export *\n module * {export *}\n}\n" + self.save(opencv_modulemap_file, opencv_modulemap) cmakelist_template = read_contents(os.path.join(SCRIPT_DIR, 'templates/cmakelists.template')) cmakelist = Template(cmakelist_template).substitute(modules = ";".join(modules), framework = framework_name) self.save(os.path.join(dstdir, "CMakeLists.txt"), cmakelist) diff --git a/modules/objc/generator/templates/cmakelists.template 
b/modules/objc/generator/templates/cmakelists.template index e928a6d21a..2cfc2474cd 100644 --- a/modules/objc/generator/templates/cmakelists.template +++ b/modules/objc/generator/templates/cmakelists.template @@ -13,32 +13,52 @@ set (SUPPRESS_WARNINGS_FLAGS "-Wno-incomplete-umbrella") set (CMAKE_CXX_FLAGS "$${CMAKE_CXX_FLAGS} $${OBJC_COMPILE_FLAGS} $${SUPPRESS_WARNINGS_FLAGS}") # grab the files -file(GLOB_RECURSE objc_sources "objc/*\.h" "objc/*\.m" "objc/*\.mm" "objc/*\.swift") +file(GLOB_RECURSE objc_sources "objc/*\.h" "objc/*\.m" "objc/*\.mm" "objc/*\.swift" "objc/*\.modulemap") file(GLOB_RECURSE objc_headers "*\.h") -add_library(opencv_objc_framework STATIC $${objc_sources}) +add_library($framework STATIC $${objc_sources}) -set_target_properties(opencv_objc_framework PROPERTIES LINKER_LANGUAGE CXX) +set_target_properties($framework PROPERTIES LINKER_LANGUAGE CXX) -target_include_directories(opencv_objc_framework PRIVATE "$${BUILD_ROOT}") -target_include_directories(opencv_objc_framework PRIVATE "$${BUILD_ROOT}/install/include") -target_include_directories(opencv_objc_framework PRIVATE "$${BUILD_ROOT}/install/include/opencv2") +target_include_directories($framework PRIVATE "$${BUILD_ROOT}") +target_include_directories($framework PRIVATE "$${BUILD_ROOT}/install/include") +target_include_directories($framework PRIVATE "$${BUILD_ROOT}/install/include/opencv2") foreach(m $${MODULES}) - target_include_directories(opencv_objc_framework PRIVATE "$${BUILD_ROOT}/modules/objc/gen/objc/$${m}") + target_include_directories($framework PRIVATE "$${BUILD_ROOT}/modules/objc/gen/objc/$${m}") endforeach() -install(TARGETS opencv_objc_framework LIBRARY DESTINATION lib) +install(TARGETS $framework LIBRARY DESTINATION lib) enable_language(Swift) # Additional target properties -set_target_properties(opencv_objc_framework PROPERTIES - OUTPUT_NAME "$framework" - ARCHIVE_OUTPUT_DIRECTORY "$${BUILD_ROOT}/lib" - XCODE_ATTRIBUTE_SWIFT_VERSION 5.0 - XCODE_ATTRIBUTE_OTHER_SWIFT_FLAGS "-Xcc 
$${SUPPRESS_WARNINGS_FLAGS}" - FRAMEWORK TRUE - MACOSX_FRAMEWORK_IDENTIFIER org.opencv.$framework - PUBLIC_HEADER "$${objc_headers}" - DEFINE_SYMBOL CVAPI_EXPORTS - ) +if (CMAKE_XCODE_BUILD_SYSTEM GREATER_EQUAL 12) + set_target_properties($framework PROPERTIES + OUTPUT_NAME "$framework" + ARCHIVE_OUTPUT_DIRECTORY "$${BUILD_ROOT}/lib" + XCODE_ATTRIBUTE_SWIFT_VERSION 5.0 + XCODE_ATTRIBUTE_DEFINES_MODULE YES + XCODE_ATTRIBUTE_BUILD_LIBRARY_FOR_DISTRIBUTION YES + XCODE_ATTRIBUTE_OTHER_SWIFT_FLAGS "-Xcc $${SUPPRESS_WARNINGS_FLAGS}" + XCODE_ATTRIBUTE_MODULEMAP_FILE objc/$framework.modulemap + XCODE_ATTRIBUTE_PRODUCT_BUNDLE_IDENTIFIER org.opencv.$framework + FRAMEWORK TRUE + MACOSX_FRAMEWORK_IDENTIFIER org.opencv.$framework + PUBLIC_HEADER "$${objc_headers}" + DEFINE_SYMBOL CVAPI_EXPORTS + ) +else() + set_target_properties($framework PROPERTIES + OUTPUT_NAME "$framework" + ARCHIVE_OUTPUT_DIRECTORY "$${BUILD_ROOT}/lib" + XCODE_ATTRIBUTE_SWIFT_VERSION 5.0 + XCODE_ATTRIBUTE_DEFINES_MODULE YES + XCODE_ATTRIBUTE_OTHER_SWIFT_FLAGS "-Xcc $${SUPPRESS_WARNINGS_FLAGS}" + XCODE_ATTRIBUTE_MODULEMAP_FILE objc/$framework.modulemap + XCODE_ATTRIBUTE_PRODUCT_BUNDLE_IDENTIFIER org.opencv.$framework + FRAMEWORK TRUE + MACOSX_FRAMEWORK_IDENTIFIER org.opencv.$framework + PUBLIC_HEADER "$${objc_headers}" + DEFINE_SYMBOL CVAPI_EXPORTS + ) +endif() From c71f2714c6c986fac00a46f25c0a49dc7774c4a6 Mon Sep 17 00:00:00 2001 From: ann <44146733+APrigarina@users.noreply.github.com> Date: Fri, 23 Oct 2020 21:42:45 +0300 Subject: [PATCH 049/152] Merge pull request #18003 from APrigarina:curved_qrcodes_decoding Detection and decoding of curved QR-codes * temp changes for curved qrcodes * added api for curved qr code decoding * fixed prototypes * refactored curved qr code decoding * refactored curved qr code decoding 2nd part * refactored curved qr code decoding 3rd part * refactored curved qr code decoding 4th part * added tests for curved qr code decoding * refactored curved qr code decoding 5th part --- 
.../objdetect/include/opencv2/objdetect.hpp | 29 +- modules/objdetect/src/qrcode.cpp | 1387 ++++++++++++++++- modules/objdetect/test/test_qrcode.cpp | 95 ++ 3 files changed, 1465 insertions(+), 46 deletions(-) diff --git a/modules/objdetect/include/opencv2/objdetect.hpp b/modules/objdetect/include/opencv2/objdetect.hpp index ea7b1ac801..0387b10239 100644 --- a/modules/objdetect/include/opencv2/objdetect.hpp +++ b/modules/objdetect/include/opencv2/objdetect.hpp @@ -702,6 +702,15 @@ public: */ CV_WRAP cv::String decode(InputArray img, InputArray points, OutputArray straight_qrcode = noArray()); + /** @brief Decodes QR code on a curved surface in image once it's found by the detect() method. + + Returns UTF8-encoded output string or empty string if the code cannot be decoded. + @param img grayscale or color (BGR) image containing QR code. + @param points Quadrangle vertices found by detect() method (or some other algorithm). + @param straight_qrcode The optional output image containing rectified and binarized QR code + */ + CV_WRAP cv::String decodeCurved(InputArray img, InputArray points, OutputArray straight_qrcode = noArray()); + /** @brief Both detects and decodes QR code @param img grayscale or color (BGR) image containing QR code. @@ -709,7 +718,17 @@ public: @param straight_qrcode The optional output image containing rectified and binarized QR code */ CV_WRAP cv::String detectAndDecode(InputArray img, OutputArray points=noArray(), - OutputArray straight_qrcode = noArray()); + OutputArray straight_qrcode = noArray()); + + /** @brief Both detects and decodes QR code on a curved surface + + @param img grayscale or color (BGR) image containing QR code. + @param points optional output array of vertices of the found QR code quadrangle. Will be empty if not found. 
+ @param straight_qrcode The optional output image containing rectified and binarized QR code + */ + CV_WRAP cv::String detectAndDecodeCurved(InputArray img, OutputArray points=noArray(), + OutputArray straight_qrcode = noArray()); + /** @brief Detects QR codes in image and returns the vector of the quadrangles containing the codes. @param img grayscale or color (BGR) image containing (or not) QR codes. @param points Output vector of vector of vertices of the minimum-area quadrangle containing the codes. @@ -801,6 +820,14 @@ CV_EXPORTS bool detectQRCode(InputArray in, std::vector &points, double e */ CV_EXPORTS bool decodeQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode = noArray()); +/** @brief Decode QR code on a curved surface in image and return text that is encrypted in QR code. + @param in Matrix of the type CV_8UC1 containing an image where QR code are detected. + @param points Input vector of vertices of a quadrangle of minimal area that describes QR code. + @param decoded_info String information that is encrypted in QR code. + @param straight_qrcode Matrix of the type CV_8UC1 containing an binary straight QR code. + */ +CV_EXPORTS bool decodeCurvedQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode = noArray()); + //! 
@} objdetect } diff --git a/modules/objdetect/src/qrcode.cpp b/modules/objdetect/src/qrcode.cpp index 5b4bb61e9e..5b86f74614 100644 --- a/modules/objdetect/src/qrcode.cpp +++ b/modules/objdetect/src/qrcode.cpp @@ -18,6 +18,7 @@ #include #include #include +#include namespace cv { @@ -63,7 +64,40 @@ static void updatePointsResult(OutputArray points_, const vector& point } } +static Point2f intersectionLines(Point2f a1, Point2f a2, Point2f b1, Point2f b2) +{ + const float divisor = (a1.x - a2.x) * (b1.y - b2.y) - (a1.y - a2.y) * (b1.x - b2.x); + const float eps = 0.001f; + if (abs(divisor) < eps) + return a2; + Point2f result_square_angle( + ((a1.x * a2.y - a1.y * a2.x) * (b1.x - b2.x) - + (b1.x * b2.y - b1.y * b2.x) * (a1.x - a2.x)) / + divisor, + ((a1.x * a2.y - a1.y * a2.x) * (b1.y - b2.y) - + (b1.x * b2.y - b1.y * b2.x) * (a1.y - a2.y)) / + divisor + ); + return result_square_angle; +} +// / | b +// / | +// / | +// a/ | c + +static inline double getCosVectors(Point2f a, Point2f b, Point2f c) +{ + return ((a - b).x * (c - b).x + (a - b).y * (c - b).y) / (norm(a - b) * norm(c - b)); +} + +static bool arePointsNearest(Point2f a, Point2f b, float delta = 0.0) +{ + if ((abs(a.x - b.x) < delta) && (abs(a.y - b.y) < delta)) + return true; + else + return false; +} class QRDetect { @@ -74,15 +108,13 @@ public: Mat getBinBarcode() { return bin_barcode; } Mat getStraightBarcode() { return straight_barcode; } vector getTransformationPoints() { return transformation_points; } - static Point2f intersectionLines(Point2f a1, Point2f a2, Point2f b1, Point2f b2); protected: vector searchHorizontalLines(); vector separateVerticalLines(const vector &list_lines); vector extractVerticalLines(const vector &list_lines, double eps); void fixationPoints(vector &local_point); vector getQuadrilateral(vector angle_list); - bool testBypassRoute(vector hull, int start, int finish); - inline double getCosVectors(Point2f a, Point2f b, Point2f c); + bool testByPassRoute(vector hull, int start, 
int finish); Mat barcode, bin_barcode, resized_barcode, resized_bin_barcode, straight_barcode; vector localization_points, transformation_points; @@ -361,7 +393,6 @@ void QRDetect::fixationPoints(vector &local_point) Point2f(static_cast(bin_barcode.cols - 1), static_cast(bin_barcode.rows - 1)))); - vector list_area_pnt; list_area_pnt.push_back(current_point); @@ -629,7 +660,6 @@ bool QRDetect::computeTransformationPoints() transformation_points.push_back( intersectionLines(down_left_edge_point, down_max_delta_point, up_right_edge_point, up_max_delta_point)); - vector quadrilateral = getQuadrilateral(transformation_points); transformation_points = quadrilateral; @@ -643,23 +673,8 @@ bool QRDetect::computeTransformationPoints() return true; } -Point2f QRDetect::intersectionLines(Point2f a1, Point2f a2, Point2f b1, Point2f b2) -{ - Point2f result_square_angle( - ((a1.x * a2.y - a1.y * a2.x) * (b1.x - b2.x) - - (b1.x * b2.y - b1.y * b2.x) * (a1.x - a2.x)) / - ((a1.x - a2.x) * (b1.y - b2.y) - - (a1.y - a2.y) * (b1.x - b2.x)), - ((a1.x * a2.y - a1.y * a2.x) * (b1.y - b2.y) - - (b1.x * b2.y - b1.y * b2.x) * (a1.y - a2.y)) / - ((a1.x - a2.x) * (b1.y - b2.y) - - (a1.y - a2.y) * (b1.x - b2.x)) - ); - return result_square_angle; -} - // test function (if true then ------> else <------ ) -bool QRDetect::testBypassRoute(vector hull, int start, int finish) +bool QRDetect::testByPassRoute(vector hull, int start, int finish) { CV_TRACE_FUNCTION(); int index_hull = start, next_index_hull, hull_size = (int)hull.size(); @@ -764,7 +779,7 @@ vector QRDetect::getQuadrilateral(vector angle_list) int index_hull, extra_index_hull, next_index_hull, extra_next_index_hull; Point result_side_begin[4], result_side_end[4]; - bool bypass_orientation = testBypassRoute(hull, start_line[0], finish_line[0]); + bool bypass_orientation = testByPassRoute(hull, start_line[0], finish_line[0]); min_norm = std::numeric_limits::max(); index_hull = start_line[0]; @@ -805,7 +820,7 @@ vector 
QRDetect::getQuadrilateral(vector angle_list) min_norm = std::numeric_limits::max(); index_hull = start_line[1]; - bypass_orientation = testBypassRoute(hull, start_line[1], finish_line[1]); + bypass_orientation = testByPassRoute(hull, start_line[1], finish_line[1]); do { if (bypass_orientation) { next_index_hull = index_hull + 1; } @@ -840,8 +855,8 @@ vector QRDetect::getQuadrilateral(vector angle_list) result_side_end[1] = angle_list[1]; } - bypass_orientation = testBypassRoute(hull, start_line[0], unstable_pnt); - const bool extra_bypass_orientation = testBypassRoute(hull, finish_line[1], unstable_pnt); + bypass_orientation = testByPassRoute(hull, start_line[0], unstable_pnt); + const bool extra_bypass_orientation = testByPassRoute(hull, finish_line[1], unstable_pnt); vector result_angle_list(4), test_result_angle_list(4); double min_diff_area = std::numeric_limits::max(); @@ -918,16 +933,6 @@ vector QRDetect::getQuadrilateral(vector angle_list) return result_angle_list; } -// / | b -// / | -// / | -// a/ | c - -inline double QRDetect::getCosVectors(Point2f a, Point2f b, Point2f c) -{ - return ((a - b).x * (c - b).x + (a - b).y * (c - b).y) / (norm(a - b) * norm(c - b)); -} - struct QRCodeDetector::Impl { public: @@ -975,17 +980,79 @@ public: Mat getStraightBarcode() { return straight; } size_t getVersion() { return version; } std::string getDecodeInformation() { return result_info; } - bool fullDecodingProcess(); + bool straightDecodingProcess(); + bool curvedDecodingProcess(); protected: bool updatePerspective(); bool versionDefinition(); bool samplingForVersion(); bool decodingProcess(); - Mat original, no_border_intermediate, intermediate, straight; + inline double pointPosition(Point2f a, Point2f b , Point2f c); + float distancePointToLine(Point2f a, Point2f b , Point2f c); + void getPointsInsideQRCode(const vector &angle_list); + bool computeClosestPoints(const vector &result_integer_hull); + bool computeSidesPoints(const vector &result_integer_hull); + 
vector getPointsNearUnstablePoint(const vector &side, int start, int end, int step); + bool findAndAddStablePoint(const vector &result_integer_hull); + bool findIndexesCurvedSides(); + bool findIncompleteIndexesCurvedSides(); + Mat getPatternsMask(); + Point findClosestZeroPoint(Point2f original_point); + bool findPatternsContours(vector > &patterns_contours); + bool findPatternsVerticesPoints(vector > &patterns_vertices_points); + bool findTempPatternsAddingPoints(vector > > &temp_patterns_add_points); + bool computePatternsAddingPoints(std::map > &patterns_add_points); + bool addPointsToSides(); + void completeAndSortSides(); + vector > computeSpline(const vector &x_arr, const vector &y_arr); + bool createSpline(vector > &spline_lines); + bool divideIntoEvenSegments(vector > &segments_points); + bool straightenQRCodeInParts(); + bool preparingCurvedQRCodes(); + + const static int NUM_SIDES = 2; + Mat original, bin_barcode, no_border_intermediate, intermediate, straight, curved_to_straight, test_image; vector original_points; + vector original_curved_points; + vector qrcode_locations; + vector > closest_points; + vector > sides_points; + std::pair unstable_pair; + vector curved_indexes, curved_incomplete_indexes; + std::map > complete_curved_sides; std::string result_info; uint8_t version, version_size; float test_perspective_size; + struct sortPairAsc + { + bool operator()(const std::pair &a, + const std::pair &b) const + { + return a.second < b.second; + } + }; + struct sortPairDesc + { + bool operator()(const std::pair &a, + const std::pair &b) const + { + return a.second > b.second; + } + }; + struct sortPointsByX + { + bool operator()(const Point &a, const Point &b) const + { + return a.x < b.x; + } + }; + struct sortPointsByY + { + bool operator()(const Point &a, const Point &b) const + { + return a.y < b.y; + } + }; }; void QRDecode::init(const Mat &src, const vector &points) @@ -993,6 +1060,8 @@ void QRDecode::init(const Mat &src, const vector &points) 
CV_TRACE_FUNCTION(); vector bbox = points; original = src.clone(); + test_image = src.clone(); + adaptiveThreshold(original, bin_barcode, 255, ADAPTIVE_THRESH_GAUSSIAN_C, THRESH_BINARY, 83, 2); intermediate = Mat::zeros(original.size(), CV_8UC1); original_points = bbox; version = 0; @@ -1001,11 +1070,1168 @@ void QRDecode::init(const Mat &src, const vector &points) result_info = ""; } +inline double QRDecode::pointPosition(Point2f a, Point2f b , Point2f c) +{ + return (a.x - b.x) * (c.y - b.y) - (c.x - b.x) * (a.y - b.y); +} + +float QRDecode::distancePointToLine(Point2f a, Point2f b , Point2f c) +{ + float A, B, C, result; + A = c.y - b.y; + B = c.x - b.x; + C = c.x * b.y - b.x * c.y; + float dist = sqrt(A*A + B*B); + if (dist == 0) return 0; + result = abs((A * a.x - B * a.y + C)) / dist; + + return result; +} + +void QRDecode::getPointsInsideQRCode(const vector &angle_list) +{ + CV_TRACE_FUNCTION(); + size_t angle_size = angle_list.size(); + Mat contour_mask = Mat::zeros(bin_barcode.size(), CV_8UC1); + for (size_t i = 0; i < angle_size; i++) + { + LineIterator line_iter(bin_barcode, angle_list[ i % angle_size], + angle_list[(i + 1) % angle_size]); + for(int j = 0; j < line_iter.count; j++, ++line_iter) + { + Point p = line_iter.pos(); + contour_mask.at(p + Point(1, 1)) = 255; + } + } + Point2f center_point = intersectionLines(angle_list[0], angle_list[2], + angle_list[1], angle_list[3]); + floodFill(contour_mask, center_point, 255, 0, Scalar(), Scalar(), FLOODFILL_FIXED_RANGE); + + vector locations; + findNonZero(contour_mask, locations); + + Mat fill_bin_barcode = bin_barcode.clone(); + Mat qrcode_mask = Mat::zeros(bin_barcode.rows + 2, bin_barcode.cols + 2, CV_8UC1); + uint8_t value, mask_value; + for(size_t i = 0; i < locations.size(); i++) + { + value = bin_barcode.at(locations[i]); + mask_value = qrcode_mask.at(locations[i] + Point(1, 1)); + if (value == 0 && mask_value == 0) + { + floodFill(fill_bin_barcode, qrcode_mask, locations[i], 255, + 0, Scalar(), 
Scalar(), FLOODFILL_MASK_ONLY); + } + } + Mat qrcode_mask_roi = qrcode_mask(Range(1, qrcode_mask.rows - 1), Range(1, qrcode_mask.cols - 1)); + findNonZero(qrcode_mask_roi, qrcode_locations); +} + +bool QRDecode::computeClosestPoints(const vector &result_integer_hull) +{ + CV_TRACE_FUNCTION(); + double min_norm, max_norm = 0.0; + size_t idx_min; + for (size_t i = 0; i < original_points.size(); i++) + { + min_norm = std::numeric_limits::max(); + + Point closest_pnt; + for (size_t j = 0; j < result_integer_hull.size(); j++) + { + Point integer_original_point = original_points[i]; + double temp_norm = norm(integer_original_point - result_integer_hull[j]); + if (temp_norm < min_norm) + { + min_norm = temp_norm; + closest_pnt = result_integer_hull[j]; + idx_min = j; + } + } + if (min_norm > max_norm) + { + max_norm = min_norm; + unstable_pair = std::pair(i, closest_pnt); + } + closest_points.push_back(std::pair(idx_min, closest_pnt)); + } + + if (closest_points.size() != 4) + { + return false; + } + + return true; +} + +bool QRDecode::computeSidesPoints(const vector &result_integer_hull) +{ + size_t num_closest_points = closest_points.size(); + vector points; + + for(size_t i = 0; i < num_closest_points; i++) + { + points.clear(); + size_t start = closest_points[i].first, + end = closest_points[(i + 1) % num_closest_points].first; + if (start < end) + { + points.insert(points.end(), + result_integer_hull.begin() + start, + result_integer_hull.begin() + end + 1); + } + else + { + points.insert(points.end(), + result_integer_hull.begin() + start, + result_integer_hull.end()); + points.insert(points.end(), + result_integer_hull.begin(), + result_integer_hull.begin() + end + 1); + } + if (abs(result_integer_hull[start].x - result_integer_hull[end].x) > + abs(result_integer_hull[start].y - result_integer_hull[end].y)) + { + if (points.front().x > points.back().x) + { + reverse(points.begin(), points.end()); + } + } + else + { + if (points.front().y > points.back().y) + { + 
reverse(points.begin(), points.end()); + } + } + if (points.empty()) + { + return false; + } + sides_points.push_back(points); + } + + return true; +} + +vector QRDecode::getPointsNearUnstablePoint(const vector &side, int start, int end, int step) +{ + vector points; + Point p1, p2, p3; + + double max_neighbour_angle = 1.0; + int index_max_angle = start + step; + bool enough_points = true; + + if(side.size() < 3) + { + points.insert(points.end(), side.begin(), side.end()); + return points; + } + const double cos_angle_threshold = -0.97; + for (int i = start + step; i != end; i+= step) + { + p1 = side[i + step]; + if (norm(p1 - side[i]) < 5) { continue; } + p2 = side[i]; + if (norm(p2 - side[i - step]) < 5) { continue; } + p3 = side[i - step]; + + double neighbour_angle = getCosVectors(p1, p2, p3); + neighbour_angle = floor(neighbour_angle*1000)/1000; + + if ((neighbour_angle <= max_neighbour_angle) && (neighbour_angle < cos_angle_threshold)) + { + max_neighbour_angle = neighbour_angle; + index_max_angle = i; + } + else if (i == end - step) + { + enough_points = false; + index_max_angle = i; + } + } + + if (enough_points) + { + p1 = side[index_max_angle + step]; + p2 = side[index_max_angle]; + p3 = side[index_max_angle - step]; + + points.push_back(p1); + points.push_back(p2); + points.push_back(p3); + } + else + { + p1 = side[index_max_angle]; + p2 = side[index_max_angle - step]; + + points.push_back(p1); + points.push_back(p2); + } + + return points; +} + +bool QRDecode::findAndAddStablePoint(const vector &result_integer_hull) +{ + size_t idx_unstable_point = unstable_pair.first; + Point unstable_point = unstable_pair.second; + + vector current_side_points, next_side_points; + Point a1, a2, b1, b2; + int start_current, end_current, step_current, start_next, end_next, step_next; + vector::iterator it_a, it_b; + + vector ¤t_side = sides_points[(idx_unstable_point + 3) % 4]; + vector &next_side = sides_points[idx_unstable_point]; + + if(current_side.size() < 2 || 
next_side.size() < 2) + { + return false; + } + + if(arePointsNearest(unstable_point, current_side.front(), 3.0)) + { + start_current = (int)current_side.size() - 1; + end_current = 0; + step_current = -1; + it_a = current_side.begin(); + } + else if(arePointsNearest(unstable_point, current_side.back(), 3.0)) + { + start_current = 0; + end_current = (int)current_side.size() - 1; + step_current = 1; + it_a = current_side.end() - 1; + } + else + { + return false; + } + if(arePointsNearest(unstable_point, next_side.front(), 3.0)) + { + start_next = (int)next_side.size() - 1; + end_next = 0; + step_next = -1; + it_b = next_side.begin(); + } + else if(arePointsNearest(unstable_point, next_side.back(), 3.0)) + { + start_next = 0; + end_next = (int)next_side.size() - 1; + step_next = 1; + it_b = next_side.end() - 1; + } + else + { + return false; + } + current_side_points = getPointsNearUnstablePoint(current_side, start_current, end_current, step_current); + next_side_points = getPointsNearUnstablePoint(next_side, start_next, end_next, step_next); + + if (current_side_points.size() < 2 || next_side_points.size() < 2) + { + return false; + } + + a1 = current_side_points[0]; + a2 = current_side_points[1]; + + b1 = next_side_points[0]; + b2 = next_side_points[1]; + + if(norm(a1 - b1) < 10 && next_side_points.size() > 2) + { + b1 = next_side_points[1]; + b2 = next_side_points[2]; + } + + Point stable_point = intersectionLines(a1, a2, b1, b2); + + const double max_side = std::max(bin_barcode.size().width, bin_barcode.size().height); + if ((abs(stable_point.x) > max_side) || (abs(stable_point.y) > max_side)) + { + return false; + } + + while (*it_a != a1) + { + it_a = current_side.erase(it_a); + if (it_a == current_side.end()) + { + it_a -= step_current; + } + Point point_to_remove_from_current = *it_a; + if (point_to_remove_from_current.x > max_side || point_to_remove_from_current.y > max_side) + { + break; + } + } + while (*it_b != b1) + { + it_b = next_side.erase(it_b); + if 
(it_b == next_side.end()) + { + it_b -= step_next; + } + Point point_to_remove_from_next = *it_b; + if (point_to_remove_from_next.x > max_side || point_to_remove_from_next.y > max_side) + { + break; + } + } + + bool add_stable_point = true; + + for (size_t i = 0; i < result_integer_hull.size(); i++) + { + if(arePointsNearest(stable_point, original_points[i], 3.0)) + { + add_stable_point = false; + break; + } + } + + if(add_stable_point) + { + current_side.insert(it_a, stable_point); + next_side.insert(it_b, stable_point); + closest_points[unstable_pair.first].second = stable_point; + } + else + { + stable_point = original_points[unstable_pair.first]; + closest_points[unstable_pair.first].second = stable_point; + current_side.insert(it_a, stable_point); + next_side.insert(it_b, stable_point); + } + + return true; +} + +bool QRDecode::findIndexesCurvedSides() +{ + double max_dist_to_arc_side = 0.0; + size_t num_closest_points = closest_points.size(); + int idx_curved_current = -1, idx_curved_opposite = -1; + + for (size_t i = 0; i < num_closest_points; i++) + { + double dist_to_arc = 0.0; + + Point arc_start = closest_points[i].second; + Point arc_end = closest_points[(i + 1) % num_closest_points].second; + + for (size_t j = 0; j < sides_points[i].size(); j++) + { + Point arc_point = sides_points[i][j]; + double dist = distancePointToLine(arc_point, arc_start, arc_end); + dist_to_arc += dist; + } + dist_to_arc /= sides_points[i].size(); + + if (dist_to_arc > max_dist_to_arc_side) + { + max_dist_to_arc_side = dist_to_arc; + idx_curved_current = (int)i; + idx_curved_opposite = (int)(i + 2) % num_closest_points; + } + } + if (idx_curved_current == -1 || idx_curved_opposite == -1) + { + return false; + } + + curved_indexes.push_back(idx_curved_current); + curved_indexes.push_back(idx_curved_opposite); + + return true; +} + +bool QRDecode::findIncompleteIndexesCurvedSides() +{ + int num_closest_points = (int)closest_points.size(); + + for (int i = 0; i < NUM_SIDES; i++) + 
{ + int idx_side = curved_indexes[i]; + int side_size = (int)sides_points[idx_side].size(); + + double max_norm = norm(closest_points[idx_side].second - + closest_points[(idx_side + 1) % num_closest_points].second); + double real_max_norm = 0; + + for (int j = 0; j < side_size - 1; j++) + { + double temp_norm = norm(sides_points[idx_side][j] - + sides_points[idx_side][j + 1]); + if (temp_norm > real_max_norm) + { + real_max_norm = temp_norm; + } + } + if (real_max_norm > (0.5 * max_norm)) + { + curved_incomplete_indexes.push_back(curved_indexes[i]); + } + + } + + if (curved_incomplete_indexes.size() == 0) + { + return false; + } + return true; +} + +Point QRDecode::findClosestZeroPoint(Point2f original_point) +{ + int orig_x = static_cast(original_point.x); + int orig_y = static_cast(original_point.y); + uint8_t value; + Point zero_point; + + const int step = 2; + for (int i = orig_x - step; i >= 0 && i <= orig_x + step; i++) + { + for (int j = orig_y - step; j >= 0 && j <= orig_y + step; j++) + { + Point p(i, j); + value = bin_barcode.at(p); + if (value == 0) zero_point = p; + } + } + + return zero_point; +} + +Mat QRDecode::getPatternsMask() +{ + Mat mask(bin_barcode.rows + 2, bin_barcode.cols + 2, CV_8UC1, Scalar(0)); + Mat patterns_mask(bin_barcode.rows + 2, bin_barcode.cols + 2, CV_8UC1, Scalar(0)); + Mat fill_bin_barcode = bin_barcode.clone(); + for (size_t i = 0; i < original_points.size(); i++) + { + if (i == 2) continue; + Point p = findClosestZeroPoint(original_points[i]); + floodFill(fill_bin_barcode, mask, p, 255, + 0, Scalar(), Scalar(), FLOODFILL_MASK_ONLY); + patterns_mask += mask; + } + Mat mask_roi = patterns_mask(Range(1, bin_barcode.rows - 1), Range(1, bin_barcode.cols - 1)); + + return mask_roi; +} + +bool QRDecode::findPatternsContours(vector > &patterns_contours) +{ + Mat patterns_mask = getPatternsMask(); + findContours(patterns_mask, patterns_contours, RETR_EXTERNAL, CHAIN_APPROX_NONE, Point(0, 0)); + if (patterns_contours.size() != 3) { 
return false; } + return true; +} + +bool QRDecode::findPatternsVerticesPoints(vector > &patterns_vertices_points) +{ + vector > patterns_contours; + if(!findPatternsContours(patterns_contours)) + { + return false; + } + const int num_vertices = 4; + for(size_t i = 0; i < patterns_contours.size(); i++) + { + vector convexhull_contours, new_convexhull_contours; + convexHull(patterns_contours[i], convexhull_contours); + + size_t number_pnts_in_hull = convexhull_contours.size(); + vector > cos_angles_in_hull; + vector min_angle_pnts_indexes; + + for(size_t j = 1; j < number_pnts_in_hull + 1; j++) + { + double cos_angle = getCosVectors(convexhull_contours[(j - 1) % number_pnts_in_hull], + convexhull_contours[ j % number_pnts_in_hull], + convexhull_contours[(j + 1) % number_pnts_in_hull]); + cos_angles_in_hull.push_back(std::pair(j, cos_angle)); + } + + sort(cos_angles_in_hull.begin(), cos_angles_in_hull.end(), sortPairDesc()); + + for (size_t j = 0; j < cos_angles_in_hull.size(); j++) + { + bool add_edge = true; + for(size_t k = 0; k < min_angle_pnts_indexes.size(); k++) + { + if(norm(convexhull_contours[cos_angles_in_hull[j].first % number_pnts_in_hull] - + convexhull_contours[min_angle_pnts_indexes[k] % number_pnts_in_hull]) < 3) + { + add_edge = false; + } + } + if (add_edge) + { + min_angle_pnts_indexes.push_back(cos_angles_in_hull[j].first % number_pnts_in_hull); + } + if ((int)min_angle_pnts_indexes.size() == num_vertices) { break; } + } + sort(min_angle_pnts_indexes.begin(), min_angle_pnts_indexes.end()); + + vector contour_vertices_points; + + for (size_t k = 0; k < min_angle_pnts_indexes.size(); k++) + { + contour_vertices_points.push_back(convexhull_contours[min_angle_pnts_indexes[k]]); + } + patterns_vertices_points.push_back(contour_vertices_points); + } + if (patterns_vertices_points.size() != 3) + { + return false; + } + + return true; +} + +bool QRDecode::findTempPatternsAddingPoints(vector > > &temp_patterns_add_points) +{ + vector >patterns_contours, 
patterns_vertices_points; + if(!findPatternsVerticesPoints(patterns_vertices_points)) + { + return false; + } + if(!findPatternsContours(patterns_contours)) + { + return false; + } + + for (size_t i = 0; i < curved_incomplete_indexes.size(); i++) + { + int idx_curved_side = curved_incomplete_indexes[i]; + Point close_transform_pnt_curr = original_points[idx_curved_side]; + Point close_transform_pnt_next = original_points[(idx_curved_side + 1) % 4]; + + vector patterns_indexes; + + for (size_t j = 0; j < patterns_vertices_points.size(); j++) + { + for (size_t k = 0; k < patterns_vertices_points[j].size(); k++) + { + if (norm(close_transform_pnt_curr - patterns_vertices_points[j][k]) < 5) + { + patterns_indexes.push_back(j); + break; + } + if (norm(close_transform_pnt_next - patterns_vertices_points[j][k]) < 5) + { + patterns_indexes.push_back(j); + break; + } + } + } + for (size_t j = 0; j < patterns_indexes.size(); j++) + { + vector vertices = patterns_vertices_points[patterns_indexes[j]]; + vector > vertices_dist_pair; + vector points; + for (size_t k = 0; k < vertices.size(); k++) + { + double dist_to_side = distancePointToLine(vertices[k], close_transform_pnt_curr, + close_transform_pnt_next); + vertices_dist_pair.push_back(std::pair((int)k, dist_to_side)); + } + if (vertices_dist_pair.size() == 0) + { + return false; + } + sort(vertices_dist_pair.begin(), vertices_dist_pair.end(), sortPairAsc()); + Point p1, p2; + int index_p1_in_vertices = 0, index_p2_in_vertices = 0; + for (int k = 4; k > 0; k--) + { + if((vertices_dist_pair[0].first == k % 4) && (vertices_dist_pair[1].first == (k - 1) % 4)) + { + index_p1_in_vertices = vertices_dist_pair[0].first; + index_p2_in_vertices = vertices_dist_pair[1].first; + } + else if((vertices_dist_pair[1].first == k % 4) && (vertices_dist_pair[0].first == (k - 1) % 4)) + { + index_p1_in_vertices = vertices_dist_pair[1].first; + index_p2_in_vertices = vertices_dist_pair[0].first; + } + } + if (index_p1_in_vertices == 
index_p2_in_vertices) return false; + + p1 = vertices[index_p1_in_vertices]; + p2 = vertices[index_p2_in_vertices]; + + size_t index_p1_in_contour = 0, index_p2_in_contour = 0; + vector add_points = patterns_contours[patterns_indexes[j]]; + + for(size_t k = 0; k < add_points.size(); k++) + { + if (add_points[k] == p1) + { + index_p1_in_contour = k; + } + if (add_points[k] == p2) + { + index_p2_in_contour = k; + } + } + + if (index_p1_in_contour > index_p2_in_contour) + { + for (size_t k = index_p1_in_contour; k < add_points.size(); k++) + { + points.push_back(add_points[k]); + } + for (size_t k = 0; k <= index_p2_in_contour; k++) + { + points.push_back(add_points[k]); + } + } + else if (index_p1_in_contour < index_p2_in_contour) + { + for (size_t k = index_p1_in_contour; k <= index_p2_in_contour; k++) + { + points.push_back(add_points[k]); + } + } + else + { + return false; + } + if (abs(p1.x - p2.x) > abs(p1.y - p2.y)) + { + sort(points.begin(), points.end(), sortPointsByX()); + } + else + { + sort(points.begin(), points.end(), sortPointsByY()); + } + + temp_patterns_add_points.push_back(std::pair >(idx_curved_side,points)); + } + } + + return true; +} + +bool QRDecode::computePatternsAddingPoints(std::map > &patterns_add_points) +{ + vector > > temp_patterns_add_points; + if(!findTempPatternsAddingPoints(temp_patterns_add_points)) + { + return false; + } + + const int num_points_in_pattern = 3; + for(size_t i = 0; i < temp_patterns_add_points.size(); i++) + { + int idx_side = temp_patterns_add_points[i].first; + int size = (int)temp_patterns_add_points[i].second.size(); + + float step = static_cast(size) / num_points_in_pattern; + vector temp_points; + for (int j = 0; j < num_points_in_pattern; j++) + { + float val = j * step; + int idx = cvRound(val) >= size ? 
size - 1 : cvRound(val); + temp_points.push_back(temp_patterns_add_points[i].second[idx]); + } + temp_points.push_back(temp_patterns_add_points[i].second.back()); + if(patterns_add_points.count(idx_side) == 1) + { + patterns_add_points[idx_side].insert(patterns_add_points[idx_side].end(), + temp_points.begin(), temp_points.end()); + } + patterns_add_points.insert(std::pair >(idx_side, temp_points)); + + } + if (patterns_add_points.size() == 0) + { + return false; + } + + return true; +} + +bool QRDecode::addPointsToSides() +{ + if(!computePatternsAddingPoints(complete_curved_sides)) + { + return false; + } + std::map >::iterator it; + double mean_step = 0.0; + size_t num_points_at_side = 0; + for (it = complete_curved_sides.begin(); it != complete_curved_sides.end(); ++it) + { + int count = -1; + const size_t num_points_at_pattern = it->second.size(); + for(size_t j = 0; j < num_points_at_pattern - 1; j++, count++) + { + if (count == 3) continue; + double temp_norm = norm(it->second[j] - + it->second[j + 1]); + mean_step += temp_norm; + } + num_points_at_side += num_points_at_pattern; + } + if (num_points_at_side == 0) + { + return false; + } + mean_step /= num_points_at_side; + + const size_t num_incomplete_sides = curved_incomplete_indexes.size(); + for (size_t i = 0; i < num_incomplete_sides; i++) + { + int idx = curved_incomplete_indexes[i]; + vector sides_points_indexes; + + const int num_points_at_side_to_add = (int)sides_points[idx].size(); + for (int j = 0; j < num_points_at_side_to_add; j++) + { + bool not_too_close = true; + const size_t num_points_at_side_exist = complete_curved_sides[idx].size(); + for (size_t k = 0; k < num_points_at_side_exist; k++) + { + double temp_norm = norm(sides_points[idx][j] - complete_curved_sides[idx][k]); + if (temp_norm < mean_step) + { + not_too_close = false; + break; + } + } + if (not_too_close) + { + sides_points_indexes.push_back(j); + } + } + + for (size_t j = 0; j < sides_points_indexes.size(); j++) + { + bool 
not_equal = true; + for (size_t k = 0; k < complete_curved_sides[idx].size(); k++) + { + if (sides_points[idx][sides_points_indexes[j]] == + complete_curved_sides[idx][k]) + { + not_equal = false; + } + } + if (not_equal) + { + complete_curved_sides[idx].push_back(sides_points[idx][sides_points_indexes[j]]); + } + } + } + + return true; +} + +void QRDecode::completeAndSortSides() +{ + if (complete_curved_sides.size() < 2) + { + for (int i = 0; i < NUM_SIDES; i++) + { + if(complete_curved_sides.count(curved_indexes[i]) == 0) + { + int idx_second_cur_side = curved_indexes[i]; + complete_curved_sides.insert(std::pair >(idx_second_cur_side, sides_points[idx_second_cur_side])); + } + } + } + std::map >::iterator it; + for (it = complete_curved_sides.begin(); it != complete_curved_sides.end(); ++it) + { + Point p1 = it->second.front(); + Point p2 = it->second.back(); + if (abs(p1.x - p2.x) > abs(p1.y - p2.y)) + { + sort(it->second.begin(), it->second.end(), sortPointsByX()); + } + else + { + sort(it->second.begin(), it->second.end(), sortPointsByY()); + } + } +} + +vector > QRDecode::computeSpline(const vector &x_arr, const vector &y_arr) +{ + const int n = (int)x_arr.size(); + vector a, b(n - 1), d(n - 1), h(n - 1), alpha(n - 1), c(n), l(n), mu(n), z(n); + + for (int i = 0; i < (int)y_arr.size(); i++) + { + a.push_back(static_cast(x_arr[i])); + } + for (int i = 0; i < n - 1; i++) + { + h[i] = static_cast(y_arr[i + 1] - y_arr[i]); + } + for (int i = 1; i < n - 1; i++) + { + alpha[i] = 3 / h[i] * (a[i + 1] - a[i]) - 3 / (h[i - 1]) * (a[i] - a[i - 1]); + } + l[0] = 1; + mu[0] = 0; + z[0] = 0; + + for (int i = 1; i < n - 1; i++) + { + l[i] = 2 * (y_arr[i + 1] - y_arr[i - 1]) - h[i - 1] * mu[i - 1]; + mu[i] = h[i] / l[i]; + z[i] = (alpha[i] - h[i - 1] * z[i - 1]) / l[i]; + } + l[n - 1] = 1; + z[n - 1] = 0; + c[n - 1] = 0; + + for(int j = n - 2; j >= 0; j--) + { + c[j] = z[j] - mu[j] * c[j + 1]; + b[j] = (a[j + 1] - a[j]) / h[j] - (h[j] * (c[j + 1] + 2 * c[j])) / 3; + d[j] = 
(c[j + 1] - c[j]) / (3 * h[j]); + } + + vector > S(n - 1); + for (int i = 0; i < n - 1; i++) + { + S[i].push_back(a[i]); + S[i].push_back(b[i]); + S[i].push_back(c[i]); + S[i].push_back(d[i]); + } + + return S; +} + +bool QRDecode::createSpline(vector > &spline_lines) +{ + int start, end; + vector > S; + + for (int idx = 0; idx < NUM_SIDES; idx++) + { + int idx_curved_side = curved_indexes[idx]; + + vector spline_points = complete_curved_sides.find(idx_curved_side)->second; + vector x_arr, y_arr; + + for (size_t j = 0; j < spline_points.size(); j++) + { + x_arr.push_back(cvRound(spline_points[j].x)); + y_arr.push_back(cvRound(spline_points[j].y)); + } + + bool horizontal_order = abs(x_arr.front() - x_arr.back()) > abs(y_arr.front() - y_arr.back()); + vector& second_arr = horizontal_order ? x_arr : y_arr; + vector& first_arr = horizontal_order ? y_arr : x_arr; + + S = computeSpline(first_arr, second_arr); + + int closest_point_first = horizontal_order ? closest_points[idx_curved_side].second.x + : closest_points[idx_curved_side].second.y; + int closest_point_second = horizontal_order ? closest_points[(idx_curved_side + 1) % 4].second.x + : closest_points[(idx_curved_side + 1) % 4].second.y; + + start = idx_curved_side; + end = (idx_curved_side + 1) % 4; + if(closest_point_first > closest_point_second) + { + start = (idx_curved_side + 1) % 4; + end = idx_curved_side; + } + + int closest_point_start = horizontal_order ? closest_points[start].second.x : closest_points[start].second.y; + int closest_point_end = horizontal_order ? 
closest_points[end].second.x : closest_points[end].second.y; + + for (int index = closest_point_start; index <= closest_point_end; index++) + { + if (index == second_arr.front()) + { + spline_lines[idx].push_back(closest_points[start].second); + } + for (size_t i = 0; i < second_arr.size() - 1; i++) + { + if ((index > second_arr[i]) && (index <= second_arr[i + 1])) + { + float val = S[i][0] + S[i][1] * (index - second_arr[i]) + S[i][2] * (index - second_arr[i]) * (index - second_arr[i]) + + S[i][3] * (index - second_arr[i]) * (index - second_arr[i]) * (index - second_arr[i]); + spline_lines[idx].push_back(horizontal_order ? Point2f(static_cast(index), val) : Point2f(val, static_cast(index))); + } + } + } + } + return true; +} + +bool QRDecode::divideIntoEvenSegments(vector > &segments_points) +{ + vector > spline_lines(NUM_SIDES); + if (!createSpline(spline_lines)) + { + return false; + } + float mean_num_points_in_line = 0.0; + for (int i = 0; i < NUM_SIDES; i++) + { + mean_num_points_in_line += spline_lines[i].size(); + } + mean_num_points_in_line /= NUM_SIDES; + const int min_num_points = 1, max_num_points = cvRound(mean_num_points_in_line / 2.0); + float linear_threshold = 0.5f; + for (int num = min_num_points; num < max_num_points; num++) + { + for (int i = 0; i < NUM_SIDES; i++) + { + segments_points[i].clear(); + + int size = (int)spline_lines[i].size(); + float step = static_cast(size) / num; + for (int j = 0; j < num; j++) + { + float val = j * step; + int idx = cvRound(val) >= size ? 
size - 1 : cvRound(val); + segments_points[i].push_back(spline_lines[i][idx]); + } + segments_points[i].push_back(spline_lines[i].back()); + } + float mean_of_two_sides = 0.0; + for (int i = 0; i < NUM_SIDES; i++) + { + float mean_dist_in_segment = 0.0; + for (size_t j = 0; j < segments_points[i].size() - 1; j++) + { + Point2f segment_start = segments_points[i][j]; + Point2f segment_end = segments_points[i][j + 1]; + vector::iterator it_start, it_end, it; + it_start = find(spline_lines[i].begin(), spline_lines[i].end(), segment_start); + it_end = find(spline_lines[i].begin(), spline_lines[i].end(), segment_end); + float max_dist_to_line = 0.0; + for (it = it_start; it != it_end; it++) + { + float temp_dist = distancePointToLine(*it, segment_start, segment_end); + if (temp_dist > max_dist_to_line) + { + max_dist_to_line = temp_dist; + } + } + mean_dist_in_segment += max_dist_to_line; + } + mean_dist_in_segment /= segments_points[i].size(); + mean_of_two_sides += mean_dist_in_segment; + } + mean_of_two_sides /= NUM_SIDES; + if (mean_of_two_sides < linear_threshold) + { + break; + } + } + + return true; +} + +bool QRDecode::straightenQRCodeInParts() +{ + vector > segments_points(NUM_SIDES); + if (!divideIntoEvenSegments(segments_points)) + { + return false; + } + vector current_curved_side, opposite_curved_side; + + for (int i = 0; i < NUM_SIDES; i++) + { + Point2f temp_point_start = segments_points[i].front(); + Point2f temp_point_end = segments_points[i].back(); + bool horizontal_order = (abs(temp_point_start.x - temp_point_end.x) > + abs(temp_point_start.y - temp_point_end.y)); + float compare_point_current = horizontal_order ? segments_points[i].front().y + : segments_points[(i + 1) % 2].front().x; + float compare_point_opposite = horizontal_order ? 
segments_points[(i + 1) % 2].front().y + : segments_points[i].front().x; + + if (compare_point_current > compare_point_opposite) + { + current_curved_side = segments_points[i]; + opposite_curved_side = segments_points[(i + 1) % 2]; + } + } + if (current_curved_side.size() != opposite_curved_side.size()) + { + return false; + } + size_t number_pnts_to_cut = current_curved_side.size(); + if (number_pnts_to_cut == 0) + { + return false; + } + float perspective_curved_size = 251.0; + const Size temporary_size(cvRound(perspective_curved_size), cvRound(perspective_curved_size)); + + float dist = perspective_curved_size / (number_pnts_to_cut - 1); + Mat perspective_result = Mat::zeros(temporary_size, CV_8UC1); + vector curved_parts_points; + + float start_cut = 0.0; + vector temp_closest_points(4); + + for (size_t i = 1; i < number_pnts_to_cut; i++) + { + curved_parts_points.clear(); + Mat test_mask = Mat::zeros(bin_barcode.size(), CV_8UC1); + + Point2f start_point = current_curved_side[i]; + Point2f prev_start_point = current_curved_side[i - 1]; + Point2f finish_point = opposite_curved_side[i]; + Point2f prev_finish_point = opposite_curved_side[i - 1]; + + for (size_t j = 0; j < qrcode_locations.size(); j++) + { + if ((pointPosition(start_point, finish_point, qrcode_locations[j]) >= 0) && + (pointPosition(prev_start_point, prev_finish_point, qrcode_locations[j]) <= 0)) + { + test_mask.at(qrcode_locations[j]) = 255; + } + } + + vector perspective_points; + + perspective_points.push_back(Point2f(0.0, start_cut)); + perspective_points.push_back(Point2f(perspective_curved_size, start_cut)); + + perspective_points.push_back(Point2f(perspective_curved_size, start_cut + dist)); + perspective_points.push_back(Point2f(0.0, start_cut+dist)); + + perspective_points.push_back(Point2f(perspective_curved_size * 0.5f, start_cut + dist * 0.5f)); + + if (i == 1) + { + for (size_t j = 0; j < closest_points.size(); j++) + { + if (arePointsNearest(closest_points[j].second, prev_start_point, 
3.0)) + { + temp_closest_points[j] = perspective_points[0]; + } + else if (arePointsNearest(closest_points[j].second, prev_finish_point, 3.0)) + { + temp_closest_points[j] = perspective_points[1]; + } + } + } + if (i == number_pnts_to_cut - 1) + { + for (size_t j = 0; j < closest_points.size(); j++) + { + if (arePointsNearest(closest_points[j].second, finish_point, 3.0)) + { + temp_closest_points[j] = perspective_points[2]; + } + else if (arePointsNearest(closest_points[j].second, start_point, 3.0)) + { + temp_closest_points[j] = perspective_points[3]; + } + } + } + start_cut += dist; + + curved_parts_points.push_back(prev_start_point); + curved_parts_points.push_back(prev_finish_point); + curved_parts_points.push_back(finish_point); + curved_parts_points.push_back(start_point); + + Point2f center_point = intersectionLines(curved_parts_points[0], curved_parts_points[2], + curved_parts_points[1], curved_parts_points[3]); + if (cvIsNaN(center_point.x) || cvIsNaN(center_point.y)) + return false; + + vector pts = curved_parts_points; + pts.push_back(center_point); + + Mat H = findHomography(pts, perspective_points); + Mat temp_intermediate(temporary_size, CV_8UC1); + warpPerspective(test_mask, temp_intermediate, H, temporary_size, INTER_NEAREST); + perspective_result += temp_intermediate; + + } + Mat white_mask = Mat(temporary_size, CV_8UC1, Scalar(255)); + Mat inversion = white_mask - perspective_result; + Mat temp_result; + + original_curved_points = temp_closest_points; + + Point2f original_center_point = intersectionLines(original_curved_points[0], original_curved_points[2], + original_curved_points[1], original_curved_points[3]); + + original_curved_points.push_back(original_center_point); + + for (size_t i = 0; i < original_curved_points.size(); i++) + { + if (cvIsNaN(original_curved_points[i].x) || cvIsNaN(original_curved_points[i].y)) + return false; + } + + vector perspective_straight_points; + perspective_straight_points.push_back(Point2f(0.f, 0.f)); + 
perspective_straight_points.push_back(Point2f(perspective_curved_size, 0.f)); + + perspective_straight_points.push_back(Point2f(perspective_curved_size, perspective_curved_size)); + perspective_straight_points.push_back(Point2f(0.f, perspective_curved_size)); + + perspective_straight_points.push_back(Point2f(perspective_curved_size * 0.5f, perspective_curved_size * 0.5f)); + + Mat H = findHomography(original_curved_points, perspective_straight_points); + warpPerspective(inversion, temp_result, H, temporary_size, INTER_NEAREST, BORDER_REPLICATE); + + no_border_intermediate = temp_result(Range(1, temp_result.rows), Range(1, temp_result.cols)); + const int border = cvRound(0.1 * perspective_curved_size); + const int borderType = BORDER_CONSTANT; + copyMakeBorder(no_border_intermediate, curved_to_straight, border, border, border, border, borderType, Scalar(255)); + intermediate = curved_to_straight; + + return true; +} + +bool QRDecode::preparingCurvedQRCodes() +{ + vector result_integer_hull; + getPointsInsideQRCode(original_points); + if (qrcode_locations.size() == 0) + return false; + convexHull(qrcode_locations, result_integer_hull); + if (!computeClosestPoints(result_integer_hull)) + return false; + if (!computeSidesPoints(result_integer_hull)) + return false; + if (!findAndAddStablePoint(result_integer_hull)) + return false; + if (!findIndexesCurvedSides()) + return false; + if (findIncompleteIndexesCurvedSides()) + { + if(!addPointsToSides()) + return false; + } + completeAndSortSides(); + if (!straightenQRCodeInParts()) + return false; + + return true; +} + bool QRDecode::updatePerspective() { CV_TRACE_FUNCTION(); - const Point2f centerPt = QRDetect::intersectionLines(original_points[0], original_points[2], - original_points[1], original_points[3]); + const Point2f centerPt = intersectionLines(original_points[0], original_points[2], + original_points[1], original_points[3]); if (cvIsNaN(centerPt.x) || cvIsNaN(centerPt.y)) return false; @@ -1121,7 +2347,7 @@ 
bool QRDecode::samplingForVersion() CV_TRACE_FUNCTION(); const double multiplyingFactor = (version < 3) ? 1 : (version == 3) ? 1.5 : - version * (5 + version - 4); + version * (version + 1); const Size newFactorSize( cvRound(no_border_intermediate.size().width * multiplyingFactor), cvRound(no_border_intermediate.size().height * multiplyingFactor)); @@ -1206,7 +2432,7 @@ bool QRDecode::decodingProcess() } -bool QRDecode::fullDecodingProcess() +bool QRDecode::straightDecodingProcess() { #ifdef HAVE_QUIRC if (!updatePerspective()) { return false; } @@ -1220,6 +2446,20 @@ bool QRDecode::fullDecodingProcess() #endif } +bool QRDecode::curvedDecodingProcess() +{ +#ifdef HAVE_QUIRC + if (!preparingCurvedQRCodes()) { return false; } + if (!versionDefinition()) { return false; } + if (!samplingForVersion()) { return false; } + if (!decodingProcess()) { return false; } + return true; +#else + std::cout << "Library QUIRC is not linked. No decoding is performed. Take it to the OpenCV repository." << std::endl; + return false; +#endif +} + bool decodeQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode) { QRCodeDetector qrcode; @@ -1227,6 +2467,13 @@ bool decodeQRCode(InputArray in, InputArray points, std::string &decoded_info, O return !decoded_info.empty(); } +bool decodeCurvedQRCode(InputArray in, InputArray points, std::string &decoded_info, OutputArray straight_qrcode) +{ + QRCodeDetector qrcode; + decoded_info = qrcode.decodeCurved(in, points, straight_qrcode); + return !decoded_info.empty(); +} + cv::String QRCodeDetector::decode(InputArray in, InputArray points, OutputArray straight_qrcode) { @@ -1241,7 +2488,35 @@ cv::String QRCodeDetector::decode(InputArray in, InputArray points, QRDecode qrdec; qrdec.init(inarr, src_points); - bool ok = qrdec.fullDecodingProcess(); + bool ok = qrdec.straightDecodingProcess(); + + std::string decoded_info = qrdec.getDecodeInformation(); + + if (ok && straight_qrcode.needed()) + { + 
qrdec.getStraightBarcode().convertTo(straight_qrcode, + straight_qrcode.fixedType() ? + straight_qrcode.type() : CV_32FC2); + } + + return ok ? decoded_info : std::string(); +} + +cv::String QRCodeDetector::decodeCurved(InputArray in, InputArray points, + OutputArray straight_qrcode) +{ + Mat inarr; + if (!checkQRInputImage(in, inarr)) + return std::string(); + + vector src_points; + points.copyTo(src_points); + CV_Assert(src_points.size() == 4); + CV_CheckGT(contourArea(src_points), 0.0, "Invalid QR code source points"); + + QRDecode qrdec; + qrdec.init(inarr, src_points); + bool ok = qrdec.curvedDecodingProcess(); std::string decoded_info = qrdec.getDecodeInformation(); @@ -1278,6 +2553,29 @@ cv::String QRCodeDetector::detectAndDecode(InputArray in, return decoded_info; } +cv::String QRCodeDetector::detectAndDecodeCurved(InputArray in, + OutputArray points_, + OutputArray straight_qrcode) +{ + Mat inarr; + if (!checkQRInputImage(in, inarr)) + { + points_.release(); + return std::string(); + } + + vector points; + bool ok = detect(inarr, points); + if (!ok) + { + points_.release(); + return std::string(); + } + updatePointsResult(points_, points); + std::string decoded_info = decodeCurved(inarr, points, straight_qrcode); + return decoded_info; +} + class QRDetectMulti : public QRDetect { public: @@ -1510,7 +2808,6 @@ void QRDetectMulti::fixationPoints(vector &local_point) Point2f(static_cast(bin_barcode_temp.cols - 1), static_cast(bin_barcode_temp.rows - 1)))); - vector list_area_pnt; list_area_pnt.push_back(current_point); @@ -2241,7 +3538,7 @@ public: for (int i = range.start; i < range.end; i++) { qrdec[i].init(inarr, src_points[i]); - bool ok = qrdec[i].fullDecodingProcess(); + bool ok = qrdec[i].straightDecodingProcess(); if (ok) { decoded_info[i] = qrdec[i].getDecodeInformation(); @@ -2261,7 +3558,7 @@ public: src_points[i][j] /= static_cast(coeff_expansion); } qrdec[i].init(inarr2, src_points[i]); - ok = qrdec[i].fullDecodingProcess(); + ok = 
qrdec[i].straightDecodingProcess(); if (ok) { decoded_info[i] = qrdec[i].getDecodeInformation(); diff --git a/modules/objdetect/test/test_qrcode.cpp b/modules/objdetect/test/test_qrcode.cpp index a716c837ee..c26cd8a4f2 100644 --- a/modules/objdetect/test/test_qrcode.cpp +++ b/modules/objdetect/test/test_qrcode.cpp @@ -21,6 +21,9 @@ std::string qrcode_images_close[] = { std::string qrcode_images_monitor[] = { "monitor_1.png", "monitor_2.png", "monitor_3.png", "monitor_4.png", "monitor_5.png" }; +std::string qrcode_images_curved[] = { + "curved_1.jpg", "curved_2.jpg", "curved_3.jpg", "curved_4.jpg", "curved_5.jpg", "curved_6.jpg", "curved_7.jpg", "curved_8.jpg" +}; std::string qrcode_images_multiple[] = { "2_qrcodes.png", "3_close_qrcodes.png", "3_qrcodes.png", "4_qrcodes.png", "5_qrcodes.png", "6_qrcodes.png", "7_qrcodes.png", "8_close_qrcodes.png" @@ -137,7 +140,38 @@ TEST(Objdetect_QRCode_Monitor, generate_test_data) file_config << "]"; file_config.release(); } +TEST(Objdetect_QRCode_Curved, generate_test_data) +{ + const std::string root = "qrcode/curved/"; + const std::string dataset_config = findDataFile(root + "dataset_config.json"); + FileStorage file_config(dataset_config, FileStorage::WRITE); + file_config << "test_images" << "["; + size_t images_count = sizeof(qrcode_images_curved) / sizeof(qrcode_images_curved[0]); + for (size_t i = 0; i < images_count; i++) + { + file_config << "{:" << "image_name" << qrcode_images_curved[i]; + std::string image_path = findDataFile(root + qrcode_images_curved[i]); + std::vector corners; + Mat src = imread(image_path, IMREAD_GRAYSCALE), straight_barcode; + std::string decoded_info; + ASSERT_FALSE(src.empty()) << "Can't read image: " << image_path; + EXPECT_TRUE(detectQRCode(src, corners)); +#ifdef HAVE_QUIRC + EXPECT_TRUE(decodeCurvedQRCode(src, corners, decoded_info, straight_barcode)); +#endif + file_config << "x" << "[:"; + for (size_t j = 0; j < corners.size(); j++) { file_config << corners[j].x; } + file_config << 
"]"; + file_config << "y" << "[:"; + for (size_t j = 0; j < corners.size(); j++) { file_config << corners[j].y; } + file_config << "]"; + file_config << "info" << decoded_info; + file_config << "}"; + } + file_config << "]"; + file_config.release(); +} TEST(Objdetect_QRCode_Multi, generate_test_data) { const std::string root = "qrcode/multiple/"; @@ -390,6 +424,66 @@ TEST_P(Objdetect_QRCode_Monitor, regression) } } +typedef testing::TestWithParam< std::string > Objdetect_QRCode_Curved; +TEST_P(Objdetect_QRCode_Curved, regression) +{ + const std::string name_current_image = GetParam(); + const std::string root = "qrcode/curved/"; + const int pixels_error = 3; + + std::string image_path = findDataFile(root + name_current_image); + Mat src = imread(image_path, IMREAD_GRAYSCALE), straight_barcode; + ASSERT_FALSE(src.empty()) << "Can't read image: " << image_path; + + std::vector corners; + std::string decoded_info; + QRCodeDetector qrcode; +#ifdef HAVE_QUIRC + decoded_info = qrcode.detectAndDecodeCurved(src, corners, straight_barcode); + ASSERT_FALSE(corners.empty()); + ASSERT_FALSE(decoded_info.empty()); +#else + ASSERT_TRUE(qrcode.detect(src, corners)); +#endif + + const std::string dataset_config = findDataFile(root + "dataset_config.json"); + FileStorage file_config(dataset_config, FileStorage::READ); + ASSERT_TRUE(file_config.isOpened()) << "Can't read validation data: " << dataset_config; + { + FileNode images_list = file_config["test_images"]; + size_t images_count = static_cast(images_list.size()); + ASSERT_GT(images_count, 0u) << "Can't find validation data entries in 'test_images': " << dataset_config; + + for (size_t index = 0; index < images_count; index++) + { + FileNode config = images_list[(int)index]; + std::string name_test_image = config["image_name"]; + if (name_test_image == name_current_image) + { + for (int i = 0; i < 4; i++) + { + int x = config["x"][i]; + int y = config["y"][i]; + EXPECT_NEAR(x, corners[i].x, pixels_error); + EXPECT_NEAR(y, 
corners[i].y, pixels_error); + } + +#ifdef HAVE_QUIRC + std::string original_info = config["info"]; + EXPECT_EQ(decoded_info, original_info); +#endif + + return; // done + } + } + std::cerr + << "Not found results for '" << name_current_image + << "' image in config file:" << dataset_config << std::endl + << "Re-run tests with enabled UPDATE_QRCODE_TEST_DATA macro to update test data." + << std::endl; + } +} + typedef testing::TestWithParam < std::string > Objdetect_QRCode_Multi; TEST_P(Objdetect_QRCode_Multi, regression) { @@ -478,6 +572,7 @@ TEST_P(Objdetect_QRCode_Multi, regression) INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode, testing::ValuesIn(qrcode_images_name)); INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Close, testing::ValuesIn(qrcode_images_close)); INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Monitor, testing::ValuesIn(qrcode_images_monitor)); +INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Curved, testing::ValuesIn(qrcode_images_curved)); INSTANTIATE_TEST_CASE_P(/**/, Objdetect_QRCode_Multi, testing::ValuesIn(qrcode_images_multiple)); TEST(Objdetect_QRCode_decodeMulti, decode_regression_16491) From 36598677cffe5ab7a22ffb4af3a2e2a73fe55db4 Mon Sep 17 00:00:00 2001 From: Quentin Chateau Date: Sun, 25 Oct 2020 16:58:27 +0100 Subject: [PATCH 050/152] Merge pull request #18646 from qchateau:wave-auto * stitching: add WAVE_CORRECT_AUTO * stitching: use CV_EXPORTS --- .../stitching/detail/motion_estimators.hpp | 12 ++++- modules/stitching/src/motion_estimators.cpp | 45 +++++++++++++++++ modules/stitching/test/test_precomp.hpp | 1 + .../stitching/test/test_wave_correction.cpp | 50 +++++++++++++++++++ 4 files changed, 107 insertions(+), 1 deletion(-) create mode 100644 modules/stitching/test/test_wave_correction.cpp diff --git a/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp b/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp index ff05af1814..ad21ee1277 100644 --- 
a/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp +++ b/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp @@ -328,9 +328,19 @@ private: enum WaveCorrectKind { WAVE_CORRECT_HORIZ, - WAVE_CORRECT_VERT + WAVE_CORRECT_VERT, + WAVE_CORRECT_AUTO }; +/** @brief Tries to detect the wave correction kind depending +on whether a panorama spans horizontally or vertically + +@param rmats Camera rotation matrices. +@return The correction kind to use for this panorama + */ +CV_EXPORTS +WaveCorrectKind autoDetectWaveCorrectKind(const std::vector &rmats); + /** @brief Tries to make panorama more horizontal (or vertical). @param rmats Camera rotation matrices. diff --git a/modules/stitching/src/motion_estimators.cpp b/modules/stitching/src/motion_estimators.cpp index d9848dbe7f..c0b46b101d 100644 --- a/modules/stitching/src/motion_estimators.cpp +++ b/modules/stitching/src/motion_estimators.cpp @@ -886,6 +886,45 @@ void BundleAdjusterAffinePartial::calcJacobian(Mat &jac) ////////////////////////////////////////////////////////////////////////////// +WaveCorrectKind autoDetectWaveCorrectKind(const std::vector &rmats) +{ + std::vector xs, ys; + xs.reserve(rmats.size()); + ys.reserve(rmats.size()); + + // Project a [0, 0, 1, 1] point to the camera image frame + // Ignore intrinsic parameters and camera translation as they + // have little influence + // This also means we can simply use "rmat.col(2)" as the + // projected point homogeneous coordinate + for (const Mat& rmat: rmats) + { + CV_Assert(rmat.type() == CV_32F); + xs.push_back(rmat.at(0, 2) / rmat.at(2, 2)); + ys.push_back(rmat.at(1, 2) / rmat.at(2, 2)); + } + + // Calculate the delta between the max and min values for + // both the X and Y axis + auto min_max_x = std::minmax_element(xs.begin(), xs.end()); + auto min_max_y = std::minmax_element(ys.begin(), ys.end()); + double delta_x = *min_max_x.second - *min_max_x.first; + double delta_y = *min_max_y.second - *min_max_y.first; + 
+ // If the Y delta is the biggest, it means the images + // mostly span along the vertical axis: correct this axis + if (delta_y > delta_x) + { + LOGLN(" using vertical wave correction"); + return WAVE_CORRECT_VERT; + } + else + { + LOGLN(" using horizontal wave correction"); + return WAVE_CORRECT_HORIZ; + } +} + void waveCorrect(std::vector &rmats, WaveCorrectKind kind) { LOGLN("Wave correcting..."); @@ -898,12 +937,18 @@ void waveCorrect(std::vector &rmats, WaveCorrectKind kind) return; } + if (kind == WAVE_CORRECT_AUTO) + { + kind = autoDetectWaveCorrectKind(rmats); + } + Mat moment = Mat::zeros(3, 3, CV_32F); for (size_t i = 0; i < rmats.size(); ++i) { Mat col = rmats[i].col(0); moment += col * col.t(); } + Mat eigen_vals, eigen_vecs; eigen(moment, eigen_vals, eigen_vecs); diff --git a/modules/stitching/test/test_precomp.hpp b/modules/stitching/test/test_precomp.hpp index 8e7709a7ec..e761fb1fb0 100644 --- a/modules/stitching/test/test_precomp.hpp +++ b/modules/stitching/test/test_precomp.hpp @@ -6,6 +6,7 @@ #include "opencv2/ts.hpp" #include "opencv2/stitching.hpp" +#include "opencv2/stitching/detail/motion_estimators.hpp" #include "opencv2/stitching/detail/matchers.hpp" #include "opencv2/stitching/detail/blenders.hpp" #include "opencv2/stitching/detail/exposure_compensate.hpp" diff --git a/modules/stitching/test/test_wave_correction.cpp b/modules/stitching/test/test_wave_correction.cpp new file mode 100644 index 0000000000..1ac8ff07aa --- /dev/null +++ b/modules/stitching/test/test_wave_correction.cpp @@ -0,0 +1,50 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+ +#include "test_precomp.hpp" + +namespace opencv_test { +namespace { + +detail::WaveCorrectKind correctionKind(const std::vector& images) +{ + + Ptr stitcher = Stitcher::create(Stitcher::PANORAMA); + stitcher->estimateTransform(images); + + std::vector rmats; + auto cameras = stitcher->cameras(); + for (const auto& camera: cameras) + rmats.push_back(camera.R); + + return detail::autoDetectWaveCorrectKind(rmats); +} + +TEST(WaveCorrection, AutoWaveCorrection) +{ + std::vector images(2); + imread(cvtest::TS::ptr()->get_data_path() + "stitching/s1.jpg").copyTo(images[0]); + imread(cvtest::TS::ptr()->get_data_path() + "stitching/s2.jpg").copyTo(images[1]); + + EXPECT_EQ(detail::WAVE_CORRECT_HORIZ, correctionKind(images)); + + std::vector rotated_images(2); + rotate(images[0], rotated_images[0], cv::ROTATE_90_CLOCKWISE); + rotate(images[1], rotated_images[1], cv::ROTATE_90_CLOCKWISE); + + EXPECT_EQ(detail::WAVE_CORRECT_VERT, correctionKind(rotated_images)); + + rotate(images[0], rotated_images[0], cv::ROTATE_90_COUNTERCLOCKWISE); + rotate(images[1], rotated_images[1], cv::ROTATE_90_COUNTERCLOCKWISE); + + EXPECT_EQ(detail::WAVE_CORRECT_VERT, correctionKind(rotated_images)); + + rotate(images[0], rotated_images[0], cv::ROTATE_180); + rotate(images[1], rotated_images[1], cv::ROTATE_180); + + EXPECT_EQ(detail::WAVE_CORRECT_HORIZ, correctionKind(rotated_images)); +} + +} // namespace +} // namespace opencv_test From afbf383ba3ccb75964206b316f120e9675f314e0 Mon Sep 17 00:00:00 2001 From: Ruslan Garnov Date: Mon, 26 Oct 2020 02:07:03 +0300 Subject: [PATCH 051/152] Minor s11n and RMat improvements: - Changed descr_of(RMat) to use canDescribeHelper to correctly handle planar case - Added export of createMat - Added setting of Storage::INPUT and Storage::OUTPUT in deserialization routine of GComputation --- modules/gapi/src/api/gmat.cpp | 2 +- modules/gapi/src/backends/common/gbackend.hpp | 2 +- .../src/backends/common/serialization.cpp | 21 ++++++++++++------- 3 files changed, 
16 insertions(+), 9 deletions(-) diff --git a/modules/gapi/src/api/gmat.cpp b/modules/gapi/src/api/gmat.cpp index d9f135222b..08bb170a86 100644 --- a/modules/gapi/src/api/gmat.cpp +++ b/modules/gapi/src/api/gmat.cpp @@ -144,7 +144,7 @@ bool GMatDesc::canDescribe(const cv::Mat& mat) const bool GMatDesc::canDescribe(const cv::RMat& mat) const { - return *this == mat.desc(); + return canDescribeHelper(*this, mat); } }// namespace cv diff --git a/modules/gapi/src/backends/common/gbackend.hpp b/modules/gapi/src/backends/common/gbackend.hpp index e96d2b0776..8c1749377e 100644 --- a/modules/gapi/src/backends/common/gbackend.hpp +++ b/modules/gapi/src/backends/common/gbackend.hpp @@ -134,7 +134,7 @@ inline cv::util::optional getCompileArg(const cv::GCompileArgs &args) return cv::gapi::getCompileArg(args); } -void createMat(const cv::GMatDesc& desc, cv::Mat& mat); +void GAPI_EXPORTS createMat(const cv::GMatDesc& desc, cv::Mat& mat); }} // cv::gimpl diff --git a/modules/gapi/src/backends/common/serialization.cpp b/modules/gapi/src/backends/common/serialization.cpp index 2b23b33cc8..bb1864823f 100644 --- a/modules/gapi/src/backends/common/serialization.cpp +++ b/modules/gapi/src/backends/common/serialization.cpp @@ -94,13 +94,14 @@ void linkNodes(ade::Graph& g) { } void relinkProto(ade::Graph& g) { + using namespace cv::gimpl; // identify which node handles map to the protocol // input/output object in the reconstructed graph - using S = std::set; // FIXME: use ... - using M = std::map; // FIXME: unordered! + using S = std::set; // FIXME: use ... + using M = std::map; // FIXME: unordered! 
- cv::gimpl::GModel::Graph gm(g); - auto &proto = gm.metadata().get(); + GModel::Graph gm(g); + auto &proto = gm.metadata().get(); const S set_in(proto.inputs.begin(), proto.inputs.end()); const S set_out(proto.outputs.begin(), proto.outputs.end()); @@ -108,9 +109,9 @@ void relinkProto(ade::Graph& g) { // Associate the protocol node handles with their resource identifiers for (auto &&nh : gm.nodes()) { - if (gm.metadata(nh).get().t == cv::gimpl::NodeType::DATA) { - const auto &d = gm.metadata(nh).get(); - const auto rc = cv::gimpl::RcDesc{d.rc, d.shape, d.ctor}; + if (gm.metadata(nh).get().t == NodeType::DATA) { + const auto &d = gm.metadata(nh).get(); + const auto rc = RcDesc{d.rc, d.shape, d.ctor}; if (set_in.count(rc) > 0) { GAPI_DbgAssert(set_out.count(rc) == 0); map_in[rc] = nh; @@ -128,6 +129,12 @@ void relinkProto(ade::Graph& g) { proto.out_nhs.clear(); for (auto &rc : proto.inputs) { proto.in_nhs .push_back(map_in .at(rc)); } for (auto &rc : proto.outputs) { proto.out_nhs.push_back(map_out.at(rc)); } + + // If a subgraph is being serialized it's possible that + // some of its in/out nodes are INTERNAL in the full graph. 
+ // Set their storage apporpriately + for (auto &nh : proto.in_nhs) { gm.metadata(nh).get().storage = Data::Storage::INPUT; } + for (auto &nh : proto.out_nhs) { gm.metadata(nh).get().storage = Data::Storage::OUTPUT; } } } // anonymous namespace From 93c3775927024166e59c467bc99302aac15e833b Mon Sep 17 00:00:00 2001 From: Anatoliy Talamanov Date: Mon, 26 Oct 2020 22:02:03 +0300 Subject: [PATCH 052/152] Merge pull request #18491 from TolyaTalamanov:at/wrap-inference [G-API] Wrap cv::gapi::infer into python * Introduce generic infer * Move Generic to infer.hpp * Removew num_outs * Fix windows warnings * Fix comments to review * Fix doxygen * Add comment * Fix comments to review * Wrap inference to python * Add default ctor to Params * Add test * Fix clang build * Implement GInferInputs/GInferOutputs as Pimpl * Add checkIEtarget to infer test * Fix path * Supress warning * Use getAvailableDevices insted of checkIETarget * Move PyParams to bindings_ie * Add namespace * Update CMakeLists.txt --- modules/gapi/CMakeLists.txt | 3 + modules/gapi/include/opencv2/gapi/infer.hpp | 29 ++++++--- .../opencv2/gapi/infer/bindings_ie.hpp | 56 +++++++++++++++++ .../gapi/include/opencv2/gapi/infer/ie.hpp | 2 +- modules/gapi/misc/python/pyopencv_gapi.hpp | 2 + modules/gapi/misc/python/shadow_gapi.hpp | 10 +-- .../gapi/misc/python/test/test_gapi_infer.py | 62 +++++++++++++++++++ modules/gapi/src/api/ginfer.cpp | 49 +++++++++++---- modules/gapi/src/backends/ie/bindings_ie.cpp | 39 ++++++++++++ 9 files changed, 224 insertions(+), 28 deletions(-) create mode 100644 modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp create mode 100644 modules/gapi/misc/python/test/test_gapi_infer.py create mode 100644 modules/gapi/src/backends/ie/bindings_ie.cpp diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt index 88ddeead16..137894cb8f 100644 --- a/modules/gapi/CMakeLists.txt +++ b/modules/gapi/CMakeLists.txt @@ -145,6 +145,9 @@ set(gapi_srcs # Serialization API and routines 
src/api/s11n.cpp src/backends/common/serialization.cpp + + # Python bridge + src/backends/ie/bindings_ie.cpp ) ocv_add_dispatched_file(backends/fluid/gfluidimgproc_func SSE4_1 AVX2) diff --git a/modules/gapi/include/opencv2/gapi/infer.hpp b/modules/gapi/include/opencv2/gapi/infer.hpp index 4fdd2df875..9b4580ec6b 100644 --- a/modules/gapi/include/opencv2/gapi/infer.hpp +++ b/modules/gapi/include/opencv2/gapi/infer.hpp @@ -133,14 +133,18 @@ struct InOutInfo * @{ * @brief G-API object used to collect network inputs */ -class GAPI_EXPORTS GInferInputs +class GAPI_EXPORTS_W_SIMPLE GInferInputs { +using Map = std::unordered_map; public: + GAPI_WRAP GInferInputs(); + GAPI_WRAP void setInput(const std::string& name, const cv::GMat& value); + cv::GMat& operator[](const std::string& name); - const std::unordered_map& getBlobs() const; + const Map& getBlobs() const; private: - std::unordered_map in_blobs; + std::shared_ptr in_blobs; }; /** @} */ @@ -148,16 +152,16 @@ private: * @{ * @brief G-API object used to collect network outputs */ -struct GAPI_EXPORTS GInferOutputs +struct GAPI_EXPORTS_W_SIMPLE GInferOutputs { public: + GAPI_WRAP GInferOutputs() = default; GInferOutputs(std::shared_ptr call); - cv::GMat at(const std::string& name); + GAPI_WRAP cv::GMat at(const std::string& name); private: - std::shared_ptr m_call; - InOutInfo* m_info = nullptr; - std::unordered_map out_blobs; + struct Priv; + std::shared_ptr m_priv; }; /** @} */ @@ -333,6 +337,11 @@ infer(const std::string& tag, const GInferInputs& inputs) return GInferOutputs{std::move(call)}; } +GAPI_EXPORTS_W inline GInferOutputs infer(const String& name, const GInferInputs& inputs) +{ + return infer(name, inputs); +} + } // namespace gapi } // namespace cv @@ -361,8 +370,8 @@ struct GAPI_EXPORTS GNetParam { * * @sa cv::gapi::networks */ -struct GAPI_EXPORTS GNetPackage { - GNetPackage() : GNetPackage({}) {} +struct GAPI_EXPORTS_W_SIMPLE GNetPackage { + GAPI_WRAP GNetPackage() : GNetPackage({}) {} explicit 
GNetPackage(std::initializer_list &&ii); std::vector backends() const; std::vector networks; diff --git a/modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp b/modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp new file mode 100644 index 0000000000..fdd4128b1a --- /dev/null +++ b/modules/gapi/include/opencv2/gapi/infer/bindings_ie.hpp @@ -0,0 +1,56 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_INFER_BINDINGS_IE_HPP +#define OPENCV_GAPI_INFER_BINDINGS_IE_HPP + +#include +#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS +#include // GKernelPackage +#include // Params + +#include + +namespace cv { +namespace gapi { +namespace ie { + +// NB: Used by python wrapper +// This class can be marked as SIMPLE, because it's implemented as pimpl +class GAPI_EXPORTS_W_SIMPLE PyParams { +public: + PyParams() = default; + + PyParams(const std::string &tag, + const std::string &model, + const std::string &weights, + const std::string &device); + + PyParams(const std::string &tag, + const std::string &model, + const std::string &device); + + GBackend backend() const; + std::string tag() const; + cv::util::any params() const; + +private: + std::shared_ptr> m_priv; +}; + +GAPI_EXPORTS_W PyParams params(const std::string &tag, + const std::string &model, + const std::string &weights, + const std::string &device); + +GAPI_EXPORTS_W PyParams params(const std::string &tag, + const std::string &model, + const std::string &device); +} // namespace ie +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_INFER_BINDINGS_IE_HPP diff --git a/modules/gapi/include/opencv2/gapi/infer/ie.hpp b/modules/gapi/include/opencv2/gapi/infer/ie.hpp index dd2459da08..a8bc0bb05d 100644 --- a/modules/gapi/include/opencv2/gapi/infer/ie.hpp +++ 
b/modules/gapi/include/opencv2/gapi/infer/ie.hpp @@ -162,4 +162,4 @@ protected: } // namespace gapi } // namespace cv -#endif // OPENCV_GAPI_INFER_HPP +#endif // OPENCV_GAPI_INFER_IE_HPP diff --git a/modules/gapi/misc/python/pyopencv_gapi.hpp b/modules/gapi/misc/python/pyopencv_gapi.hpp index 0e862a4010..57c0b3db4f 100644 --- a/modules/gapi/misc/python/pyopencv_gapi.hpp +++ b/modules/gapi/misc/python/pyopencv_gapi.hpp @@ -5,6 +5,8 @@ // NB: Python wrapper replaces :: with _ for classes using gapi_GKernelPackage = cv::gapi::GKernelPackage; +using gapi_GNetPackage = cv::gapi::GNetPackage; +using gapi_ie_PyParams = cv::gapi::ie::PyParams; using gapi_wip_IStreamSource_Ptr = cv::Ptr; // FIXME: Python wrapper generate code without namespace std, diff --git a/modules/gapi/misc/python/shadow_gapi.hpp b/modules/gapi/misc/python/shadow_gapi.hpp index 72d7686eeb..0fac222212 100644 --- a/modules/gapi/misc/python/shadow_gapi.hpp +++ b/modules/gapi/misc/python/shadow_gapi.hpp @@ -6,23 +6,25 @@ namespace cv struct GAPI_EXPORTS_W_SIMPLE GCompileArg { }; GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GKernelPackage pkg); + GAPI_EXPORTS_W GCompileArgs compile_args(gapi::GNetPackage pkg); // NB: This classes doesn't exist in *.so // HACK: Mark them as a class to force python wrapper generate code for this entities class GAPI_EXPORTS_W_SIMPLE GProtoArg { }; class GAPI_EXPORTS_W_SIMPLE GProtoInputArgs { }; class GAPI_EXPORTS_W_SIMPLE GProtoOutputArgs { }; - class GAPI_EXPORTS_W_SIMPLE GRunArg { }; - class GAPI_EXPORTS_W_SIMPLE GMetaArg { }; + class GAPI_EXPORTS_W_SIMPLE GRunArg { }; + class GAPI_EXPORTS_W_SIMPLE GMetaArg { }; using GProtoInputArgs = GIOProtoArgs; using GProtoOutputArgs = GIOProtoArgs; namespace gapi { + GAPI_EXPORTS_W gapi::GNetPackage networks(const cv::gapi::ie::PyParams& params); namespace wip { class GAPI_EXPORTS_W IStreamSource { }; - } - } + } // namespace wip + } // namespace gapi } // namespace cv diff --git a/modules/gapi/misc/python/test/test_gapi_infer.py 
b/modules/gapi/misc/python/test/test_gapi_infer.py new file mode 100644 index 0000000000..a6fabf7253 --- /dev/null +++ b/modules/gapi/misc/python/test/test_gapi_infer.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python + +import numpy as np +import cv2 as cv +import os + +from tests_common import NewOpenCVTests + + +class test_gapi_infer(NewOpenCVTests): + + def test_getAvailableTargets(self): + targets = cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_OPENCV) + self.assertTrue(cv.dnn.DNN_TARGET_CPU in targets) + + + def test_age_gender_infer(self): + + # NB: Check IE + if not cv.dnn.DNN_TARGET_CPU in cv.dnn.getAvailableTargets(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE): + return + + root_path = '/omz_intel_models/intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013' + model_path = self.find_file(root_path + '.xml', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')]) + weights_path = self.find_file(root_path + '.bin', [os.environ.get('OPENCV_DNN_TEST_DATA_PATH')]) + img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')]) + device_id = 'CPU' + img = cv.resize(cv.imread(img_path), (62,62)) + + # OpenCV DNN + net = cv.dnn.readNetFromModelOptimizer(model_path, weights_path) + net.setPreferableBackend(cv.dnn.DNN_BACKEND_INFERENCE_ENGINE) + net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU) + + blob = cv.dnn.blobFromImage(img) + + net.setInput(blob) + dnn_age, dnn_gender = net.forward(net.getUnconnectedOutLayersNames()) + + # OpenCV G-API + g_in = cv.GMat() + inputs = cv.GInferInputs() + inputs.setInput('data', g_in) + + outputs = cv.gapi.infer("net", inputs) + age_g = outputs.at("age_conv3") + gender_g = outputs.at("prob") + + comp = cv.GComputation(cv.GIn(g_in), cv.GOut(age_g, gender_g)) + pp = cv.gapi.ie.params("net", model_path, weights_path, device_id) + + nets = cv.gapi.networks(pp) + args = cv.compile_args(nets) + gapi_age, gapi_gender = comp.apply(cv.gin(img), args=cv.compile_args(cv.gapi.networks(pp))) + + # Check + 
self.assertEqual(0.0, cv.norm(dnn_gender, gapi_gender, cv.NORM_INF)) + self.assertEqual(0.0, cv.norm(dnn_age, gapi_age, cv.NORM_INF)) + + +if __name__ == '__main__': + NewOpenCVTests.bootstrap() diff --git a/modules/gapi/src/api/ginfer.cpp b/modules/gapi/src/api/ginfer.cpp index 31d851b8e6..20511a4aaf 100644 --- a/modules/gapi/src/api/ginfer.cpp +++ b/modules/gapi/src/api/ginfer.cpp @@ -29,29 +29,52 @@ std::vector cv::gapi::GNetPackage::backends() const { // FIXME: Inference API is currently only available in full mode #if !defined(GAPI_STANDALONE) -cv::GMat& cv::GInferInputs::operator[](const std::string& name) { - return in_blobs[name]; +cv::GInferInputs::GInferInputs() + : in_blobs(std::make_shared()) +{ } -const std::unordered_map& cv::GInferInputs::getBlobs() const { - return in_blobs; +cv::GMat& cv::GInferInputs::operator[](const std::string& name) { + return (*in_blobs)[name]; +} + +const cv::GInferInputs::Map& cv::GInferInputs::getBlobs() const { + return *in_blobs; +} + +void cv::GInferInputs::setInput(const std::string& name, const cv::GMat& value) { + in_blobs->emplace(name, value); +} + +struct cv::GInferOutputs::Priv +{ + Priv(std::shared_ptr); + + std::shared_ptr call; + InOutInfo* info = nullptr; + std::unordered_map out_blobs; +}; + +cv::GInferOutputs::Priv::Priv(std::shared_ptr c) + : call(std::move(c)), info(cv::util::any_cast(&call->params())) +{ } cv::GInferOutputs::GInferOutputs(std::shared_ptr call) - : m_call(std::move(call)), m_info(cv::util::any_cast(&m_call->params())) + : m_priv(std::make_shared(std::move(call))) { -}; +} cv::GMat cv::GInferOutputs::at(const std::string& name) { - auto it = out_blobs.find(name); - if (it == out_blobs.end()) { + auto it = m_priv->out_blobs.find(name); + if (it == m_priv->out_blobs.end()) { // FIXME: Avoid modifying GKernel - m_call->kernel().outShapes.push_back(cv::GShape::GMAT); - int out_idx = static_cast(out_blobs.size()); - it = out_blobs.emplace(name, m_call->yield(out_idx)).first; - 
m_info->out_names.push_back(name); + m_priv->call->kernel().outShapes.push_back(cv::GShape::GMAT); + int out_idx = static_cast(m_priv->out_blobs.size()); + it = m_priv->out_blobs.emplace(name, m_priv->call->yield(out_idx)).first; + m_priv->info->out_names.push_back(name); } return it->second; -}; +} #endif // GAPI_STANDALONE diff --git a/modules/gapi/src/backends/ie/bindings_ie.cpp b/modules/gapi/src/backends/ie/bindings_ie.cpp new file mode 100644 index 0000000000..35191d7bcb --- /dev/null +++ b/modules/gapi/src/backends/ie/bindings_ie.cpp @@ -0,0 +1,39 @@ +#include + +cv::gapi::ie::PyParams::PyParams(const std::string &tag, + const std::string &model, + const std::string &weights, + const std::string &device) + : m_priv(std::make_shared>(tag, model, weights, device)) { +} + +cv::gapi::ie::PyParams::PyParams(const std::string &tag, + const std::string &model, + const std::string &device) + : m_priv(std::make_shared>(tag, model, device)) { +} + +cv::gapi::GBackend cv::gapi::ie::PyParams::backend() const { + return m_priv->backend(); +} + +std::string cv::gapi::ie::PyParams::tag() const { + return m_priv->tag(); +} + +cv::util::any cv::gapi::ie::PyParams::params() const { + return m_priv->params(); +} + +cv::gapi::ie::PyParams cv::gapi::ie::params(const std::string &tag, + const std::string &model, + const std::string &weights, + const std::string &device) { + return {tag, model, weights, device}; +} + +cv::gapi::ie::PyParams cv::gapi::ie::params(const std::string &tag, + const std::string &model, + const std::string &device) { + return {tag, model, device}; +} From 3d4563913d59c66b9fbf9a3ff98c1b31fbb1839a Mon Sep 17 00:00:00 2001 From: Anatoliy Talamanov Date: Mon, 26 Oct 2020 22:55:43 +0300 Subject: [PATCH 053/152] Merge pull request #18600 from TolyaTalamanov:at/implement-render-using-stateful [G-API] Implement render using stateful kernel * Implement render using stateful kernel * Move freetype to backends folder * Fix freetype compilation * Fix freetype smoke 
test * Fix comments * Refactoring --- modules/gapi/CMakeLists.txt | 3 +- modules/gapi/src/api/render_ocv.cpp | 24 +-- modules/gapi/src/api/render_ocv.hpp | 6 +- .../{api => backends/render}/ft_render.cpp | 26 ++- .../{api => backends/render}/ft_render.hpp | 8 - .../render}/ft_render_priv.hpp | 2 +- .../gapi/src/backends/render/grenderocv.cpp | 54 +++++- .../gapi/src/backends/render/grenderocv.hpp | 55 ------ .../src/backends/render/grenderocvbackend.cpp | 161 ------------------ .../src/backends/render/grenderocvbackend.hpp | 73 -------- modules/gapi/test/render/ftp_render_test.cpp | 2 +- .../test/render/gapi_render_tests_ocv.cpp | 1 - 12 files changed, 83 insertions(+), 332 deletions(-) rename modules/gapi/src/{api => backends/render}/ft_render.cpp (92%) rename modules/gapi/src/{api => backends/render}/ft_render.hpp (91%) rename modules/gapi/src/{api => backends/render}/ft_render_priv.hpp (96%) delete mode 100644 modules/gapi/src/backends/render/grenderocv.hpp delete mode 100644 modules/gapi/src/backends/render/grenderocvbackend.cpp delete mode 100644 modules/gapi/src/backends/render/grenderocvbackend.hpp diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt index 137894cb8f..82b719ad4e 100644 --- a/modules/gapi/CMakeLists.txt +++ b/modules/gapi/CMakeLists.txt @@ -77,7 +77,6 @@ set(gapi_srcs src/api/render.cpp src/api/render_ocv.cpp src/api/ginfer.cpp - src/api/ft_render.cpp src/api/media.cpp src/api/rmat.cpp @@ -131,8 +130,8 @@ set(gapi_srcs src/backends/ie/giebackend/giewrapper.cpp # Render Backend. 
- src/backends/render/grenderocvbackend.cpp src/backends/render/grenderocv.cpp + src/backends/render/ft_render.cpp #PlaidML Backend src/backends/plaidml/gplaidmlcore.cpp diff --git a/modules/gapi/src/api/render_ocv.cpp b/modules/gapi/src/api/render_ocv.cpp index a298a958bd..5ab2e1dd07 100644 --- a/modules/gapi/src/api/render_ocv.cpp +++ b/modules/gapi/src/api/render_ocv.cpp @@ -2,7 +2,7 @@ #include // Kernel API's #include "api/render_ocv.hpp" -#include "api/ft_render.hpp" +#include "backends/render/ft_render.hpp" namespace cv { @@ -146,12 +146,8 @@ struct EmptyConverter template void drawPrimitivesOCV(cv::Mat& in, const cv::gapi::wip::draw::Prims& prims, - cv::gapi::wip::draw::FTTextRender* ftpr) + std::shared_ptr& ftpr) { -#ifndef HAVE_FREETYPE - cv::util::suppress_unused_warning(ftpr); -#endif - using namespace cv::gapi::wip::draw; ColorConverter converter; @@ -177,7 +173,6 @@ void drawPrimitivesOCV(cv::Mat& in, case Prim::index_of(): { -#ifdef HAVE_FREETYPE const auto& ftp = cv::util::get(p); const auto color = converter.cvtColor(ftp.color); @@ -196,9 +191,6 @@ void drawPrimitivesOCV(cv::Mat& in, cv::Point tl(ftp.org.x, ftp.org.y - mask.size().height + baseline); blendTextMask(in, mask, tl, color); -#else - cv::util::throw_error(std::runtime_error("FreeType not found !")); -#endif break; } @@ -251,16 +243,16 @@ void drawPrimitivesOCV(cv::Mat& in, } } -void drawPrimitivesOCVBGR(cv::Mat &in, - const cv::gapi::wip::draw::Prims &prims, - cv::gapi::wip::draw::FTTextRender* ftpr) +void drawPrimitivesOCVBGR(cv::Mat &in, + const cv::gapi::wip::draw::Prims &prims, + std::shared_ptr &ftpr) { drawPrimitivesOCV(in, prims, ftpr); } -void drawPrimitivesOCVYUV(cv::Mat &in, - const cv::gapi::wip::draw::Prims &prims, - cv::gapi::wip::draw::FTTextRender* ftpr) +void drawPrimitivesOCVYUV(cv::Mat &in, + const cv::gapi::wip::draw::Prims &prims, + std::shared_ptr &ftpr) { drawPrimitivesOCV(in, prims, ftpr); } diff --git a/modules/gapi/src/api/render_ocv.hpp 
b/modules/gapi/src/api/render_ocv.hpp index 91194dcdc1..a9a98f93fb 100644 --- a/modules/gapi/src/api/render_ocv.hpp +++ b/modules/gapi/src/api/render_ocv.hpp @@ -1,6 +1,6 @@ #include #include "render_priv.hpp" -#include "ft_render.hpp" +#include "backends/render/ft_render.hpp" #ifndef OPENCV_RENDER_OCV_HPP #define OPENCV_RENDER_OCV_HPP @@ -15,8 +15,8 @@ namespace draw { // FIXME only for tests -void GAPI_EXPORTS drawPrimitivesOCVYUV(cv::Mat& yuv, const Prims& prims, cv::gapi::wip::draw::FTTextRender* mc); -void GAPI_EXPORTS drawPrimitivesOCVBGR(cv::Mat& bgr, const Prims& prims, cv::gapi::wip::draw::FTTextRender* mc); +void GAPI_EXPORTS drawPrimitivesOCVYUV(cv::Mat& yuv, const Prims& prims, std::shared_ptr& mc); +void GAPI_EXPORTS drawPrimitivesOCVBGR(cv::Mat& bgr, const Prims& prims, std::shared_ptr& mc); } // namespace draw } // namespace wip diff --git a/modules/gapi/src/api/ft_render.cpp b/modules/gapi/src/backends/render/ft_render.cpp similarity index 92% rename from modules/gapi/src/api/ft_render.cpp rename to modules/gapi/src/backends/render/ft_render.cpp index 7561dff833..fcf84713ff 100644 --- a/modules/gapi/src/api/ft_render.cpp +++ b/modules/gapi/src/backends/render/ft_render.cpp @@ -5,11 +5,11 @@ // Copyright (C) 2019 Intel Corporation #include "precomp.hpp" +#include "ft_render.hpp" #ifdef HAVE_FREETYPE -#include "api/ft_render.hpp" -#include "api/ft_render_priv.hpp" +#include "ft_render_priv.hpp" #include #include @@ -166,6 +166,11 @@ void cv::gapi::wip::draw::FTTextRender::Priv::putText(cv::Mat& mat, "Failed to load char"); FT_Bitmap *bitmap = &(m_face->glyph->bitmap); + // FIXME: Skip glyph, if size is 0 + if (bitmap->rows == 0 || bitmap->width == 0) { + continue; + } + cv::Mat glyph(bitmap->rows, bitmap->width, CV_8UC1, bitmap->buffer, bitmap->pitch); int left = m_face->glyph->bitmap_left; @@ -211,4 +216,21 @@ void cv::gapi::wip::draw::FTTextRender::putText(cv::Mat& mat, m_priv->putText(mat, text, org, fh); } +#else + +cv::Size 
cv::gapi::wip::draw::FTTextRender::getTextSize(const std::wstring&, int, int*) +{ + cv::util::throw_error(std::runtime_error("Freetype not found")); +} + +void cv::gapi::wip::draw::FTTextRender::putText(cv::Mat&, const std::wstring&, const cv::Point&, int) +{ + cv::util::throw_error(std::runtime_error("Freetype not found")); +} + +cv::gapi::wip::draw::FTTextRender::FTTextRender(const std::string&) +{ + cv::util::throw_error(std::runtime_error("Freetype not found")); +} + #endif // HAVE_FREETYPE diff --git a/modules/gapi/src/api/ft_render.hpp b/modules/gapi/src/backends/render/ft_render.hpp similarity index 91% rename from modules/gapi/src/api/ft_render.hpp rename to modules/gapi/src/backends/render/ft_render.hpp index 2556c7269c..068c0d4d3f 100644 --- a/modules/gapi/src/api/ft_render.hpp +++ b/modules/gapi/src/backends/render/ft_render.hpp @@ -23,8 +23,6 @@ namespace wip namespace draw { -#ifdef HAVE_FREETYPE - class GAPI_EXPORTS FTTextRender { public: @@ -38,12 +36,6 @@ private: std::shared_ptr m_priv; }; -#else - -class GAPI_EXPORTS FTTextRender {}; - -#endif // HAVE_FREETYPE - } // namespace draw } // namespace wip } // namespace gapi diff --git a/modules/gapi/src/api/ft_render_priv.hpp b/modules/gapi/src/backends/render/ft_render_priv.hpp similarity index 96% rename from modules/gapi/src/api/ft_render_priv.hpp rename to modules/gapi/src/backends/render/ft_render_priv.hpp index 5a0679dd99..903f439b96 100644 --- a/modules/gapi/src/api/ft_render_priv.hpp +++ b/modules/gapi/src/backends/render/ft_render_priv.hpp @@ -10,7 +10,7 @@ #ifndef OPENCV_FT_RENDER_PRIV_HPP #define OPENCV_FT_RENDER_PRIV_HPP -#include "api/ft_render.hpp" +#include "ft_render.hpp" #include #include FT_FREETYPE_H diff --git a/modules/gapi/src/backends/render/grenderocv.cpp b/modules/gapi/src/backends/render/grenderocv.cpp index cb4fd1be3a..71be889d79 100644 --- a/modules/gapi/src/backends/render/grenderocv.cpp +++ b/modules/gapi/src/backends/render/grenderocv.cpp @@ -1,16 +1,21 @@ #include 
#include "api/render_ocv.hpp" -#include "backends/render/grenderocv.hpp" #include +#include -GAPI_RENDER_OCV_KERNEL(RenderBGROCVImpl, cv::gapi::wip::draw::GRenderBGR) +struct RenderOCVState +{ + std::shared_ptr ftpr; +}; + +GAPI_OCV_KERNEL_ST(RenderBGROCVImpl, cv::gapi::wip::draw::GRenderBGR, RenderOCVState) { static void run(const cv::Mat& in, const cv::gapi::wip::draw::Prims& prims, - cv::gapi::wip::draw::FTTextRender* ftpr, - cv::Mat& out) + cv::Mat& out, + RenderOCVState& state) { // NB: If in and out cv::Mats are the same object // we can avoid copy and render on out cv::Mat @@ -19,18 +24,33 @@ GAPI_RENDER_OCV_KERNEL(RenderBGROCVImpl, cv::gapi::wip::draw::GRenderBGR) in.copyTo(out); } - cv::gapi::wip::draw::drawPrimitivesOCVBGR(out, prims, ftpr); + cv::gapi::wip::draw::drawPrimitivesOCVBGR(out, prims, state.ftpr); + } + + static void setup(const cv::GMatDesc& /* in */, + const cv::GArrayDesc& /* prims */, + std::shared_ptr& state, + const cv::GCompileArgs& args) + { + using namespace cv::gapi::wip::draw; + auto opt_freetype_font = cv::gapi::getCompileArg(args); + state = std::make_shared(); + + if (opt_freetype_font.has_value()) + { + state->ftpr = std::make_shared(opt_freetype_font->path); + } } }; -GAPI_RENDER_OCV_KERNEL(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12) +GAPI_OCV_KERNEL_ST(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12, RenderOCVState) { static void run(const cv::Mat& in_y, const cv::Mat& in_uv, const cv::gapi::wip::draw::Prims& prims, - cv::gapi::wip::draw::FTTextRender* ftpr, cv::Mat& out_y, - cv::Mat& out_uv) + cv::Mat& out_uv, + RenderOCVState& state) { // NB: If in and out cv::Mats are the same object // we can avoid copy and render on out cv::Mat @@ -67,7 +87,7 @@ GAPI_RENDER_OCV_KERNEL(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12) cv::resize(in_uv, upsample_uv, in_uv.size() * 2, cv::INTER_LINEAR); cv::merge(std::vector{in_y, upsample_uv}, yuv); - cv::gapi::wip::draw::drawPrimitivesOCVYUV(yuv, prims, ftpr); + 
cv::gapi::wip::draw::drawPrimitivesOCVYUV(yuv, prims, state.ftpr); // YUV -> NV12 cv::Mat out_u, out_v, uv_plane; @@ -76,6 +96,22 @@ GAPI_RENDER_OCV_KERNEL(RenderNV12OCVImpl, cv::gapi::wip::draw::GRenderNV12) cv::merge(std::vector{chs[1], chs[2]}, uv_plane); cv::resize(uv_plane, out_uv, uv_plane.size() / 2, cv::INTER_LINEAR); } + + static void setup(const cv::GMatDesc& /* in_y */, + const cv::GMatDesc& /* in_uv */, + const cv::GArrayDesc& /* prims */, + std::shared_ptr& state, + const cv::GCompileArgs& args) + { + using namespace cv::gapi::wip::draw; + auto has_freetype_font = cv::gapi::getCompileArg(args); + state = std::make_shared(); + + if (has_freetype_font) + { + state->ftpr = std::make_shared(has_freetype_font->path); + } + } }; cv::gapi::GKernelPackage cv::gapi::render::ocv::kernels() diff --git a/modules/gapi/src/backends/render/grenderocv.hpp b/modules/gapi/src/backends/render/grenderocv.hpp deleted file mode 100644 index e5091042b2..0000000000 --- a/modules/gapi/src/backends/render/grenderocv.hpp +++ /dev/null @@ -1,55 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. 
-// -// Copyright (C) 2019 Intel Corporation - -#ifndef OPENCV_GAPI_GRENDEROCV_HPP -#define OPENCV_GAPI_GRENDEROCV_HPP - -#include -#include "api/render_priv.hpp" -#include "api/ft_render.hpp" - -namespace cv -{ -namespace gapi -{ -namespace render -{ -namespace ocv -{ - -GAPI_EXPORTS cv::gapi::GBackend backend(); - -template -struct add_type_to_tuple; - -template -struct add_type_to_tuple> -{ - using type = std::tuple; -}; - -template -class GRenderKernelImpl: public cv::detail::OCVCallHelper, - public cv::detail::KernelTag -{ - using InArgs = typename add_type_to_tuple::type; - using P = detail::OCVCallHelper; - -public: - using API = K; - - static cv::gapi::GBackend backend() { return cv::gapi::render::ocv::backend(); } - static cv::GCPUKernel kernel() { return GCPUKernel(&P::call); } -}; - -#define GAPI_RENDER_OCV_KERNEL(Name, API) struct Name: public cv::gapi::render::ocv::GRenderKernelImpl - -} // namespace ocv -} // namespace render -} // namespace gapi -} // namespace cv - -#endif // OPENCV_GAPI_GRENDEROCV_HPP diff --git a/modules/gapi/src/backends/render/grenderocvbackend.cpp b/modules/gapi/src/backends/render/grenderocvbackend.cpp deleted file mode 100644 index 413d0c3f9c..0000000000 --- a/modules/gapi/src/backends/render/grenderocvbackend.cpp +++ /dev/null @@ -1,161 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. -// -// Copyright (C) 2018-2020 Intel Corporation - -#include "precomp.hpp" - -#include -#include - -#include - -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "compiler/gobjref.hpp" -#include "compiler/gmodel.hpp" - -#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK! 
-#include "api/render_ocv.hpp" - -#include "backends/render/grenderocvbackend.hpp" - -#include -#include "api/ocv_mask_creator.hpp" -#include "api/ft_render.hpp" - - -using GRenderModel = ade::TypedGraph - < cv::gimpl::render::ocv::RenderUnit - >; - -// FIXME: Same issue with Typed and ConstTyped -using GConstRenderModel = ade::ConstTypedGraph - < cv::gimpl::render::ocv::RenderUnit - >; - -cv::gimpl::render::ocv::GRenderExecutable::GRenderExecutable(const ade::Graph &g, - const std::vector &nodes, - std::unique_ptr&& ftpr) - : m_g(g), m_gm(m_g), m_ftpr(std::move(ftpr)) { - GConstRenderModel gcm(m_g); - - auto is_op = [&](ade::NodeHandle nh) { - return m_gm.metadata(nh).get().t == NodeType::OP; - }; - - auto it = ade::util::find_if(nodes, is_op); - - GAPI_Assert(it != nodes.end()); - this_nh = *it; - - if (!std::none_of(std::next(it), nodes.end(), is_op)) { - util::throw_error(std::logic_error("Multi-node rendering is not supported!")); - } -} - -void cv::gimpl::render::ocv::GRenderExecutable::run(std::vector &&input_objs, - std::vector &&output_objs) { - GConstRenderModel gcm(m_g); - - for (auto& it : input_objs) magazine::bindInArg (m_res, it.first, it.second); - for (auto& it : output_objs) magazine::bindOutArg(m_res, it.first, it.second); - - const auto &op = m_gm.metadata(this_nh).get(); - - // Initialize kernel's execution context: - // - Input parameters - GCPUContext context; - context.m_args.reserve(op.args.size()); - using namespace std::placeholders; - ade::util::transform(op.args, - std::back_inserter(context.m_args), - std::bind(&GRenderExecutable::packArg, this, _1)); - - // - Output parameters. - for (const auto &out_it : ade::util::indexed(op.outs)) { - // FIXME: Can the same GArg type resolution mechanism be reused here? 
- const auto out_port = ade::util::index(out_it); - const auto out_desc = ade::util::value(out_it); - context.m_results[out_port] = magazine::getObjPtr(m_res, out_desc); - } - - auto k = gcm.metadata(this_nh).get().k; - - context.m_args.emplace_back(m_ftpr.get()); - - k.m_runF(context); - - for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second); - - // In/Out args clean-up is mandatory now with RMat - for (auto &it : input_objs) magazine::unbind(m_res, it.first); - for (auto &it : output_objs) magazine::unbind(m_res, it.first); -} - -cv::GArg cv::gimpl::render::ocv::GRenderExecutable::packArg(const cv::GArg &arg) { - // No API placeholders allowed at this point - // FIXME: this check has to be done somewhere in compilation stage. - GAPI_Assert( arg.kind != cv::detail::ArgKind::GMAT - && arg.kind != cv::detail::ArgKind::GSCALAR - && arg.kind != cv::detail::ArgKind::GARRAY); - - if (arg.kind != cv::detail::ArgKind::GOBJREF) { - util::throw_error(std::logic_error("Render supports G-types ONLY!")); - } - GAPI_Assert(arg.kind == cv::detail::ArgKind::GOBJREF); - - const cv::gimpl::RcDesc &ref = arg.get(); - switch (ref.shape) - { - case GShape::GMAT: return GArg(m_res.slot()[ref.id]); - case GShape::GARRAY: return GArg(m_res.slot().at(ref.id)); - default: - util::throw_error(std::logic_error("Unsupported GShape type")); - break; - } -} - -namespace { - class GRenderBackendImpl final: public cv::gapi::GBackend::Priv { - virtual void unpackKernel(ade::Graph &gr, - const ade::NodeHandle &op_node, - const cv::GKernelImpl &impl) override { - GRenderModel rm(gr); - auto render_impl = cv::util::any_cast(impl.opaque); - rm.metadata(op_node).set(cv::gimpl::render::ocv::RenderUnit{render_impl}); - } - - virtual EPtr compile(const ade::Graph &graph, - const cv::GCompileArgs& args, - const std::vector &nodes) const override { - - using namespace cv::gapi::wip::draw; - auto has_freetype_font = cv::gapi::getCompileArg(args); - std::unique_ptr ftpr; - if 
(has_freetype_font) - { -#ifndef HAVE_FREETYPE - throw std::runtime_error("Freetype not found"); -#else - ftpr.reset(new FTTextRender(has_freetype_font.value().path)); -#endif - } - return EPtr{new cv::gimpl::render::ocv::GRenderExecutable(graph, nodes, std::move(ftpr))}; - } - }; -} - -cv::gapi::GBackend cv::gapi::render::ocv::backend() { - static cv::gapi::GBackend this_backend(std::make_shared()); - return this_backend; -} diff --git a/modules/gapi/src/backends/render/grenderocvbackend.hpp b/modules/gapi/src/backends/render/grenderocvbackend.hpp deleted file mode 100644 index 69d388ffe6..0000000000 --- a/modules/gapi/src/backends/render/grenderocvbackend.hpp +++ /dev/null @@ -1,73 +0,0 @@ -// This file is part of OpenCV project. -// It is subject to the license terms in the LICENSE file found in the top-level directory -// of this distribution and at http://opencv.org/license.html. -// -// Copyright (C) 2019 Intel Corporation - -#ifndef OPENCV_GAPI_GRENDEROCVBACKEND_HPP -#define OPENCV_GAPI_GRENDEROCVBACKEND_HPP - -#include -#include -#include - -#include "api/gorigin.hpp" -#include "backends/common/gbackend.hpp" -#include "compiler/gislandmodel.hpp" - -#include "backends/render/grenderocv.hpp" - -#include - -namespace cv -{ -namespace gimpl -{ -namespace render -{ -namespace ocv -{ - -struct RenderUnit -{ - static const char *name() { return "RenderUnit"; } - GCPUKernel k; -}; - -class GRenderExecutable final: public GIslandExecutable -{ - const ade::Graph &m_g; - GModel::ConstGraph m_gm; - std::unique_ptr m_ftpr; - - // The only executable stuff in this graph - // (assuming it is always single-op) - ade::NodeHandle this_nh; - - //// Actual data of all resources in graph (both internal and external) - Mag m_res; - - //// Execution helpers - GArg packArg(const GArg &arg); - -public: - GRenderExecutable(const ade::Graph &graph, - const std::vector &nodes, - std::unique_ptr&& ftpr); - - virtual inline bool canReshape() const override { return false; } - - virtual 
inline void reshape(ade::Graph&, const GCompileArgs&) override { - GAPI_Assert(false); // Not implemented yet - } - - virtual void run(std::vector &&input_objs, - std::vector &&output_objs) override; -}; - -} // namespace ocv -} // namespace render -} // namespace gimpl -} // namespace cv - -#endif // OPENCV_GAPI_GRENDEROCVBACKEND_HPP diff --git a/modules/gapi/test/render/ftp_render_test.cpp b/modules/gapi/test/render/ftp_render_test.cpp index 5bdbb74e30..af9c5c6f13 100644 --- a/modules/gapi/test/render/ftp_render_test.cpp +++ b/modules/gapi/test/render/ftp_render_test.cpp @@ -13,7 +13,7 @@ #include -#include "api/ft_render.hpp" +#include "backends/render/ft_render.hpp" namespace opencv_test { diff --git a/modules/gapi/test/render/gapi_render_tests_ocv.cpp b/modules/gapi/test/render/gapi_render_tests_ocv.cpp index f727d977aa..88b5d88075 100644 --- a/modules/gapi/test/render/gapi_render_tests_ocv.cpp +++ b/modules/gapi/test/render/gapi_render_tests_ocv.cpp @@ -95,7 +95,6 @@ TEST_P(RenderNV12OCVTestFTexts, AccuracyTest) cv::compile_args(cv::gapi::wip::draw::freetype_font{ "/usr/share/fonts/truetype/wqy/wqy-microhei.ttc" }))); - } static std::wstring to_wstring(const char* bytes) From 1fe276d0418b2edff077b3e9ccff2aac4d10ec14 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 26 Oct 2020 22:58:30 +0000 Subject: [PATCH 054/152] core: move inline code from mat.inl.hpp (OpenCV 4.x additions) base commit: aac7c5465ba6ccfe0dc665ab0bae87f765e616ba --- modules/core/include/opencv2/core/mat.inl.hpp | 121 ------------------ modules/core/src/matrix.cpp | 60 +++++++++ modules/core/src/umatrix.cpp | 59 +++++++++ 3 files changed, 119 insertions(+), 121 deletions(-) diff --git a/modules/core/include/opencv2/core/mat.inl.hpp b/modules/core/include/opencv2/core/mat.inl.hpp index 36593563a9..d6296f8e2e 100644 --- a/modules/core/include/opencv2/core/mat.inl.hpp +++ b/modules/core/include/opencv2/core/mat.inl.hpp @@ -1112,67 +1112,6 @@ void Mat::push_back(const std::vector<_Tp>& 
v) push_back(Mat(v)); } -inline -Mat::Mat(Mat&& m) - : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), - datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator), - u(m.u), size(&rows) -{ - if (m.dims <= 2) // move new step/size info - { - step[0] = m.step[0]; - step[1] = m.step[1]; - } - else - { - CV_DbgAssert(m.step.p != m.step.buf); - step.p = m.step.p; - size.p = m.size.p; - m.step.p = m.step.buf; - m.size.p = &m.rows; - } - m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; - m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL; - m.allocator = NULL; - m.u = NULL; -} - -inline -Mat& Mat::operator = (Mat&& m) -{ - if (this == &m) - return *this; - - release(); - flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols; data = m.data; - datastart = m.datastart; dataend = m.dataend; datalimit = m.datalimit; allocator = m.allocator; - u = m.u; - if (step.p != step.buf) // release self step/size - { - fastFree(step.p); - step.p = step.buf; - size.p = &rows; - } - if (m.dims <= 2) // move new step/size info - { - step[0] = m.step[0]; - step[1] = m.step[1]; - } - else - { - CV_DbgAssert(m.step.p != m.step.buf); - step.p = m.step.p; - size.p = m.size.p; - m.step.p = m.step.buf; - m.size.p = &m.rows; - } - m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; - m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL; - m.allocator = NULL; - m.u = NULL; - return *this; -} - ///////////////////////////// MatSize //////////////////////////// @@ -3342,66 +3281,6 @@ size_t UMat::step1(int i) const return step.p[i] / elemSize1(); } -inline -UMat::UMat(UMat&& m) -: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator), - usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows) -{ - if (m.dims <= 2) // move new step/size info - { - step[0] = m.step[0]; - step[1] = m.step[1]; - } - else - { - CV_DbgAssert(m.step.p != m.step.buf); - step.p = m.step.p; 
- size.p = m.size.p; - m.step.p = m.step.buf; - m.size.p = &m.rows; - } - m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; - m.allocator = NULL; - m.u = NULL; - m.offset = 0; -} - -inline -UMat& UMat::operator = (UMat&& m) -{ - if (this == &m) - return *this; - release(); - flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols; - allocator = m.allocator; usageFlags = m.usageFlags; - u = m.u; - offset = m.offset; - if (step.p != step.buf) // release self step/size - { - fastFree(step.p); - step.p = step.buf; - size.p = &rows; - } - if (m.dims <= 2) // move new step/size info - { - step[0] = m.step[0]; - step[1] = m.step[1]; - } - else - { - CV_DbgAssert(m.step.p != m.step.buf); - step.p = m.step.p; - size.p = m.size.p; - m.step.p = m.step.buf; - m.size.p = &m.rows; - } - m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; - m.allocator = NULL; - m.u = NULL; - m.offset = 0; - return *this; -} - inline bool UMatData::hostCopyObsolete() const { return (flags & HOST_COPY_OBSOLETE) != 0; } inline bool UMatData::deviceCopyObsolete() const { return (flags & DEVICE_COPY_OBSOLETE) != 0; } diff --git a/modules/core/src/matrix.cpp b/modules/core/src/matrix.cpp index 6c874832c9..122b383379 100644 --- a/modules/core/src/matrix.cpp +++ b/modules/core/src/matrix.cpp @@ -599,6 +599,66 @@ size_t Mat::total(int startDim, int endDim) const } +Mat::Mat(Mat&& m) + : flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), data(m.data), + datastart(m.datastart), dataend(m.dataend), datalimit(m.datalimit), allocator(m.allocator), + u(m.u), size(&rows) +{ + if (m.dims <= 2) // move new step/size info + { + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + { + CV_Assert(m.step.p != m.step.buf); + step.p = m.step.p; + size.p = m.size.p; + m.step.p = m.step.buf; + m.size.p = &m.rows; + } + m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; + m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL; + m.allocator = NULL; + m.u = NULL; +} + + +Mat& 
Mat::operator=(Mat&& m) +{ + if (this == &m) + return *this; + + release(); + flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols; data = m.data; + datastart = m.datastart; dataend = m.dataend; datalimit = m.datalimit; allocator = m.allocator; + u = m.u; + if (step.p != step.buf) // release self step/size + { + fastFree(step.p); + step.p = step.buf; + size.p = &rows; + } + if (m.dims <= 2) // move new step/size info + { + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + { + CV_Assert(m.step.p != m.step.buf); + step.p = m.step.p; + size.p = m.size.p; + m.step.p = m.step.buf; + m.size.p = &m.rows; + } + m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; + m.data = NULL; m.datastart = NULL; m.dataend = NULL; m.datalimit = NULL; + m.allocator = NULL; + m.u = NULL; + return *this; +} + void Mat::create(int d, const int* _sizes, int _type) { diff --git a/modules/core/src/umatrix.cpp b/modules/core/src/umatrix.cpp index 0a2de6017f..0ec6270a70 100644 --- a/modules/core/src/umatrix.cpp +++ b/modules/core/src/umatrix.cpp @@ -375,6 +375,65 @@ size_t UMat::total() const } +UMat::UMat(UMat&& m) +: flags(m.flags), dims(m.dims), rows(m.rows), cols(m.cols), allocator(m.allocator), + usageFlags(m.usageFlags), u(m.u), offset(m.offset), size(&rows) +{ + if (m.dims <= 2) // move new step/size info + { + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + { + CV_DbgAssert(m.step.p != m.step.buf); + step.p = m.step.p; + size.p = m.size.p; + m.step.p = m.step.buf; + m.size.p = &m.rows; + } + m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; + m.allocator = NULL; + m.u = NULL; + m.offset = 0; +} + +UMat& UMat::operator=(UMat&& m) +{ + if (this == &m) + return *this; + release(); + flags = m.flags; dims = m.dims; rows = m.rows; cols = m.cols; + allocator = m.allocator; usageFlags = m.usageFlags; + u = m.u; + offset = m.offset; + if (step.p != step.buf) // release self step/size + { + fastFree(step.p); + step.p = step.buf; + size.p = &rows; + } + if (m.dims <= 2) // 
move new step/size info + { + step[0] = m.step[0]; + step[1] = m.step[1]; + } + else + { + CV_DbgAssert(m.step.p != m.step.buf); + step.p = m.step.p; + size.p = m.size.p; + m.step.p = m.step.buf; + m.size.p = &m.rows; + } + m.flags = MAGIC_VAL; m.dims = m.rows = m.cols = 0; + m.allocator = NULL; + m.u = NULL; + m.offset = 0; + return *this; +} + + MatAllocator* UMat::getStdAllocator() { #ifdef HAVE_OPENCL From d011383a3d1b6430ade24857c5e74942246b89c4 Mon Sep 17 00:00:00 2001 From: Mikkel Green Date: Mon, 26 Oct 2020 16:04:55 -0700 Subject: [PATCH 055/152] Bugfix: Manual page sizes now overwrite the default page size if they are both specified. Remove redundant .upper() call, 1 to 1 key matching enforced by argparse choices. --- doc/pattern_tools/gen_pattern.py | 24 ++++++++++++++---------- 1 file changed, 14 insertions(+), 10 deletions(-) diff --git a/doc/pattern_tools/gen_pattern.py b/doc/pattern_tools/gen_pattern.py index 1f90615736..a6ffc7ca7e 100755 --- a/doc/pattern_tools/gen_pattern.py +++ b/doc/pattern_tools/gen_pattern.py @@ -92,11 +92,11 @@ def main(): dest="square_size", type=float) parser.add_argument("-R", "--radius_rate", help="circles_radius = square_size/radius_rate", default="5.0", action="store", dest="radius_rate", type=float) - parser.add_argument("-w", "--page_width", help="page width in units", default="216", action="store", + parser.add_argument("-w", "--page_width", help="page width in units", default=argparse.SUPPRESS, action="store", dest="page_width", type=float) - parser.add_argument("-h", "--page_height", help="page height in units", default="279", action="store", - dest="page_width", type=float) + parser.add_argument("-h", "--page_height", help="page height in units", default=argparse.SUPPRESS, action="store", + dest="page_height", type=float) - parser.add_argument("-a", "--page_size", help="page size, supersedes -h -w arguments", default="A4", action="store", + parser.add_argument("-a", "--page_size", help="page size, superseded if -h 
and -w are set", default="A4", action="store", dest="page_size", choices=["A0", "A1", "A2", "A3", "A4", "A5"]) args = parser.parse_args() @@ -111,12 +111,16 @@ def main(): units = args.units square_size = args.square_size radius_rate = args.radius_rate - page_size = args.page_size - # page size dict (ISO standard, mm) for easy lookup. format - size: [width, height] - page_sizes = {"A0": [840, 1188], "A1": [594, 840], "A2": [420, 594], "A3": [297, 420], "A4": [210, 297], - "A5": [148, 210]} - page_width = page_sizes[page_size.upper()][0] - page_height = page_sizes[page_size.upper()][1] + if 'page_width' and 'page_height' in args: + page_width = args.page_width + page_height = args.page_height + else: + page_size = args.page_size + # page size dict (ISO standard, mm) for easy lookup. format - size: [width, height] + page_sizes = {"A0": [840, 1188], "A1": [594, 840], "A2": [420, 594], "A3": [297, 420], "A4": [210, 297], + "A5": [148, 210]} + page_width = page_sizes[page_size][0] + page_height = page_sizes[page_size][1] pm = PatternMaker(columns, rows, output, units, square_size, radius_rate, page_width, page_height) # dict for easy lookup of pattern type mp = {"circles": pm.make_circles_pattern, "acircles": pm.make_acircles_pattern, From 0f7b2eb79f8a2069177cb657e394093ef2ca7c5d Mon Sep 17 00:00:00 2001 From: APrigarina Date: Wed, 28 Oct 2020 12:48:42 +0300 Subject: [PATCH 056/152] fix curved qrcodes decoding --- modules/objdetect/src/qrcode.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/objdetect/src/qrcode.cpp b/modules/objdetect/src/qrcode.cpp index 5b86f74614..449e6e6d32 100644 --- a/modules/objdetect/src/qrcode.cpp +++ b/modules/objdetect/src/qrcode.cpp @@ -993,7 +993,7 @@ protected: bool computeClosestPoints(const vector &result_integer_hull); bool computeSidesPoints(const vector &result_integer_hull); vector getPointsNearUnstablePoint(const vector &side, int start, int end, int step); - bool findAndAddStablePoint(const 
vector &result_integer_hull); + bool findAndAddStablePoint(); bool findIndexesCurvedSides(); bool findIncompleteIndexesCurvedSides(); Mat getPatternsMask(); @@ -1274,7 +1274,7 @@ vector QRDecode::getPointsNearUnstablePoint(const vector &side, in return points; } -bool QRDecode::findAndAddStablePoint(const vector &result_integer_hull) +bool QRDecode::findAndAddStablePoint() { size_t idx_unstable_point = unstable_pair.first; Point unstable_point = unstable_pair.second; @@ -1385,7 +1385,7 @@ bool QRDecode::findAndAddStablePoint(const vector &result_integer_hull) bool add_stable_point = true; - for (size_t i = 0; i < result_integer_hull.size(); i++) + for (size_t i = 0; i < original_points.size(); i++) { if(arePointsNearest(stable_point, original_points[i], 3.0)) { @@ -2211,7 +2211,7 @@ bool QRDecode::preparingCurvedQRCodes() return false; if (!computeSidesPoints(result_integer_hull)) return false; - if (!findAndAddStablePoint(result_integer_hull)) + if (!findAndAddStablePoint()) return false; if (!findIndexesCurvedSides()) return false; From 364702b1c98943e4e306e745389d3f464010f069 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Tue, 27 Oct 2020 19:00:25 +0000 Subject: [PATCH 057/152] cmake(3rdparty): use EXCLUDE_FROM_ALL --- 3rdparty/carotene/CMakeLists.txt | 4 ++-- 3rdparty/cpufeatures/CMakeLists.txt | 4 ++-- 3rdparty/ippicv/CMakeLists.txt | 4 ++-- 3rdparty/ittnotify/CMakeLists.txt | 4 ++-- 3rdparty/libjasper/CMakeLists.txt | 4 ++-- 3rdparty/libjpeg-turbo/CMakeLists.txt | 4 ++-- 3rdparty/libjpeg/CMakeLists.txt | 4 ++-- 3rdparty/libpng/CMakeLists.txt | 4 ++-- 3rdparty/libtiff/CMakeLists.txt | 4 ++-- 3rdparty/libwebp/CMakeLists.txt | 4 ++-- 3rdparty/openexr/CMakeLists.txt | 4 ++-- 3rdparty/protobuf/CMakeLists.txt | 4 ++-- 3rdparty/quirc/CMakeLists.txt | 4 ++-- 3rdparty/tbb/CMakeLists.txt | 3 ++- 3rdparty/zlib/CMakeLists.txt | 2 +- cmake/OpenCVUtils.cmake | 6 ++++++ 16 files changed, 35 insertions(+), 28 deletions(-) diff --git a/3rdparty/carotene/CMakeLists.txt 
b/3rdparty/carotene/CMakeLists.txt index 4319815708..bd26a2d7ef 100644 --- a/3rdparty/carotene/CMakeLists.txt +++ b/3rdparty/carotene/CMakeLists.txt @@ -27,7 +27,7 @@ if(CMAKE_COMPILER_IS_GNUCC) endif() endif() -add_library(carotene_objs OBJECT +add_library(carotene_objs OBJECT EXCLUDE_FROM_ALL ${carotene_headers} ${carotene_sources} ) @@ -41,4 +41,4 @@ if(WITH_NEON) endif() # we add dummy file to fix XCode build -add_library(carotene STATIC EXCLUDE_FROM_ALL "$" "${CAROTENE_SOURCE_DIR}/dummy.cpp") +add_library(carotene STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} "$" "${CAROTENE_SOURCE_DIR}/dummy.cpp") diff --git a/3rdparty/cpufeatures/CMakeLists.txt b/3rdparty/cpufeatures/CMakeLists.txt index 92bce6abf8..bf7af0ecde 100644 --- a/3rdparty/cpufeatures/CMakeLists.txt +++ b/3rdparty/cpufeatures/CMakeLists.txt @@ -14,7 +14,7 @@ if(NOT DEFINED CPUFEATURES_SOURCES) endif() include_directories(${CPUFEATURES_INCLUDE_DIRS}) -add_library(${OPENCV_CPUFEATURES_TARGET_NAME} STATIC ${CPUFEATURES_SOURCES}) +add_library(${OPENCV_CPUFEATURES_TARGET_NAME} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${CPUFEATURES_SOURCES}) set_target_properties(${OPENCV_CPUFEATURES_TARGET_NAME} PROPERTIES OUTPUT_NAME cpufeatures @@ -29,7 +29,7 @@ if(ENABLE_SOLUTION_FOLDERS) endif() if(NOT BUILD_SHARED_LIBS) - ocv_install_target(${OPENCV_CPUFEATURES_TARGET_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(${OPENCV_CPUFEATURES_TARGET_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() ocv_install_3rdparty_licenses(cpufeatures LICENSE README.md) diff --git a/3rdparty/ippicv/CMakeLists.txt b/3rdparty/ippicv/CMakeLists.txt index 7931832737..43ad806dd7 100644 --- a/3rdparty/ippicv/CMakeLists.txt +++ b/3rdparty/ippicv/CMakeLists.txt @@ -17,7 +17,7 @@ file(GLOB lib_hdrs ${IPP_IW_PATH}/include/*.h ${IPP_IW_PATH}/include/iw/*.h ${IP # Define the library target: # 
---------------------------------------------------------------------------------- -add_library(${IPP_IW_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs}) +add_library(${IPP_IW_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs}) if(UNIX) if(CV_GCC OR CV_CLANG OR CV_ICC) @@ -41,5 +41,5 @@ if(ENABLE_SOLUTION_FOLDERS) endif() if(NOT BUILD_SHARED_LIBS) - ocv_install_target(${IPP_IW_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(${IPP_IW_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() diff --git a/3rdparty/ittnotify/CMakeLists.txt b/3rdparty/ittnotify/CMakeLists.txt index c2caf76723..a227aff88e 100644 --- a/3rdparty/ittnotify/CMakeLists.txt +++ b/3rdparty/ittnotify/CMakeLists.txt @@ -37,7 +37,7 @@ set(ITT_SRCS src/ittnotify/jitprofiling.c ) -add_library(${ITT_LIBRARY} STATIC ${ITT_SRCS} ${ITT_PUBLIC_HDRS} ${ITT_PRIVATE_HDRS}) +add_library(${ITT_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${ITT_SRCS} ${ITT_PUBLIC_HDRS} ${ITT_PRIVATE_HDRS}) if(NOT WIN32) if(HAVE_DL_LIBRARY) @@ -60,7 +60,7 @@ if(ENABLE_SOLUTION_FOLDERS) endif() if(NOT BUILD_SHARED_LIBS) - ocv_install_target(${ITT_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(${ITT_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() ocv_install_3rdparty_licenses(ittnotify src/ittnotify/LICENSE.BSD src/ittnotify/LICENSE.GPL) diff --git a/3rdparty/libjasper/CMakeLists.txt b/3rdparty/libjasper/CMakeLists.txt index 897b6ae606..9f05d89733 100644 --- a/3rdparty/libjasper/CMakeLists.txt +++ b/3rdparty/libjasper/CMakeLists.txt @@ -17,7 +17,7 @@ file(GLOB lib_ext_hdrs jasper/*.h) # Define the library target: # ---------------------------------------------------------------------------------- -add_library(${JASPER_LIBRARY} STATIC ${lib_srcs} 
${lib_hdrs} ${lib_ext_hdrs}) +add_library(${JASPER_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs} ${lib_ext_hdrs}) if(WIN32 AND NOT MINGW) add_definitions(-DJAS_WIN_MSVC_BUILD) @@ -46,7 +46,7 @@ if(ENABLE_SOLUTION_FOLDERS) endif() if(NOT BUILD_SHARED_LIBS) - ocv_install_target(${JASPER_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(${JASPER_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() ocv_install_3rdparty_licenses(jasper LICENSE README copyright) diff --git a/3rdparty/libjpeg-turbo/CMakeLists.txt b/3rdparty/libjpeg-turbo/CMakeLists.txt index 374d7875de..8da98b6020 100644 --- a/3rdparty/libjpeg-turbo/CMakeLists.txt +++ b/3rdparty/libjpeg-turbo/CMakeLists.txt @@ -106,7 +106,7 @@ set(JPEG_SOURCES ${JPEG_SOURCES} jsimd_none.c) ocv_list_add_prefix(JPEG_SOURCES src/) -add_library(${JPEG_LIBRARY} STATIC ${JPEG_SOURCES} ${SIMD_OBJS}) +add_library(${JPEG_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${JPEG_SOURCES} ${SIMD_OBJS}) set_target_properties(${JPEG_LIBRARY} PROPERTIES OUTPUT_NAME ${JPEG_LIBRARY} @@ -121,7 +121,7 @@ if(ENABLE_SOLUTION_FOLDERS) endif() if(NOT BUILD_SHARED_LIBS) - ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() ocv_install_3rdparty_licenses(libjpeg-turbo README.md LICENSE.md README.ijg) diff --git a/3rdparty/libjpeg/CMakeLists.txt b/3rdparty/libjpeg/CMakeLists.txt index b50fc09840..c0524cc38a 100644 --- a/3rdparty/libjpeg/CMakeLists.txt +++ b/3rdparty/libjpeg/CMakeLists.txt @@ -19,7 +19,7 @@ endif() # Define the library target: # ---------------------------------------------------------------------------------- -add_library(${JPEG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs}) 
+add_library(${JPEG_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs}) if(CV_GCC OR CV_CLANG) set_source_files_properties(jcdctmgr.c PROPERTIES COMPILE_FLAGS "-O1") @@ -42,7 +42,7 @@ if(ENABLE_SOLUTION_FOLDERS) endif() if(NOT BUILD_SHARED_LIBS) - ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(${JPEG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() ocv_install_3rdparty_licenses(libjpeg README) diff --git a/3rdparty/libpng/CMakeLists.txt b/3rdparty/libpng/CMakeLists.txt index 31e77676e8..efa59627eb 100644 --- a/3rdparty/libpng/CMakeLists.txt +++ b/3rdparty/libpng/CMakeLists.txt @@ -74,7 +74,7 @@ if(MSVC) add_definitions(-D_CRT_SECURE_NO_DEPRECATE) endif(MSVC) -add_library(${PNG_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs}) +add_library(${PNG_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs}) target_link_libraries(${PNG_LIBRARY} ${ZLIB_LIBRARIES}) ocv_warnings_disable(CMAKE_C_FLAGS -Wundef -Wcast-align -Wimplicit-fallthrough -Wunused-parameter -Wsign-compare) @@ -92,7 +92,7 @@ if(ENABLE_SOLUTION_FOLDERS) endif() if(NOT BUILD_SHARED_LIBS) - ocv_install_target(${PNG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(${PNG_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() ocv_install_3rdparty_licenses(libpng LICENSE README) diff --git a/3rdparty/libtiff/CMakeLists.txt b/3rdparty/libtiff/CMakeLists.txt index 16cb598955..61e40b2885 100644 --- a/3rdparty/libtiff/CMakeLists.txt +++ b/3rdparty/libtiff/CMakeLists.txt @@ -462,7 +462,7 @@ ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4456 /wd4457 /wd4312) # vs2015 ocv_warnings_disable(CMAKE_C_FLAGS /wd4267 /wd4244 /wd4018 /wd4311 /wd4312) -add_library(${TIFF_LIBRARY} STATIC ${lib_srcs}) 
+add_library(${TIFF_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs}) target_link_libraries(${TIFF_LIBRARY} ${ZLIB_LIBRARIES}) set_target_properties(${TIFF_LIBRARY} @@ -479,7 +479,7 @@ if(ENABLE_SOLUTION_FOLDERS) endif() if(NOT BUILD_SHARED_LIBS) - ocv_install_target(${TIFF_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(${TIFF_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() ocv_install_3rdparty_licenses(libtiff COPYRIGHT) diff --git a/3rdparty/libwebp/CMakeLists.txt b/3rdparty/libwebp/CMakeLists.txt index 83884c9d4d..80ab0b86ab 100644 --- a/3rdparty/libwebp/CMakeLists.txt +++ b/3rdparty/libwebp/CMakeLists.txt @@ -34,7 +34,7 @@ endif() add_definitions(-DWEBP_USE_THREAD) -add_library(${WEBP_LIBRARY} STATIC ${lib_srcs} ${lib_hdrs}) +add_library(${WEBP_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_srcs} ${lib_hdrs}) if(ANDROID) target_link_libraries(${WEBP_LIBRARY} ${CPUFEATURES_LIBRARIES}) endif() @@ -59,6 +59,6 @@ if(ENABLE_SOLUTION_FOLDERS) endif() if(NOT BUILD_SHARED_LIBS) - ocv_install_target(${WEBP_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(${WEBP_LIBRARY} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() diff --git a/3rdparty/openexr/CMakeLists.txt b/3rdparty/openexr/CMakeLists.txt index 2ee5146a3d..88f60b23c0 100644 --- a/3rdparty/openexr/CMakeLists.txt +++ b/3rdparty/openexr/CMakeLists.txt @@ -125,7 +125,7 @@ if(MSVC AND CV_ICC) set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Qrestrict") endif() -add_library(IlmImf STATIC ${lib_hdrs} ${lib_srcs}) +add_library(IlmImf STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${lib_hdrs} ${lib_srcs}) target_link_libraries(IlmImf ${ZLIB_LIBRARIES}) set_target_properties(IlmImf @@ -142,7 +142,7 @@ if(ENABLE_SOLUTION_FOLDERS) endif() if(NOT BUILD_SHARED_LIBS) - 
ocv_install_target(IlmImf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(IlmImf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() ocv_install_3rdparty_licenses(openexr LICENSE AUTHORS.ilmbase AUTHORS.openexr) diff --git a/3rdparty/protobuf/CMakeLists.txt b/3rdparty/protobuf/CMakeLists.txt index fc9497f928..c71bf9faff 100644 --- a/3rdparty/protobuf/CMakeLists.txt +++ b/3rdparty/protobuf/CMakeLists.txt @@ -141,7 +141,7 @@ append_if_exist(Protobuf_SRCS ) include_directories(BEFORE "${PROTOBUF_ROOT}/src") # ensure using if own headers: https://github.com/opencv/opencv/issues/13328 -add_library(libprotobuf STATIC ${Protobuf_SRCS}) +add_library(libprotobuf STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${Protobuf_SRCS}) target_include_directories(libprotobuf SYSTEM PUBLIC $) set_target_properties(libprotobuf PROPERTIES @@ -157,7 +157,7 @@ get_protobuf_version(Protobuf_VERSION "${PROTOBUF_ROOT}/src") set(Protobuf_VERSION ${Protobuf_VERSION} CACHE INTERNAL "" FORCE) if(NOT BUILD_SHARED_LIBS) - ocv_install_target(libprotobuf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(libprotobuf EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() ocv_install_3rdparty_licenses(protobuf LICENSE README.md) diff --git a/3rdparty/quirc/CMakeLists.txt b/3rdparty/quirc/CMakeLists.txt index 7a6b2bb222..c0464c16ae 100644 --- a/3rdparty/quirc/CMakeLists.txt +++ b/3rdparty/quirc/CMakeLists.txt @@ -8,7 +8,7 @@ ocv_include_directories(${CURR_INCLUDE_DIR}) file(GLOB_RECURSE quirc_headers RELATIVE "${CMAKE_CURRENT_LIST_DIR}" "include/*.h") file(GLOB_RECURSE quirc_sources RELATIVE "${CMAKE_CURRENT_LIST_DIR}" "src/*.c") -add_library(${PROJECT_NAME} STATIC ${quirc_headers} ${quirc_sources}) +add_library(${PROJECT_NAME} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${quirc_headers} 
${quirc_sources}) ocv_warnings_disable(CMAKE_C_FLAGS -Wunused-variable -Wshadow) set_target_properties(${PROJECT_NAME} @@ -24,7 +24,7 @@ if(ENABLE_SOLUTION_FOLDERS) endif() if(NOT BUILD_SHARED_LIBS) - ocv_install_target(${PROJECT_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev) + ocv_install_target(${PROJECT_NAME} EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL) endif() ocv_install_3rdparty_licenses(${PROJECT_NAME} LICENSE) diff --git a/3rdparty/tbb/CMakeLists.txt b/3rdparty/tbb/CMakeLists.txt index 2aa9127da0..a085b0f3ca 100644 --- a/3rdparty/tbb/CMakeLists.txt +++ b/3rdparty/tbb/CMakeLists.txt @@ -108,7 +108,7 @@ set(tbb_version_file "version_string.ver") configure_file("${CMAKE_CURRENT_SOURCE_DIR}/${tbb_version_file}.cmakein" "${CMAKE_CURRENT_BINARY_DIR}/${tbb_version_file}" @ONLY) list(APPEND TBB_SOURCE_FILES "${CMAKE_CURRENT_BINARY_DIR}/${tbb_version_file}") -add_library(tbb ${TBB_SOURCE_FILES}) +add_library(tbb ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${TBB_SOURCE_FILES}) target_compile_definitions(tbb PUBLIC TBB_USE_GCC_BUILTINS=1 __TBB_GCC_BUILTIN_ATOMICS_PRESENT=1 @@ -165,6 +165,7 @@ ocv_install_target(tbb EXPORT OpenCVModules RUNTIME DESTINATION ${OPENCV_BIN_INSTALL_PATH} COMPONENT libs LIBRARY DESTINATION ${OPENCV_LIB_INSTALL_PATH} COMPONENT libs ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev + OPTIONAL ) ocv_install_3rdparty_licenses(tbb "${tbb_src_dir}/LICENSE" "${tbb_src_dir}/README") diff --git a/3rdparty/zlib/CMakeLists.txt b/3rdparty/zlib/CMakeLists.txt index 553700bacc..9758861a6b 100644 --- a/3rdparty/zlib/CMakeLists.txt +++ b/3rdparty/zlib/CMakeLists.txt @@ -76,7 +76,7 @@ set(ZLIB_SRCS zutil.c ) -add_library(${ZLIB_LIBRARY} STATIC ${ZLIB_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS}) +add_library(${ZLIB_LIBRARY} STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL} ${ZLIB_SRCS} ${ZLIB_PUBLIC_HDRS} ${ZLIB_PRIVATE_HDRS}) set_target_properties(${ZLIB_LIBRARY} 
PROPERTIES DEFINE_SYMBOL ZLIB_DLL) ocv_warnings_disable(CMAKE_C_FLAGS -Wshorten-64-to-32 -Wattributes -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations -Wshift-negative-value diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake index 610f0e6437..693a840ffe 100644 --- a/cmake/OpenCVUtils.cmake +++ b/cmake/OpenCVUtils.cmake @@ -1890,3 +1890,9 @@ function(ocv_update_file filepath content) file(WRITE "${filepath}" "${content}") endif() endfunction() + +if(NOT BUILD_SHARED_LIBS AND (CMAKE_VERSION VERSION_LESS "3.14.0")) + ocv_update(OPENCV_3RDPARTY_EXCLUDE_FROM_ALL "") # avoid CMake warnings: https://gitlab.kitware.com/cmake/cmake/-/issues/18938 +else() + ocv_update(OPENCV_3RDPARTY_EXCLUDE_FROM_ALL "EXCLUDE_FROM_ALL") +endif() From 56d2b7137ccf5258f533db9db7b3d8e60084041c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jack=C2=B7Boos=C2=B7Yu?= <47264268+JackBoosY@users.noreply.github.com> Date: Thu, 29 Oct 2020 06:50:25 -0700 Subject: [PATCH 058/152] Merge pull request #18658 from JackBoosY:master * Fix cmake configure error * judge the cmake version * Add comments --- modules/videoio/cmake/init.cmake | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/modules/videoio/cmake/init.cmake b/modules/videoio/cmake/init.cmake index 1efef12c5e..81d5d9fe87 100644 --- a/modules/videoio/cmake/init.cmake +++ b/modules/videoio/cmake/init.cmake @@ -12,8 +12,16 @@ function(ocv_add_external_target name inc link def) set_target_properties(ocv.3rdparty.${name} PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${inc}" INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${inc}" - INTERFACE_LINK_LIBRARIES "${link}" INTERFACE_COMPILE_DEFINITIONS "${def}") + # When cmake version is greater than or equal to 3.11, INTERFACE_LINK_LIBRARIES no longer applies to interface library + # See https://github.com/opencv/opencv/pull/18658 + if (CMAKE_VERSION VERSION_LESS 3.11) + set_target_properties(ocv.3rdparty.${name} PROPERTIES + INTERFACE_LINK_LIBRARIES "${link}") + else() + 
target_link_libraries(ocv.3rdparty.${name} INTERFACE ${link}) + endif() + # if(NOT BUILD_SHARED_LIBS) install(TARGETS ocv.3rdparty.${name} EXPORT OpenCVModules) endif() From ca8bb8d0532c3070075db66947b58e8894650fdc Mon Sep 17 00:00:00 2001 From: Dmitry Matveev Date: Wed, 18 Mar 2020 02:38:24 +0300 Subject: [PATCH 059/152] G-API: Introduce streaming::desync and infer(ROI) - desync() is a new (and for now, the only one) intrinsic which splits the graph execution into asynchronous parts when running in Streaming mode; - desync() makes no effect when compiling in Traditional mode; - Added tests on desync() working in various scenarios; - Extended GStreamingExecutor to support desync(); also extended GStreamingCompiled() with a new version of pull() returning a vector of optional values; - Fixed various issues with storing the type information & proper construction callbacks for GArray<> and GOpaque; - Introduced a new infer(Roi,GMat) overload with a sample; - Introduced an internal API for Islands to control fusion procedure (to fuse or not to fuse); - Introduced handleStopStream() callback for island executables; - Added GCompileArgs to metadata of the graph (required for other features). 
--- modules/gapi/CMakeLists.txt | 2 + modules/gapi/include/opencv2/gapi.hpp | 4 + modules/gapi/include/opencv2/gapi/garray.hpp | 23 +- modules/gapi/include/opencv2/gapi/gkernel.hpp | 59 ++- modules/gapi/include/opencv2/gapi/gopaque.hpp | 28 +- .../gapi/include/opencv2/gapi/gstreaming.hpp | 122 +++++ .../include/opencv2/gapi/gtype_traits.hpp | 23 + modules/gapi/include/opencv2/gapi/infer.hpp | 63 ++- .../include/opencv2/gapi/streaming/desync.hpp | 84 +++ modules/gapi/samples/infer_single_roi.cpp | 264 ++++++++++ modules/gapi/src/api/gbackend.cpp | 15 + modules/gapi/src/api/gbackend_priv.hpp | 18 +- modules/gapi/src/api/ginfer.cpp | 3 + modules/gapi/src/api/kernels_streaming.cpp | 74 +++ modules/gapi/src/backends/ie/giebackend.cpp | 60 +++ modules/gapi/src/backends/ocl/goclbackend.cpp | 4 + modules/gapi/src/compiler/gcompiler.cpp | 8 + modules/gapi/src/compiler/gislandmodel.cpp | 20 +- modules/gapi/src/compiler/gislandmodel.hpp | 21 + modules/gapi/src/compiler/gmodel.cpp | 19 +- modules/gapi/src/compiler/gmodel.hpp | 70 ++- modules/gapi/src/compiler/gmodelbuilder.cpp | 9 +- modules/gapi/src/compiler/gobjref.hpp | 12 +- modules/gapi/src/compiler/gstreaming.cpp | 11 + modules/gapi/src/compiler/gstreaming_priv.hpp | 1 + modules/gapi/src/compiler/passes/exec.cpp | 113 ++-- modules/gapi/src/compiler/passes/intrin.cpp | 305 +++++++++++ modules/gapi/src/compiler/passes/kernels.cpp | 48 +- modules/gapi/src/compiler/passes/passes.hpp | 9 +- modules/gapi/src/compiler/transactions.hpp | 81 ++- modules/gapi/src/executor/conc_queue.hpp | 3 +- .../gapi/src/executor/gstreamingexecutor.cpp | 375 +++++++++++-- .../gapi/src/executor/gstreamingexecutor.hpp | 82 ++- modules/gapi/src/executor/last_value.hpp | 105 ++++ .../internal/gapi_int_gmodel_builder_test.cpp | 10 +- .../internal/gapi_int_island_fusion_tests.cpp | 60 ++- .../test/internal/gapi_transactions_test.cpp | 161 +++++- modules/gapi/test/own/conc_queue_tests.cpp | 6 +- .../test/own/last_written_value_tests.cpp | 156 ++++++ 
.../test/streaming/gapi_streaming_tests.cpp | 491 +++++++++++++++++- 40 files changed, 2827 insertions(+), 195 deletions(-) create mode 100644 modules/gapi/include/opencv2/gapi/streaming/desync.hpp create mode 100644 modules/gapi/samples/infer_single_roi.cpp create mode 100644 modules/gapi/src/api/kernels_streaming.cpp create mode 100644 modules/gapi/src/compiler/passes/intrin.cpp create mode 100644 modules/gapi/src/executor/last_value.hpp create mode 100644 modules/gapi/test/own/last_written_value_tests.cpp diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt index 82b719ad4e..acfbd1d70e 100644 --- a/modules/gapi/CMakeLists.txt +++ b/modules/gapi/CMakeLists.txt @@ -74,6 +74,7 @@ set(gapi_srcs src/api/kernels_imgproc.cpp src/api/kernels_video.cpp src/api/kernels_nnparsers.cpp + src/api/kernels_streaming.cpp src/api/render.cpp src/api/render_ocv.cpp src/api/ginfer.cpp @@ -97,6 +98,7 @@ set(gapi_srcs src/compiler/passes/pattern_matching.cpp src/compiler/passes/perform_substitution.cpp src/compiler/passes/streaming.cpp + src/compiler/passes/intrin.cpp # Executor src/executor/gexecutor.cpp diff --git a/modules/gapi/include/opencv2/gapi.hpp b/modules/gapi/include/opencv2/gapi.hpp index c6ab3f13fd..8445746710 100644 --- a/modules/gapi/include/opencv2/gapi.hpp +++ b/modules/gapi/include/opencv2/gapi.hpp @@ -33,4 +33,8 @@ #include #include +// Include this file here to avoid cyclic dependency between +// Desync & GKernel & GComputation & GStreamingCompiled. 
+#include + #endif // OPENCV_GAPI_HPP diff --git a/modules/gapi/include/opencv2/gapi/garray.hpp b/modules/gapi/include/opencv2/gapi/garray.hpp index 9118f4de98..0798655666 100644 --- a/modules/gapi/include/opencv2/gapi/garray.hpp +++ b/modules/gapi/include/opencv2/gapi/garray.hpp @@ -284,6 +284,14 @@ namespace detail return static_cast&>(*m_ref).rref(); } + // Check if was created for/from std::vector + template bool holds() const + { + if (!m_ref) return false; + using U = typename std::decay::type; + return dynamic_cast*>(m_ref.get()) != nullptr; + } + void mov(VectorRef &v) { m_ref->mov(*v.m_ref); @@ -341,15 +349,18 @@ public: explicit GArray(detail::GArrayU &&ref) // GArrayU-based constructor : m_ref(ref) { putDetails(); } // (used by GCall, not for users) - detail::GArrayU strip() const { return m_ref; } + /// @private + detail::GArrayU strip() const { + return m_ref; + } + /// @private + static void VCtor(detail::VectorRef& vref) { + vref.reset(); + } private: - static void VCTor(detail::VectorRef& vref) { - vref.reset(); - vref.storeKind(); - } void putDetails() { - m_ref.setConstructFcn(&VCTor); + m_ref.setConstructFcn(&VCtor); m_ref.specifyType(); // FIXME: to unify those 2 to avoid excessive dynamic_cast m_ref.storeKind(); // } diff --git a/modules/gapi/include/opencv2/gapi/gkernel.hpp b/modules/gapi/include/opencv2/gapi/gkernel.hpp index b04cedecad..d4c3e6c634 100644 --- a/modules/gapi/include/opencv2/gapi/gkernel.hpp +++ b/modules/gapi/include/opencv2/gapi/gkernel.hpp @@ -28,6 +28,7 @@ namespace cv { using GShapes = std::vector; using GKinds = std::vector; +using GCtors = std::vector; // GKernel describes kernel API to the system // FIXME: add attributes of a kernel, (e.g. number and types @@ -41,6 +42,7 @@ struct GAPI_EXPORTS GKernel M outMeta; // generic adaptor to API::outMeta(...) 
GShapes outShapes; // types (shapes) kernel's outputs GKinds inKinds; // kinds of kernel's inputs (fixme: below) + GCtors outCtors; // captured constructors for template output types }; // TODO: It's questionable if inKinds should really be here. Instead, // this information could come from meta. @@ -60,30 +62,27 @@ namespace detail // yield() is used in graph construction time as a generic method to obtain // lazy "return value" of G-API operations // - namespace + template struct Yield; + template<> struct Yield { - template struct Yield; - template<> struct Yield - { - static inline cv::GMat yield(cv::GCall &call, int i) { return call.yield(i); } - }; - template<> struct Yield - { - static inline cv::GMatP yield(cv::GCall &call, int i) { return call.yieldP(i); } - }; - template<> struct Yield - { - static inline cv::GScalar yield(cv::GCall &call, int i) { return call.yieldScalar(i); } - }; - template struct Yield > - { - static inline cv::GArray yield(cv::GCall &call, int i) { return call.yieldArray(i); } - }; - template struct Yield > - { - static inline cv::GOpaque yield(cv::GCall &call, int i) { return call.yieldOpaque(i); } - }; - } // anonymous namespace + static inline cv::GMat yield(cv::GCall &call, int i) { return call.yield(i); } + }; + template<> struct Yield + { + static inline cv::GMatP yield(cv::GCall &call, int i) { return call.yieldP(i); } + }; + template<> struct Yield + { + static inline cv::GScalar yield(cv::GCall &call, int i) { return call.yieldScalar(i); } + }; + template struct Yield > + { + static inline cv::GArray yield(cv::GCall &call, int i) { return call.yieldArray(i); } + }; + template struct Yield > + { + static inline cv::GOpaque yield(cv::GCall &call, int i) { return call.yieldOpaque(i); } + }; //////////////////////////////////////////////////////////////////////////// // Helper classes which brings outputMeta() marshalling to kernel @@ -215,7 +214,8 @@ public: , K::tag() , &K::getOutMeta , {detail::GTypeTraits::shape...} - , 
{detail::GTypeTraits::op_kind...}}); + , {detail::GTypeTraits::op_kind...} + , {detail::GObtainCtor::get()...}}); call.pass(args...); // TODO: std::forward() here? return yield(call, typename detail::MkSeq::type()); } @@ -240,7 +240,8 @@ public: , K::tag() , &K::getOutMeta , {detail::GTypeTraits::shape} - , {detail::GTypeTraits::op_kind...}}); + , {detail::GTypeTraits::op_kind...} + , {detail::GObtainCtor::get()}}); call.pass(args...); return detail::Yield::yield(call, 0); } @@ -459,11 +460,6 @@ namespace gapi { std::vector m_transformations; protected: - /// @private - // Check if package contains ANY implementation of a kernel API - // by API textual id. - bool includesAPI(const std::string &id) const; - /// @private // Remove ALL implementations of the given API (identified by ID) void removeAPI(const std::string &id); @@ -566,6 +562,9 @@ namespace gapi { return includesAPI(KAPI::id()); } + /// @private + bool includesAPI(const std::string &id) const; + // FIXME: The below comment is wrong, and who needs this function? /** * @brief Find a kernel (by its API) diff --git a/modules/gapi/include/opencv2/gapi/gopaque.hpp b/modules/gapi/include/opencv2/gapi/gopaque.hpp index 3d1394473b..6ab28910d6 100644 --- a/modules/gapi/include/opencv2/gapi/gopaque.hpp +++ b/modules/gapi/include/opencv2/gapi/gopaque.hpp @@ -295,25 +295,27 @@ namespace detail template class GOpaque { public: - GOpaque() { putDetails(); } // Empty constructor - explicit GOpaque(detail::GOpaqueU &&ref) // GOpaqueU-based constructor - : m_ref(ref) { putDetails(); } // (used by GCall, not for users) - - detail::GOpaqueU strip() const { return m_ref; } - -private: // Host type (or Flat type) - the type this GOpaque is actually // specified to. 
using HT = typename detail::flatten_g>::type; - static void CTor(detail::OpaqueRef& ref) { - ref.reset(); - ref.storeKind(); + GOpaque() { putDetails(); } // Empty constructor + explicit GOpaque(detail::GOpaqueU &&ref) // GOpaqueU-based constructor + : m_ref(ref) { putDetails(); } // (used by GCall, not for users) + + /// @private + detail::GOpaqueU strip() const { + return m_ref; } + /// @private + static void Ctor(detail::OpaqueRef& ref) { + ref.reset(); + } +private: void putDetails() { - m_ref.setConstructFcn(&CTor); - m_ref.specifyType(); // FIXME: to unify those 2 to avoid excessive dynamic_cast - m_ref.storeKind(); // + m_ref.setConstructFcn(&Ctor); + m_ref.specifyType(); + m_ref.storeKind(); } detail::GOpaqueU m_ref; diff --git a/modules/gapi/include/opencv2/gapi/gstreaming.hpp b/modules/gapi/include/opencv2/gapi/gstreaming.hpp index 037fa94452..e09cf8d0f7 100644 --- a/modules/gapi/include/opencv2/gapi/gstreaming.hpp +++ b/modules/gapi/include/opencv2/gapi/gstreaming.hpp @@ -8,15 +8,99 @@ #ifndef OPENCV_GAPI_GSTREAMING_COMPILED_HPP #define OPENCV_GAPI_GSTREAMING_COMPILED_HPP +#include #include #include #include +#include #include #include namespace cv { +template using optional = cv::util::optional; + +namespace detail { +template struct wref_spec { + using type = T; +}; +template struct wref_spec > { + using type = T; +}; + +template +struct OptRef { + struct OptHolder { + virtual void mov(RefHolder &h) = 0; + virtual void reset() = 0; + virtual ~OptHolder() = default; + using Ptr = std::shared_ptr; + }; + template struct Holder final: OptHolder { + std::reference_wrapper > m_opt_ref; + + explicit Holder(cv::optional& opt) : m_opt_ref(std::ref(opt)) { + } + virtual void mov(RefHolder &h) override { + using U = typename wref_spec::type; + m_opt_ref.get() = cv::util::make_optional(std::move(h.template wref())); + } + virtual void reset() override { + m_opt_ref.get().reset(); + } + }; + template + explicit OptRef(cv::optional& t) : m_opt{new Holder(t)} {} + 
void mov(RefHolder &h) { m_opt->mov(h); } + void reset() { m_opt->reset();} +private: + typename OptHolder::Ptr m_opt; +}; +using OptionalVectorRef = OptRef; +using OptionalOpaqueRef = OptRef; +} // namespace detail + +// TODO: Keep it in sync with GRunArgP (derive the type automatically?) +using GOptRunArgP = util::variant< + optional*, + optional*, + optional*, + cv::detail::OptionalVectorRef, + cv::detail::OptionalOpaqueRef +>; +using GOptRunArgsP = std::vector; + +namespace detail { + +template inline GOptRunArgP wrap_opt_arg(optional& arg) { + // By default, T goes to an OpaqueRef. All other types are specialized + return GOptRunArgP{OptionalOpaqueRef(arg)}; +} + +template inline GOptRunArgP wrap_opt_arg(optional >& arg) { + return GOptRunArgP{OptionalVectorRef(arg)}; +} + +template<> inline GOptRunArgP wrap_opt_arg(optional &m) { + return GOptRunArgP{&m}; +} + +template<> inline GOptRunArgP wrap_opt_arg(optional &s) { + return GOptRunArgP{&s}; +} + +} // namespace detail + +// Now cv::gout() may produce an empty vector (see "dynamic graphs"), so +// there may be a conflict between these two. State here that Opt version +// _must_ have at least one input for this overload +template +inline GOptRunArgsP gout(optional&arg, optional&... args) +{ + return GOptRunArgsP{ detail::wrap_opt_arg(arg), detail::wrap_opt_arg(args)... }; +} + /** * \addtogroup gapi_main_classes * @{ @@ -169,6 +253,44 @@ public: // NB: Used from python GAPI_WRAP std::tuple pull(); + /** + * @brief Get some next available data from the pipeline. + * + * This method takes a vector of cv::optional object. An object is + * assigned to some value if this value is available (ready) at + * the time of the call, and resets the object to empty() if it is + * not. + * + * This is a blocking method which guarantees that some data has + * been written to the output vector on return. + * + * Using this method only makes sense if the graph has + * desynchronized parts (see cv::gapi::desync). 
If there is no + * desynchronized parts in the graph, the behavior of this + * method is identical to the regular pull() (all data objects are + * produced synchronously in the output vector). + * + * Use gout() to create an output parameter vector. + * + * Output vectors must have the same number of elements as defined + * in the cv::GComputation protocol (at the moment of its + * construction). Shapes of elements also must conform to protocol + * (e.g. cv::optional needs to be passed where cv::GMat + * has been declared as output, and so on). Run-time exception is + * generated on type mismatch. + * + * This method writes new data into objects passed via output + * vector. If there is no data ready yet, this method blocks. Use + * try_pull() if you need a non-blocking version. + * + * @param outs vector of output parameters to obtain. + * @return true if next result has been obtained, + * false marks end of the stream. + * + * @sa cv::gapi::desync + */ + bool pull(cv::GOptRunArgsP &&outs); + /** * @brief Try to get the next processed frame from the pipeline. 
* diff --git a/modules/gapi/include/opencv2/gapi/gtype_traits.hpp b/modules/gapi/include/opencv2/gapi/gtype_traits.hpp index c9800b2b16..2e8dcb1aec 100644 --- a/modules/gapi/include/opencv2/gapi/gtype_traits.hpp +++ b/modules/gapi/include/opencv2/gapi/gtype_traits.hpp @@ -191,6 +191,29 @@ namespace detail template using wrap_gapi_helper = WrapValue::type>; template using wrap_host_helper = WrapValue >::type>; + +// Union type for various user-defined type constructors (GArray, +// GOpaque, etc) +// +// TODO: Replace construct-only API with a more generic one (probably +// with bits of introspection) +// +// Not required for non-user-defined types (GMat, GScalar, etc) +using HostCtor = util::variant + < util::monostate + , detail::ConstructVec + , detail::ConstructOpaque + >; + +template struct GObtainCtor { + static HostCtor get() { return HostCtor{}; } +}; +template struct GObtainCtor > { + static HostCtor get() { return HostCtor{ConstructVec{&GArray::VCtor}}; }; +}; +template struct GObtainCtor > { + static HostCtor get() { return HostCtor{ConstructOpaque{&GOpaque::Ctor}}; }; +}; } // namespace detail } // namespace cv diff --git a/modules/gapi/include/opencv2/gapi/infer.hpp b/modules/gapi/include/opencv2/gapi/infer.hpp index 9b4580ec6b..b850775a62 100644 --- a/modules/gapi/include/opencv2/gapi/infer.hpp +++ b/modules/gapi/include/opencv2/gapi/infer.hpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. // -// Copyright (C) 2019 Intel Corporation +// Copyright (C) 2019-2020 Intel Corporation #ifndef OPENCV_GAPI_INFER_HPP @@ -77,6 +77,9 @@ public: using ResultL = std::tuple< cv::GArray... >; using APIList = std::function, Args...)>; + + // FIXME: Args... 
must be limited to a single GMat + using APIRoi = std::function, Args...)>; }; // Single-return-value network definition (specialized base class) @@ -92,6 +95,9 @@ public: using ResultL = cv::GArray; using APIList = std::function, Args...)>; + + // FIXME: Args... must be limited to a single GMat + using APIRoi = std::function, Args...)>; }; // APIList2 is also template to allow different calling options @@ -114,10 +120,10 @@ struct InferAPIList2 { // a particular backend, not by a network itself. struct GInferBase { static constexpr const char * id() { - return "org.opencv.dnn.infer"; // Universal stub + return "org.opencv.dnn.infer"; // Universal stub } static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) { - return GMetaArgs{}; // One more universal stub + return GMetaArgs{}; // One more universal stub } }; @@ -164,15 +170,25 @@ private: std::shared_ptr m_priv; }; /** @} */ +// Base "InferROI" kernel. +// All notes from "Infer" kernel apply here as well. +struct GInferROIBase { + static constexpr const char * id() { + return "org.opencv.dnn.infer-roi"; // Universal stub + } + static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) { + return GMetaArgs{}; // One more universal stub + } +}; // Base "Infer list" kernel. // All notes from "Infer" kernel apply here as well. struct GInferListBase { static constexpr const char * id() { - return "org.opencv.dnn.infer-roi"; // Universal stub + return "org.opencv.dnn.infer-roi-list-1"; // Universal stub } static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) { - return GMetaArgs{}; // One more universal stub + return GMetaArgs{}; // One more universal stub } }; @@ -180,10 +196,10 @@ struct GInferListBase { // All notes from "Infer" kernel apply here as well. 
struct GInferList2Base { static constexpr const char * id() { - return "org.opencv.dnn.infer-roi-list"; // Universal stub + return "org.opencv.dnn.infer-roi-list-2"; // Universal stub } static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) { - return GMetaArgs{}; // One more universal stub + return GMetaArgs{}; // One more universal stub } }; @@ -200,6 +216,19 @@ struct GInfer final static constexpr const char* tag() { return Net::tag(); } }; +// A specific roi-inference kernel. API (::on()) is fixed here and +// verified against Net. +template +struct GInferROI final + : public GInferROIBase + , public detail::KernelTypeMedium< GInferROI + , typename Net::APIRoi > { + using GInferROIBase::getOutMeta; // FIXME: name lookup conflict workaround? + + static constexpr const char* tag() { return Net::tag(); } +}; + + // A generic roi-list inference kernel. API (::on()) is derived from // the Net template parameter (see more in infer<> overload). template @@ -238,6 +267,23 @@ struct GInferList2 final namespace cv { namespace gapi { +/** @brief Calculates response for the specified network (template + * parameter) for the specified region in the source image. + * Currently expects a single-input network only. + * + * @tparam A network type defined with G_API_NET() macro. + * @param in input image where to take ROI from. + * @param roi an object describing the region of interest + * in the source image. May be calculated in the same graph dynamically. + * @return an object of return type as defined in G_API_NET(). + * If a network has multiple return values (defined with a tuple), a tuple of + * objects of appropriate type is returned. + * @sa G_API_NET() + */ +template +typename Net::Result infer(cv::GOpaque roi, cv::GMat in) { + return GInferROI::on(roi, in); +} /** @brief Calculates responses for the specified network (template * parameter) for every region in the source image. 
@@ -328,7 +374,8 @@ infer(const std::string& tag, const GInferInputs& inputs) tag, GInferBase::getOutMeta, {}, // outShape will be filled later - std::move(kinds) + std::move(kinds), + {}, // outCtors will be filled later }); call->setArgs(std::move(input_args)); diff --git a/modules/gapi/include/opencv2/gapi/streaming/desync.hpp b/modules/gapi/include/opencv2/gapi/streaming/desync.hpp new file mode 100644 index 0000000000..86de279fe9 --- /dev/null +++ b/modules/gapi/include/opencv2/gapi/streaming/desync.hpp @@ -0,0 +1,84 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + + +#ifndef OPENCV_GAPI_GSTREAMING_DESYNC_HPP +#define OPENCV_GAPI_GSTREAMING_DESYNC_HPP + +#include + +#include +#include +#include +#include +#include + +namespace cv { +namespace gapi { +namespace streaming { + +namespace detail { +struct GDesync { + static const char *id() { + return "org.opencv.streaming.desync"; + } + + // An universal yield for desync. + // Yields output objects according to the input Types... + // Reuses gkernel machinery. 
+ // FIXME: This function can be generic and declared in gkernel.hpp + // (it is there already, but a part of GKernelType[M] + template + static std::tuple yield(cv::GCall &call, cv::detail::Seq) { + return std::make_tuple(cv::detail::Yield::yield(call, IIs)...); + } +}; + +template +G desync(const G &g) { + cv::GKernel k{ + GDesync::id() // kernel id + , "" // kernel tag + , [](const GMetaArgs &a, const GArgs &) {return a;} // outMeta callback + , {cv::detail::GTypeTraits::shape} // output Shape + , {cv::detail::GTypeTraits::op_kind} // input data kinds + , {cv::detail::GObtainCtor::get()} // output template ctors + }; + cv::GCall call(std::move(k)); + call.pass(g); + return std::get<0>(GDesync::yield(call, cv::detail::MkSeq<1>::type())); +} +} // namespace detail + +/** + * @brief Starts a desynchronized branch in the graph. + * + * This operation takes a single G-API data object and returns a + * graph-level "duplicate" of this object. + * + * Operations which use this data object can be desynchronized + * from the rest of the graph. + * + * This operation has no effect when a GComputation is compiled with + * regular cv::GComputation::compile(), since cv::GCompiled objects + * always produce their full output vectors. + * + * This operation only makes sense when a GComputation is compiled in + * straming mode with cv::GComputation::compileStreaming(). If this + * operation is used and there are desynchronized outputs, the user + * should use a special version of cv::GStreamingCompiled::pull() + * which produces an array of cv::util::optional<> objects. + * + * @note This feature is highly experimental now and is currently + * limited to a single GMat argument only. 
+ */ +GAPI_EXPORTS GMat desync(const GMat &g); + +} // namespace streaming +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_GSTREAMING_DESYNC_HPP diff --git a/modules/gapi/samples/infer_single_roi.cpp b/modules/gapi/samples/infer_single_roi.cpp new file mode 100644 index 0000000000..6054a3f4a6 --- /dev/null +++ b/modules/gapi/samples/infer_single_roi.cpp @@ -0,0 +1,264 @@ +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +const std::string keys = + "{ h help | | Print this help message }" + "{ input | | Path to the input video file }" + "{ facem | face-detection-adas-0001.xml | Path to OpenVINO IE face detection model (.xml) }" + "{ faced | CPU | Target device for face detection model (e.g. CPU, GPU, VPU, ...) }" + "{ r roi | -1,-1,-1,-1 | Region of interest (ROI) to use for inference. Identified automatically when not set }"; + +namespace { + +std::string weights_path(const std::string &model_path) { + const auto EXT_LEN = 4u; + const auto sz = model_path.size(); + CV_Assert(sz > EXT_LEN); + + auto ext = model_path.substr(sz - EXT_LEN); + std::transform(ext.begin(), ext.end(), ext.begin(), [](unsigned char c){ + return static_cast(std::tolower(c)); + }); + CV_Assert(ext == ".xml"); + return model_path.substr(0u, sz - EXT_LEN) + ".bin"; +} + +cv::util::optional parse_roi(const std::string &rc) { + cv::Rect rv; + char delim[3]; + + std::stringstream is(rc); + is >> rv.x >> delim[0] >> rv.y >> delim[1] >> rv.width >> delim[2] >> rv.height; + if (is.bad()) { + return cv::util::optional(); // empty value + } + const auto is_delim = [](char c) { + return c == ','; + }; + if (!std::all_of(std::begin(delim), std::end(delim), is_delim)) { + return cv::util::optional(); // empty value + + } + if (rv.x < 0 || rv.y < 0 || rv.width <= 0 || rv.height <= 0) { + return cv::util::optional(); // empty value + } + return cv::util::make_optional(std::move(rv)); +} + +} // 
namespace + +namespace custom { + +G_API_NET(FaceDetector, , "face-detector"); + +using GDetections = cv::GArray; +using GRect = cv::GOpaque; +using GSize = cv::GOpaque; +using GPrims = cv::GArray; + +G_API_OP(GetSize, , "sample.custom.get-size") { + static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) { + return cv::empty_gopaque_desc(); + } +}; + +G_API_OP(LocateROI, , "sample.custom.locate-roi") { + static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) { + return cv::empty_gopaque_desc(); + } +}; + +G_API_OP(ParseSSD, , "sample.custom.parse-ssd") { + static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GOpaqueDesc &, const cv::GOpaqueDesc &) { + return cv::empty_array_desc(); + } +}; + +G_API_OP(BBoxes, , "sample.custom.b-boxes") { + static cv::GArrayDesc outMeta(const cv::GArrayDesc &, const cv::GOpaqueDesc &) { + return cv::empty_array_desc(); + } +}; + +GAPI_OCV_KERNEL(OCVGetSize, GetSize) { + static void run(const cv::Mat &in, cv::Size &out) { + out = {in.cols, in.rows}; + } +}; + +GAPI_OCV_KERNEL(OCVLocateROI, LocateROI) { + // This is the place where we can run extra analytics + // on the input image frame and select the ROI (region + // of interest) where we want to detect our objects (or + // run any other inference). 
+ // + // Currently it doesn't do anything intelligent, + // but only crops the input image to square (this is + // the most convenient aspect ratio for detectors to use) + + static void run(const cv::Mat &in_mat, cv::Rect &out_rect) { + + // Identify the central point & square size (- some padding) + const auto center = cv::Point{in_mat.cols/2, in_mat.rows/2}; + auto sqside = std::min(in_mat.cols, in_mat.rows); + + // Now build the central square ROI + out_rect = cv::Rect{ center.x - sqside/2 + , center.y - sqside/2 + , sqside + , sqside + }; + } +}; + +GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) { + static void run(const cv::Mat &in_ssd_result, + const cv::Rect &in_roi, + const cv::Size &in_parent_size, + std::vector &out_objects) { + const auto &in_ssd_dims = in_ssd_result.size; + CV_Assert(in_ssd_dims.dims() == 4u); + + const int MAX_PROPOSALS = in_ssd_dims[2]; + const int OBJECT_SIZE = in_ssd_dims[3]; + CV_Assert(OBJECT_SIZE == 7); // fixed SSD object size + + const cv::Size up_roi = in_roi.size(); + const cv::Rect surface({0,0}, in_parent_size); + + out_objects.clear(); + + const float *data = in_ssd_result.ptr(); + for (int i = 0; i < MAX_PROPOSALS; i++) { + const float image_id = data[i * OBJECT_SIZE + 0]; + const float label = data[i * OBJECT_SIZE + 1]; + const float confidence = data[i * OBJECT_SIZE + 2]; + const float rc_left = data[i * OBJECT_SIZE + 3]; + const float rc_top = data[i * OBJECT_SIZE + 4]; + const float rc_right = data[i * OBJECT_SIZE + 5]; + const float rc_bottom = data[i * OBJECT_SIZE + 6]; + (void) label; // unused + + if (image_id < 0.f) { + break; // marks end-of-detections + } + if (confidence < 0.5f) { + continue; // skip objects with low confidence + } + + // map relative coordinates to the original image scale + // taking the ROI into account + cv::Rect rc; + rc.x = static_cast(rc_left * up_roi.width); + rc.y = static_cast(rc_top * up_roi.height); + rc.width = static_cast(rc_right * up_roi.width) - rc.x; + rc.height = 
static_cast(rc_bottom * up_roi.height) - rc.y; + rc.x += in_roi.x; + rc.y += in_roi.y; + out_objects.emplace_back(rc & surface); + } + } +}; + +GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) { + // This kernel converts the rectangles into G-API's + // rendering primitives + static void run(const std::vector &in_face_rcs, + const cv::Rect &in_roi, + std::vector &out_prims) { + out_prims.clear(); + const auto cvt = [](const cv::Rect &rc, const cv::Scalar &clr) { + return cv::gapi::wip::draw::Rect(rc, clr, 2); + }; + out_prims.emplace_back(cvt(in_roi, CV_RGB(0,255,255))); // cyan + for (auto &&rc : in_face_rcs) { + out_prims.emplace_back(cvt(rc, CV_RGB(0,255,0))); // green + } + } +}; + +} // namespace custom + +int main(int argc, char *argv[]) +{ + cv::CommandLineParser cmd(argc, argv, keys); + if (cmd.has("help")) { + cmd.printMessage(); + return 0; + } + + // Prepare parameters first + const std::string input = cmd.get("input"); + const auto opt_roi = parse_roi(cmd.get("roi")); + + const auto face_model_path = cmd.get("facem"); + auto face_net = cv::gapi::ie::Params { + face_model_path, // path to topology IR + weights_path(face_model_path), // path to weights + cmd.get("faced"), // device specifier + }; + auto kernels = cv::gapi::kernels + < custom::OCVGetSize + , custom::OCVLocateROI + , custom::OCVParseSSD + , custom::OCVBBoxes>(); + auto networks = cv::gapi::networks(face_net); + + // Now build the graph. 
The graph structure may vary + // pased on the input parameters + cv::GStreamingCompiled pipeline; + auto inputs = cv::gin(cv::gapi::wip::make_src(input)); + + if (opt_roi.has_value()) { + // Use the value provided by user + std::cout << "Will run inference for static region " + << opt_roi.value() + << " only" + << std::endl; + cv::GMat in; + cv::GOpaque in_roi; + auto blob = cv::gapi::infer(in_roi, in); + auto rcs = custom::ParseSSD::on(blob, in_roi, custom::GetSize::on(in)); + auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs, in_roi)); + pipeline = cv::GComputation(cv::GIn(in, in_roi), cv::GOut(out)) + .compileStreaming(cv::compile_args(kernels, networks)); + + // Since the ROI to detect is manual, make it part of the input vector + inputs.push_back(cv::gin(opt_roi.value())[0]); + } else { + // Automatically detect ROI to infer. Make it output parameter + std::cout << "ROI is not set or invalid. Locating it automatically" + << std::endl; + cv::GMat in; + cv::GOpaque roi = custom::LocateROI::on(in); + auto blob = cv::gapi::infer(roi, in); + auto rcs = custom::ParseSSD::on(blob, roi, custom::GetSize::on(in)); + auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs, roi)); + pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out)) + .compileStreaming(cv::compile_args(kernels, networks)); + } + + // The execution part + pipeline.setSource(std::move(inputs)); + pipeline.start(); + + cv::Mat out; + while (pipeline.pull(cv::gout(out))) { + cv::imshow("Out", out); + cv::waitKey(1); + } + return 0; +} diff --git a/modules/gapi/src/api/gbackend.cpp b/modules/gapi/src/api/gbackend.cpp index 600e5cc84d..6b8d0fcbee 100644 --- a/modules/gapi/src/api/gbackend.cpp +++ b/modules/gapi/src/api/gbackend.cpp @@ -67,6 +67,21 @@ cv::gapi::GKernelPackage cv::gapi::GBackend::Priv::auxiliaryKernels() const return {}; } +bool cv::gapi::GBackend::Priv::controlsMerge() const +{ + return false; +} + +bool cv::gapi::GBackend::Priv::allowsMerge(const 
cv::gimpl::GIslandModel::Graph &, + const ade::NodeHandle &, + const ade::NodeHandle &, + const ade::NodeHandle &) const +{ + GAPI_Assert(controlsMerge()); + return true; +} + + // GBackend public implementation ////////////////////////////////////////////// cv::gapi::GBackend::GBackend() { diff --git a/modules/gapi/src/api/gbackend_priv.hpp b/modules/gapi/src/api/gbackend_priv.hpp index 13f39acc86..45237514a5 100644 --- a/modules/gapi/src/api/gbackend_priv.hpp +++ b/modules/gapi/src/api/gbackend_priv.hpp @@ -19,7 +19,7 @@ #include "opencv2/gapi/gkernel.hpp" #include "compiler/gmodel.hpp" - +#include "compiler/gislandmodel.hpp" namespace cv { @@ -68,6 +68,22 @@ public: virtual cv::gapi::GKernelPackage auxiliaryKernels() const; + // Ask backend if it has a custom control over island fusion process + // This method is quite redundant but there's nothing better fits + // the current fusion process. By default, [existing] backends don't + // control the merge. + // FIXME: Refactor to a single entity? + virtual bool controlsMerge() const; + + // Ask backend if it is ok to merge these two islands connected + // via a data slot. By default, [existing] backends allow to merge everything. + // FIXME: Refactor to a single entity? + // FIXME: Strip down the type details form graph? (make it ade::Graph?) 
+ virtual bool allowsMerge(const cv::gimpl::GIslandModel::Graph &g, + const ade::NodeHandle &a_nh, + const ade::NodeHandle &slot_nh, + const ade::NodeHandle &b_nh) const; + virtual ~Priv() = default; }; diff --git a/modules/gapi/src/api/ginfer.cpp b/modules/gapi/src/api/ginfer.cpp index 20511a4aaf..156f8938c4 100644 --- a/modules/gapi/src/api/ginfer.cpp +++ b/modules/gapi/src/api/ginfer.cpp @@ -70,7 +70,10 @@ cv::GMat cv::GInferOutputs::at(const std::string& name) auto it = m_priv->out_blobs.find(name); if (it == m_priv->out_blobs.end()) { // FIXME: Avoid modifying GKernel + // Expect output to be always GMat m_priv->call->kernel().outShapes.push_back(cv::GShape::GMAT); + // ...so _empty_ constructor is passed here. + m_priv->call->kernel().outCtors.emplace_back(cv::util::monostate{}); int out_idx = static_cast(m_priv->out_blobs.size()); it = m_priv->out_blobs.emplace(name, m_priv->call->yield(out_idx)).first; m_priv->info->out_names.push_back(name); diff --git a/modules/gapi/src/api/kernels_streaming.cpp b/modules/gapi/src/api/kernels_streaming.cpp new file mode 100644 index 0000000000..af7bd19dd1 --- /dev/null +++ b/modules/gapi/src/api/kernels_streaming.cpp @@ -0,0 +1,74 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#include "precomp.hpp" + +#include +#include + +cv::GMat cv::gapi::streaming::desync(const cv::GMat &g) { + // FIXME: this is a limited implementation of desync + // The real implementation must be generic (template) and + // reside in desync.hpp (and it is detail::desync<>()) + + // FIXME: Put a copy here to solve the below problem + // FIXME: Because of the copy, the desync functionality is limited + // to GMat only (we don't have generic copy kernel for other + // object types) + return cv::gapi::copy(detail::desync(g)); + + // FIXME + // + // If consumed by multiple different islands (OCV and Fluid by + // example, an object needs to be desynchronized individually + // for every path. + // + // This is a limitation of the current implementation. It works + // this way: every "desync" link from the main path to a new + // desync path gets its "DesyncQueue" object which stores only the + // last value written before of the desync object (DO) it consumes + // (the container of type "last written value" or LWV. + // + // LWV + // [Sync path] -> desync() - - > DO -> [ISL0 @ Desync path #1] + // + // At the same time, generally, every island in the streaming + // graph gets its individual input as a queue (so normally, a + // writer pushes the same output MULTIPLE TIMES if it has mutliple + // readers): + // + // LWV + // [Sync path] -> desync() - - > DO1 -> [ISL0 @ Desync path #1] + // : LWV + // ' - - > DO2 -> [ISL1 @ Desync path #1] + // + // For users, it may seem legit to use desync here only once, and + // it MUST BE legit once the problem is fixed. + // But the problem with the current implementation is that islands + // on the same desync path get different desync queues and in fact + // stay desynchronized between each other. One shouldn't consider + // this as a single desync path anymore. + // If these two ISLs are then merged e.g. 
with add(a,b), the + // results will be inconsistent, given that the latency of ISL0 + // and ISL1 may be different. This is not the same frame anymore + // coming as `a` and `b` to add(a,b) because of it. + // + // To make things clear, we forbid this now and ask to call + // desync one more time to allow that. It is bad since the graph + // structure and island layout depends on kernel packages used, + // not on the sole GComputation structure. This needs to be fixed! + // Here's the working configuration: + // + // LWV + // [Sync path] -> desync() - - > DO1 -> [ISL0 @ Desync path #1] + // : LWV + // '-> desync() - - > DO2 -> [ISL1 @ Desync path #2] <-(!) + // + // Put an operation right after desync() is a quick workaround to + // this synchronization problem. There will be one "last_written_value" + // connected to a desynchronized data object, and this sole last_written_value + // object will feed both branches of the streaming executable. +} diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp index 08836163a7..c66fa44361 100644 --- a/modules/gapi/src/backends/ie/giebackend.cpp +++ b/modules/gapi/src/backends/ie/giebackend.cpp @@ -519,6 +519,65 @@ struct Infer: public cv::detail::KernelTag { } }; +struct InferROI: public cv::detail::KernelTag { + using API = cv::GInferROIBase; + static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); } + static KImpl kernel() { return KImpl{outMeta, run}; } + + static cv::GMetaArgs outMeta(const ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GMetaArgs &in_metas, + const cv::GArgs &/*in_args*/) { + cv::GMetaArgs result; + + GConstGIEModel gm(gr); + const auto &uu = gm.metadata(nh).get(); + + // Initialize input information + // FIXME: So far it is pretty limited + GAPI_Assert(1u == uu.params.input_names.size()); + GAPI_Assert(2u == in_metas.size()); + + // 0th is ROI, 1st is in0put image + auto &&ii = uu.inputs.at(uu.params.input_names.at(0)); + const 
auto &meta = util::get(in_metas.at(1)); + ii->setPrecision(toIE(meta.depth)); + ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR); + + // FIXME: It would be nice here to have an exact number of network's + // input/output parameters. Probably GCall should store it here for us. + // It doesn't, as far as I know.. + for (const auto &out_name : uu.params.output_names) { + // NOTE: our output_names vector follows the API order + // of this operation's outputs + const IE::DataPtr& ie_out = uu.outputs.at(out_name); + const IE::SizeVector dims = ie_out->getTensorDesc().getDims(); + + cv::GMatDesc outm(toCV(ie_out->getPrecision()), + toCV(ie_out->getTensorDesc().getDims())); + result.emplace_back(outm); + } + return result; + } + + static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) { + // non-generic version for now, per the InferROI's definition + GAPI_Assert(uu.params.num_in == 1); + const auto& this_roi = ctx.inArg(0).rref(); + const auto this_mat = ctx.inMat(1); + IE::Blob::Ptr this_blob = wrapIE(this_mat, cv::gapi::ie::TraitAs::IMAGE); + IE::Blob::Ptr roi_blob = IE::make_shared_blob(this_blob, toIE(this_roi)); + iec.this_request.SetBlob(*uu.params.input_names.begin(), roi_blob); + iec.this_request.Infer(); + for (auto i : ade::util::iota(uu.params.num_out)) { + cv::Mat& out_mat = ctx.outMatR(i); + IE::Blob::Ptr out_blob = iec.this_request.GetBlob(uu.params.output_names[i]); + copyFromIE(out_blob, out_mat); + } + } +}; + + struct InferList: public cv::detail::KernelTag { using API = cv::GInferListBase; static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); } @@ -780,6 +839,7 @@ namespace { virtual cv::gapi::GKernelPackage auxiliaryKernels() const override { return cv::gapi::kernels< cv::gimpl::ie::Infer + , cv::gimpl::ie::InferROI , cv::gimpl::ie::InferList , cv::gimpl::ie::InferList2 >(); diff --git a/modules/gapi/src/backends/ocl/goclbackend.cpp b/modules/gapi/src/backends/ocl/goclbackend.cpp index 34dba01afe..847b802fd2 
100644 --- a/modules/gapi/src/backends/ocl/goclbackend.cpp +++ b/modules/gapi/src/backends/ocl/goclbackend.cpp @@ -272,4 +272,8 @@ void cv::gimpl::GOCLExecutable::run(std::vector &&input_objs, GAPI_Assert((out_arg_data == (mag_mat.getMat(ACCESS_RW).data)) && " data for output parameters was reallocated ?"); } } + + // In/Out args clean-up is mandatory now with RMat + for (auto &it : input_objs) magazine::unbind(m_res, it.first); + for (auto &it : output_objs) magazine::unbind(m_res, it.first); } diff --git a/modules/gapi/src/compiler/gcompiler.cpp b/modules/gapi/src/compiler/gcompiler.cpp index 76c40ddca0..eb75f44e0e 100644 --- a/modules/gapi/src/compiler/gcompiler.cpp +++ b/modules/gapi/src/compiler/gcompiler.cpp @@ -238,6 +238,11 @@ cv::gimpl::GCompiler::GCompiler(const cv::GComputation &c, // (no compound backend present here) m_e.addPass("kernels", "check_islands_content", passes::checkIslandsContent); + // Special stage for intrinsics handling + m_e.addPassStage("intrin"); + m_e.addPass("intrin", "desync", passes::intrinDesync); + m_e.addPass("intrin", "finalizeIntrin", passes::intrinFinalize); + //Input metas may be empty when a graph is compiled for streaming m_e.addPassStage("meta"); if (!m_metas.empty()) @@ -384,6 +389,9 @@ cv::gimpl::GCompiler::GPtr cv::gimpl::GCompiler::generateGraph() { GModel::Graph(*g).metadata().set(OriginalInputMeta{m_metas}); } + // FIXME: remove m_args, remove GCompileArgs from backends' method signatures, + // rework backends to access GCompileArgs from graph metadata + GModel::Graph(*g).metadata().set(CompileArgs{m_args}); return g; } diff --git a/modules/gapi/src/compiler/gislandmodel.cpp b/modules/gapi/src/compiler/gislandmodel.cpp index aee0477e08..9ffc605372 100644 --- a/modules/gapi/src/compiler/gislandmodel.cpp +++ b/modules/gapi/src/compiler/gislandmodel.cpp @@ -175,13 +175,26 @@ void GIslandModel::generateInitial(GIslandModel::Graph &g, { auto src_data_nh = in_edge->srcNode(); auto isl_slot_nh = 
data_to_slot.at(src_data_nh); - g.link(isl_slot_nh, nh); // no other data stored yet + auto isl_new_eh = g.link(isl_slot_nh, nh); // no other data stored yet + // Propagate some special metadata from the GModel to GIslandModel + // TODO: Make it a single place (a function) for both inputs/outputs? + // (since it is duplicated in the below code block) + if (src_g.metadata(in_edge).contains()) + { + const auto idx = src_g.metadata(in_edge).get().index; + g.metadata(isl_new_eh).set(DesyncIslEdge{idx}); + } } for (auto out_edge : src_op_nh->outEdges()) { auto dst_data_nh = out_edge->dstNode(); auto isl_slot_nh = data_to_slot.at(dst_data_nh); - g.link(nh, isl_slot_nh); + auto isl_new_eh = g.link(nh, isl_slot_nh); + if (src_g.metadata(out_edge).contains()) + { + const auto idx = src_g.metadata(out_edge).get().index; + g.metadata(isl_new_eh).set(DesyncIslEdge{idx}); + } } } // for(all_operations) } @@ -254,6 +267,9 @@ void GIslandModel::syncIslandTags(Graph &g, ade::Graph &orig_g) void GIslandModel::compileIslands(Graph &g, const ade::Graph &orig_g, const GCompileArgs &args) { GModel::ConstGraph gm(orig_g); + if (gm.metadata().contains()) { + util::throw_error(std::logic_error("FATAL: The graph has unresolved intrinsics")); + } auto original_sorted = gm.metadata().get(); for (auto nh : g.nodes()) diff --git a/modules/gapi/src/compiler/gislandmodel.hpp b/modules/gapi/src/compiler/gislandmodel.hpp index 6cf8f98667..c2e7b96d45 100644 --- a/modules/gapi/src/compiler/gislandmodel.hpp +++ b/modules/gapi/src/compiler/gislandmodel.hpp @@ -142,6 +142,14 @@ public: // at that stage. virtual void handleNewStream() {}; // do nothing here by default + // This method is called for every IslandExecutable when + // the stream-based execution is stopped. + // All processing is guaranteed to be stopped by this moment, + // with no pending or running 'run()' processes ran in background. + // FIXME: This method is tightly bound to the GStreamingExecutor + // now. 
+ virtual void handleStopStream() {} // do nothing here by default + virtual ~GIslandExecutable() = default; }; @@ -222,8 +230,19 @@ struct IslandsCompiled static const char *name() { return "IslandsCompiled"; } }; +// This flag marks an edge in an GIslandModel as "desynchronized" +// i.e. it starts a new desynchronized subgraph +struct DesyncIslEdge +{ + static const char *name() { return "DesynchronizedIslandEdge"; } + + // Projection from GModel/DesyncEdge.index + int index; +}; + namespace GIslandModel { + using Graph = ade::TypedGraph < NodeKind , FusedIsland @@ -232,6 +251,7 @@ namespace GIslandModel , Emitter , Sink , IslandsCompiled + , DesyncIslEdge , ade::passes::TopologicalSortData >; @@ -244,6 +264,7 @@ namespace GIslandModel , Emitter , Sink , IslandsCompiled + , DesyncIslEdge , ade::passes::TopologicalSortData >; diff --git a/modules/gapi/src/compiler/gmodel.cpp b/modules/gapi/src/compiler/gmodel.cpp index b5b76fd1c9..ea4eb880a4 100644 --- a/modules/gapi/src/compiler/gmodel.cpp +++ b/modules/gapi/src/compiler/gmodel.cpp @@ -77,7 +77,7 @@ ade::NodeHandle GModel::mkDataNode(GModel::Graph &g, const GShape shape) return data_h; } -void GModel::linkIn(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t in_port) +ade::EdgeHandle GModel::linkIn(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t in_port) { // Check if input is already connected for (const auto& in_e : opH->inEdges()) @@ -96,9 +96,11 @@ void GModel::linkIn(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::si // Replace an API object with a REF (G* -> GOBJREF) op.args[in_port] = cv::GArg(RcDesc{gm.rc, gm.shape, {}}); + + return eh; } -void GModel::linkOut(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t out_port) +ade::EdgeHandle GModel::linkOut(Graph &g, ade::NodeHandle opH, ade::NodeHandle objH, std::size_t out_port) { // FIXME: check validity using kernel prototype @@ -121,6 +123,8 @@ void GModel::linkOut(Graph &g, ade::NodeHandle opH, 
ade::NodeHandle objH, std::s const auto min_out_size = std::max(op.outs.size(), storage_with_port); op.outs.resize(min_out_size, RcDesc{-1,GShape::GMAT,{}}); // FIXME: Invalid shape instead? op.outs[out_port] = RcDesc{gm.rc, gm.shape, {}}; + + return eh; } std::vector GModel::orderedInputs(const ConstGraph &g, ade::NodeHandle nh) @@ -210,26 +214,29 @@ ade::NodeHandle GModel::detail::dataNodeOf(const ConstLayoutGraph &g, const GOri return g.metadata().get().object_nodes.at(origin); } -void GModel::redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to) +std::vector GModel::redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to) { std::vector ehh(from->outEdges().begin(), from->outEdges().end()); + std::vector ohh; + ohh.reserve(ehh.size()); for (auto e : ehh) { auto dst = e->dstNode(); auto input = g.metadata(e).get(); g.erase(e); - linkIn(g, dst, to, input.port); + ohh.push_back(linkIn(g, dst, to, input.port)); } + return ohh; } -void GModel::redirectWriter(Graph &g, ade::NodeHandle from, ade::NodeHandle to) +ade::EdgeHandle GModel::redirectWriter(Graph &g, ade::NodeHandle from, ade::NodeHandle to) { GAPI_Assert(from->inEdges().size() == 1); auto e = from->inEdges().front(); auto op = e->srcNode(); auto output = g.metadata(e).get(); g.erase(e); - linkOut(g, op, to, output.port); + return linkOut(g, op, to, output.port); } GMetaArgs GModel::collectInputMeta(const GModel::ConstGraph &cg, ade::NodeHandle node) diff --git a/modules/gapi/src/compiler/gmodel.hpp b/modules/gapi/src/compiler/gmodel.hpp index 5f02e58354..d016766fb5 100644 --- a/modules/gapi/src/compiler/gmodel.hpp +++ b/modules/gapi/src/compiler/gmodel.hpp @@ -211,6 +211,58 @@ struct CustomMetaFunction CM customOutMeta; }; +// This is a general flag indicating that this GModel has intrinsics. +// In the beginning of the compilation, it is a quick check to +// indicate there are intrinsics. 
+// +// In the end of the compilation, having this flag is fatal -- all +// intrinsics must be resolved. +struct HasIntrinsics +{ + static const char *name() { return "HasIntrinsicsFlag"; } +}; + +// This is a special tag for both DATA and OP nodes indicating +// which desynchronized path this node belongs to. +// This tag is set by a special complex pass intrinDesync/accept. +struct DesyncPath +{ + static const char *name() { return "DesynchronizedPath"; } + + // A zero-based index of the desynchronized path in the graph. + // Set by intrinDesync() compiler pass + int index; +}; + +// This is a special tag for graph Edges indicating that this +// particular edge starts a desynchronized path in the graph. +// At the execution stage, the data coming "through" these edges +// (virtually, of course, since our GModel edges never transfer the +// actual data, they just represent these transfers) is desynchronized +// from the rest of the pipeline, i.e. may be "lost" (stay unconsumed +// and then overwritten with some new data when streaming). +struct DesyncEdge +{ + static const char *name() { return "DesynchronizedEdge"; } + + // A zero-based index of the desynchronized path in the graph. + // Set by intrinDesync/apply() compiler pass + int index; +}; + +// This flag marks the island graph as "desynchronized" +struct Desynchronized +{ + static const char *name() { return "Desynchronized"; } +}; + +// Reference to compile args of the computation +struct CompileArgs +{ + static const char *name() { return "CompileArgs"; } + GCompileArgs args; +}; + namespace GModel { using Graph = ade::TypedGraph @@ -232,6 +284,11 @@ namespace GModel , CustomMetaFunction , Streaming , Deserialized + , HasIntrinsics + , DesyncPath + , DesyncEdge + , Desynchronized + , CompileArgs >; // FIXME: How to define it based on GModel??? 
@@ -254,6 +311,11 @@ namespace GModel , CustomMetaFunction , Streaming , Deserialized + , HasIntrinsics + , DesyncPath + , DesyncEdge + , Desynchronized + , CompileArgs >; // FIXME: @@ -278,11 +340,11 @@ namespace GModel // Clears logged messages of a node. GAPI_EXPORTS void log_clear(Graph &g, ade::NodeHandle node); - GAPI_EXPORTS void linkIn (Graph &g, ade::NodeHandle op, ade::NodeHandle obj, std::size_t in_port); - GAPI_EXPORTS void linkOut (Graph &g, ade::NodeHandle op, ade::NodeHandle obj, std::size_t out_port); + GAPI_EXPORTS ade::EdgeHandle linkIn (Graph &g, ade::NodeHandle op, ade::NodeHandle obj, std::size_t in_port); + GAPI_EXPORTS ade::EdgeHandle linkOut (Graph &g, ade::NodeHandle op, ade::NodeHandle obj, std::size_t out_port); - GAPI_EXPORTS void redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to); - GAPI_EXPORTS void redirectWriter (Graph &g, ade::NodeHandle from, ade::NodeHandle to); + GAPI_EXPORTS std::vector redirectReaders(Graph &g, ade::NodeHandle from, ade::NodeHandle to); + GAPI_EXPORTS ade::EdgeHandle redirectWriter (Graph &g, ade::NodeHandle from, ade::NodeHandle to); GAPI_EXPORTS std::vector orderedInputs (const ConstGraph &g, ade::NodeHandle nh); GAPI_EXPORTS std::vector orderedOutputs(const ConstGraph &g, ade::NodeHandle nh); diff --git a/modules/gapi/src/compiler/gmodelbuilder.cpp b/modules/gapi/src/compiler/gmodelbuilder.cpp index 80abadd9c6..5f8f3518fc 100644 --- a/modules/gapi/src/compiler/gmodelbuilder.cpp +++ b/modules/gapi/src/compiler/gmodelbuilder.cpp @@ -134,12 +134,19 @@ cv::gimpl::Unrolled cv::gimpl::unrollExpr(const GProtoArgs &ins, // Put the outputs object description of the node // so that they are not lost if they are not consumed by other operations + GAPI_Assert(call_p.m_k.outCtors.size() == call_p.m_k.outShapes.size()); for (const auto &it : ade::util::indexed(call_p.m_k.outShapes)) { std::size_t port = ade::util::index(it); GShape shape = ade::util::value(it); - GOrigin org { shape, node, port, {}, 
origin.kind }; + // FIXME: then use ZIP + HostCtor ctor = call_p.m_k.outCtors[port]; + + // NB: Probably this fixes all other "missing host ctor" + // problems. + // TODO: Clean-up the old workarounds if it really is. + GOrigin org {shape, node, port, std::move(ctor), origin.kind}; origins.insert(org); } diff --git a/modules/gapi/src/compiler/gobjref.hpp b/modules/gapi/src/compiler/gobjref.hpp index dd0939c439..bca6fa525e 100644 --- a/modules/gapi/src/compiler/gobjref.hpp +++ b/modules/gapi/src/compiler/gobjref.hpp @@ -16,15 +16,9 @@ namespace cv namespace gimpl { - // Union type for various user-defined type constructors (GArray, GOpaque, etc) - // FIXME: Replace construct-only API with a more generic one - // (probably with bits of introspection) - // Not required for non-user-defined types (GMat, GScalar, etc) - using HostCtor = util::variant - < util::monostate - , detail::ConstructVec - , detail::ConstructOpaque - >; + // HostCtor was there, but then moved to public + // Redeclare here to avoid changing tons of code + using HostCtor = cv::detail::HostCtor; using ConstVal = util::variant < util::monostate diff --git a/modules/gapi/src/compiler/gstreaming.cpp b/modules/gapi/src/compiler/gstreaming.cpp index 29c98ddfd4..eb06f3f6f2 100644 --- a/modules/gapi/src/compiler/gstreaming.cpp +++ b/modules/gapi/src/compiler/gstreaming.cpp @@ -69,6 +69,11 @@ bool cv::GStreamingCompiled::Priv::pull(cv::GRunArgsP &&outs) return m_exec->pull(std::move(outs)); } +bool cv::GStreamingCompiled::Priv::pull(cv::GOptRunArgsP &&outs) +{ + return m_exec->pull(std::move(outs)); +} + bool cv::GStreamingCompiled::Priv::try_pull(cv::GRunArgsP &&outs) { return m_exec->try_pull(std::move(outs)); @@ -113,6 +118,7 @@ bool cv::GStreamingCompiled::pull(cv::GRunArgsP &&outs) std::tuple cv::GStreamingCompiled::pull() { + // FIXME: Why it is not @ priv?? 
GRunArgs run_args; GRunArgsP outs; const auto& out_shapes = m_priv->outShapes(); @@ -144,6 +150,11 @@ std::tuple cv::GStreamingCompiled::pull() return std::make_tuple(is_over, run_args); } +bool cv::GStreamingCompiled::pull(cv::GOptRunArgsP &&outs) +{ + return m_priv->pull(std::move(outs)); +} + bool cv::GStreamingCompiled::try_pull(cv::GRunArgsP &&outs) { return m_priv->try_pull(std::move(outs)); diff --git a/modules/gapi/src/compiler/gstreaming_priv.hpp b/modules/gapi/src/compiler/gstreaming_priv.hpp index 73ca002f85..2f195ca226 100644 --- a/modules/gapi/src/compiler/gstreaming_priv.hpp +++ b/modules/gapi/src/compiler/gstreaming_priv.hpp @@ -42,6 +42,7 @@ public: void setSource(GRunArgs &&args); void start(); bool pull(cv::GRunArgsP &&outs); + bool pull(cv::GOptRunArgsP &&outs); bool try_pull(cv::GRunArgsP &&outs); void stop(); diff --git a/modules/gapi/src/compiler/passes/exec.cpp b/modules/gapi/src/compiler/passes/exec.cpp index 0eb8352b76..f6a73489eb 100644 --- a/modules/gapi/src/compiler/passes/exec.cpp +++ b/modules/gapi/src/compiler/passes/exec.cpp @@ -20,6 +20,7 @@ #include // util::optional #include "logger.hpp" // GAPI_LOG +#include "api/gbackend_priv.hpp" // for canMerge() #include "compiler/gmodel.hpp" #include "compiler/gislandmodel.hpp" #include "compiler/passes/passes.hpp" @@ -54,11 +55,28 @@ namespace // Also check the cases backend can't handle // (e.x. 
GScalar connecting two fluid ops should split the graph) const GModel::ConstGraph g(src_graph); + if (g.metadata().contains()) { + // Fusion of a graph having a desynchronized path is + // definitely non-trivial + return false; + } const auto& active_backends = g.metadata().get().backends; - return active_backends.size() == 1 && - ade::util::all_of(g.nodes(), [&](ade::NodeHandle nh) { - return !g.metadata(nh).contains(); - }); + if (active_backends.size() != 1u) { + // More than 1 backend involved - non-trivial + return false; + } + const auto& has_island_tags = [&](ade::NodeHandle nh) { + return g.metadata(nh).contains(); + }; + if (ade::util::any_of(g.nodes(), has_island_tags)) { + // There are user-defined islands - non-trivial + return false; + } + if (active_backends.begin()->priv().controlsMerge()) { + // If the only backend controls Island Fusion on its own - non-trivial + return false; + } + return true; } void fuseTrivial(GIslandModel::Graph &g, const ade::Graph &src_graph) @@ -125,9 +143,9 @@ namespace }; bool canMerge(const GIslandModel::Graph &g, - const ade::NodeHandle a_nh, - const ade::NodeHandle /*slot_nh*/, - const ade::NodeHandle b_nh, + const ade::NodeHandle &a_nh, + const ade::NodeHandle &slot_nh, + const ade::NodeHandle &b_nh, const MergeContext &ctx = MergeContext()) { auto a_ptr = g.metadata(a_nh).get().object; @@ -142,8 +160,8 @@ namespace // Islands which cause a cycle can't be merged as well // (since the flag is set, the procedure already tried to // merge these islands in the past) - if (ade::util::contains(ctx.cycle_causers, std::make_pair(a_ptr, b_ptr))|| - ade::util::contains(ctx.cycle_causers, std::make_pair(b_ptr, a_ptr))) + if ( ade::util::contains(ctx.cycle_causers, std::make_pair(a_ptr, b_ptr)) + || ade::util::contains(ctx.cycle_causers, std::make_pair(b_ptr, a_ptr))) return false; // There may be user-defined islands. 
Initially user-defined @@ -163,7 +181,13 @@ namespace return false; } - // FIXME: add a backend-specified merge checker + // If available, run the backend-specified merge checker + const auto &this_backend_p = a_ptr->backend().priv(); + if ( this_backend_p.controlsMerge() + && !this_backend_p.allowsMerge(g, a_nh, slot_nh, b_nh)) + { + return false; + } return true; } @@ -205,10 +229,31 @@ namespace { using namespace std::placeholders; + // Before checking for candidates, find and ban neighbor nodes + // (input or outputs) which are connected via desynchronized + // edges. + GIsland::node_set nodes_with_desync_edges; + for (const auto& in_eh : nh->inEdges()) { + if (g.metadata(in_eh).contains()) { + nodes_with_desync_edges.insert(in_eh->srcNode()); + } + } + for (const auto& output_data_nh : nh->outNodes()) { + for (const auto &out_reader_eh : output_data_nh->outEdges()) { + if (g.metadata(out_reader_eh).contains()) { + nodes_with_desync_edges.insert(out_reader_eh->dstNode()); + } + } + } + // Find a first matching candidate GIsland for merge // among inputs - for (const auto& input_data_nh : nh->inNodes()) + for (const auto& in_eh : nh->inEdges()) { + if (ade::util::contains(nodes_with_desync_edges, in_eh->srcNode())) { + continue; // desync edges can never be fused + } + const auto& input_data_nh = in_eh->srcNode(); if (input_data_nh->inNodes().size() != 0) { // Data node must have a single producer only @@ -224,14 +269,17 @@ namespace // Ok, now try to find it among the outputs for (const auto& output_data_nh : nh->outNodes()) { - auto mergeTest = [&](ade::NodeHandle cons_nh) -> bool { - return canMerge(g, nh, output_data_nh, cons_nh, ctx); + auto mergeTest = [&](ade::EdgeHandle cons_eh) -> bool { + if (ade::util::contains(nodes_with_desync_edges, cons_eh->dstNode())) { + return false; // desync edges can never be fused + } + return canMerge(g, nh, output_data_nh, cons_eh->dstNode(), ctx); }; - auto cand_it = std::find_if(output_data_nh->outNodes().begin(), - 
output_data_nh->outNodes().end(), + auto cand_it = std::find_if(output_data_nh->outEdges().begin(), + output_data_nh->outEdges().end(), mergeTest); - if (cand_it != output_data_nh->outNodes().end()) - return std::make_tuple(*cand_it, + if (cand_it != output_data_nh->outEdges().end()) + return std::make_tuple((*cand_it)->dstNode(), output_data_nh, Direction::Out); } // for(outNodes) @@ -251,6 +299,7 @@ namespace ade::NodeHandle m_slot; ade::NodeHandle m_cons; + using Change = ChangeT; Change::List m_changes; struct MergeObjects @@ -423,10 +472,10 @@ namespace auto backend = m_gim.metadata(m_prod).get() .object->backend(); auto merged = std::make_shared(backend, - std::move(mo.all), - std::move(mo.in_ops), - std::move(mo.out_ops), - std::move(maybe_user_tag)); + std::move(mo.all), + std::move(mo.in_ops), + std::move(mo.out_ops), + std::move(maybe_user_tag)); // FIXME: move this debugging to some user-controllable log-level #ifdef DEBUG_MERGE merged->debug(); @@ -440,7 +489,9 @@ namespace m_prod->inEdges().end()); for (auto in_edge : input_edges) { - m_changes.enqueue(m_g, in_edge->srcNode(), new_nh); + // FIXME: Introduce a Relink primitive instead? + // (combining the both actions into one?) + m_changes.enqueue(m_g, in_edge->srcNode(), new_nh, in_edge); m_changes.enqueue(m_g, m_prod, in_edge); } @@ -450,7 +501,7 @@ namespace m_cons->outEdges().end()); for (auto out_edge : output_edges) { - m_changes.enqueue(m_g, new_nh, out_edge->dstNode()); + m_changes.enqueue(m_g, new_nh, out_edge->dstNode(), out_edge); m_changes.enqueue(m_g, m_cons, out_edge); } @@ -491,6 +542,10 @@ namespace m_changes.enqueue(m_g, non_opt_slot_nh, eh); } } + // FIXME: No metadata copied here (from where??) + // For DesyncIslEdges it still works, as these tags are + // placed to Data->Op edges and this one is an Op->Data + // edge. 
m_changes.enqueue(m_g, new_nh, non_opt_slot_nh); } @@ -502,7 +557,7 @@ namespace m_prod->outEdges().end()); for (auto extra_out : prod_extra_out_edges) { - m_changes.enqueue(m_g, new_nh, extra_out->dstNode()); + m_changes.enqueue(m_g, new_nh, extra_out->dstNode(), extra_out); m_changes.enqueue(m_g, m_prod, extra_out); } @@ -514,7 +569,7 @@ namespace m_cons->inEdges().end()); for (auto extra_in : cons_extra_in_edges) { - m_changes.enqueue(m_g, extra_in->srcNode(), new_nh); + m_changes.enqueue(m_g, extra_in->srcNode(), new_nh, extra_in); m_changes.enqueue(m_g, m_cons, extra_in); } @@ -557,10 +612,10 @@ namespace there_was_a_merge = false; // FIXME: move this debugging to some user-controllable log level - #ifdef DEBUG_MERGE +#ifdef DEBUG_MERGE GAPI_LOG_INFO(NULL, "Before next merge attempt " << iteration << "..."); merge_debug(g, iteration); - #endif +#endif iteration++; auto sorted = pass_helpers::topoSort(im); for (auto nh : sorted) @@ -600,9 +655,9 @@ namespace "merge(" << l_obj->name() << "," << r_obj->name() << ") was successful!"); action.commit(); - #ifdef DEBUG_MERGE +#ifdef DEBUG_MERGE GIslandModel::syncIslandTags(gim, g); - #endif +#endif there_was_a_merge = true; break; // start do{}while from the beginning } diff --git a/modules/gapi/src/compiler/passes/intrin.cpp b/modules/gapi/src/compiler/passes/intrin.cpp new file mode 100644 index 0000000000..5d2707570a --- /dev/null +++ b/modules/gapi/src/compiler/passes/intrin.cpp @@ -0,0 +1,305 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + + +#include "precomp.hpp" + +#include +#include +#include // GDesync intrinsic + +#include "compiler/gmodel.hpp" +#include "compiler/passes/passes.hpp" + +namespace desync { +namespace { + +// Drop the desynchronized node `nh` from the graph, reconnect the +// graph structure properly. This is a helper function which is used +// in both drop(g) and apply(g) passes. +// +// @return a vector of new edge handles connecting the "main" graph +// with its desynchronized part. +std::vector drop(cv::gimpl::GModel::Graph &g, + ade::NodeHandle nh) { + using namespace cv::gimpl; + + // What we need to do here: + // 1. Connect the readers of its produced data objects + // to the input data objects of desync; + // 2. Drop the data object it produces. + // 3. Drop the desync operation itself; + std::vector in_data_objs = GModel::orderedInputs(g, nh); + std::vector out_data_objs = GModel::orderedOutputs(g, nh); + std::vector new_links; + GAPI_Assert(in_data_objs.size() == out_data_objs.size()); + GAPI_DbgAssert(ade::util::all_of + (out_data_objs, + [&](const ade::NodeHandle &oh) { + return g.metadata(oh).contains(); + })); + // (1) + for (auto &&it: ade::util::zip(ade::util::toRange(in_data_objs), + ade::util::toRange(out_data_objs))) { + auto these_new_links = GModel::redirectReaders(g, + std::get<1>(it), + std::get<0>(it)); + new_links.insert(new_links.end(), + these_new_links.begin(), + these_new_links.end()); + } + // (2) + for (auto &&old_out_nh : out_data_objs) { + g.erase(old_out_nh); + } + // (3) + g.erase(nh); + + return new_links; +} + +// Tracing a desynchronizing subgraph is somewhat tricky and happens +// in both directions: downwards and upwards. +// +// The downward process is the basic one: we start with a "desync" +// OP node and go down to the graph using the "output" edges. We check +// if all nodes on this path [can] belong to this desynchronized path +// and don't overlap with others. 
+// +// An important contract to maintain is that the desynchronized part +// can't have any input references from the "main" graph part or any +// other desynchronized part in the graph. This contract is validated +// by checking every node's input which must belong to the same +// desynchronized part. +// +// Here is the pitfall of this check: +// +// v +// GMat_0 +// v +// +----------+ +// | desync() | <- This point originates the traceDown process +// +----------+ +// v +// GMat_0' <- This node will be tagged for this desync at +// :--------. step 0/1 +// v : <- The order how output nodes are visited is not +// +----------+ : specified, we can visit Op2() first (as there +// | Op1() | : is a direct link) bypassing visiting and tagging +// +----------+ : Op1() and GMat_1 +// v : +// GMat_1 : +// : .---' +// v v <- When we visit Op2() via the 2nd edge on this +// +----------+ graph, we check if all inputs belong to the same +// | Op2() | desynchronized graph and GMat_1 fails this check +// +----------+ (since the traceDown() process haven't visited +// it yet). +// +// Cases like this originate the traceUp() process: if we find an +// input node in our desynchronized path which doesn't belong to this +// path YET, it is not 100% a problem, and we need to trace it back +// (upwards) to see if it is really a case. + +// This recursive function checks the desync_id in the graph upwards. +// The process doesn't continue for nodes which have a valid +// desync_id already. +// The process only continues for nodes which have no desync_id +// assigned. If there's no such nodes anymore, the procedure is +// considered complete and a list of nodes to tag is returned to the +// caller. +// +// If NO inputs of this node have a valid desync_id, the desync +// invariant is broken and the function throws. 
+void traceUp(cv::gimpl::GModel::Graph &g, + const ade::NodeHandle &nh, + int desync_id, + std::vector &path) { + using namespace cv::gimpl; + + GAPI_Assert(!nh->inNodes().empty() + && "traceUp: a desynchronized part of the graph is not isolated?"); + + if (g.metadata(nh).contains()) { + // We may face nodes which have DesyncPath already visited during + // this recursive process (e.g. via some other output or branch in the + // subgraph) + if (g.metadata(nh).get().index != desync_id) { + GAPI_Assert(false && "Desynchronization can't be nested!"); + } + return; // This object belongs to the desync path - exit early. + } + + // Regardless of the result, put this nh to the path + path.push_back(nh); + + // Check if the input nodes are OK + std::vector nodes_to_trace; + nodes_to_trace.reserve(nh->inNodes().size()); + for (auto &&in_nh : nh->inNodes()) { + if (g.metadata(in_nh).contains()) { + // We may face nodes which have DesyncPath already visited during + // this recursive process (e.g. via some other output or branch in the + // subgraph) + GAPI_Assert(g.metadata(in_nh).get().index == desync_id + && "Desynchronization can't be nested!"); + } else { + nodes_to_trace.push_back(in_nh); + } + } + + // If there are nodes to trace, continue the recursion + for (auto &&up_nh : nodes_to_trace) { + traceUp(g, up_nh, desync_id, path); + } +} + +// This recursive function propagates the desync_id down to the graph +// starting at nh, and also checks: +// - if this desync path is isolated; +// - if this desync path is not overlapped. +// It also originates the traceUp() process at the points of +// uncertainty (as described in the comment above). +void traceDown(cv::gimpl::GModel::Graph &g, + const ade::NodeHandle &nh, + int desync_id) { + using namespace cv::gimpl; + + if (g.metadata(nh).contains()) { + // We may face nodes which have DesyncPath already visited during + // this recursive process (e.g. 
via some other output or branch in the + // subgraph) + GAPI_Assert(g.metadata(nh).get().index == desync_id + && "Desynchronization can't be nested!"); + } else { + g.metadata(nh).set(DesyncPath{desync_id}); + } + + // All inputs of this data object must belong to the same + // desync path. + for (auto &&in_nh : nh->inNodes()) { + // If an input object is not assigned to this desync path, + // it does not means that the object doesn't belong to + // this path. Check it. + std::vector path_up; + traceUp(g, in_nh, desync_id, path_up); + // We get here on success. Just set the proper tags for + // the identified input path. + for (auto &&up_nh : path_up) { + g.metadata(up_nh).set(DesyncPath{desync_id}); + } + } + + // Propagate the tag & check down + for (auto &&out_nh : nh->outNodes()) { + traceDown(g, out_nh, desync_id); + } +} + +// Streaming case: ensure the graph has proper isolation of the +// desynchronized parts, set proper Edge metadata hints for +// GStreamingExecutable +void apply(cv::gimpl::GModel::Graph &g) { + using namespace cv::gimpl; + + // Stage 0. Trace down the desync operations in the graph. + // Tag them with their unique (per graph) identifiers. + int total_desync = 0; + for (auto &&nh : g.nodes()) { + if (g.metadata(nh).get().t == NodeType::OP) { + const auto &op = g.metadata(nh).get(); + if (op.k.name == cv::gapi::streaming::detail::GDesync::id()) { + GAPI_Assert(!g.metadata(nh).contains() + && "Desynchronization can't be nested!"); + const int this_desync_id = total_desync++; + g.metadata(nh).set(DesyncPath{this_desync_id}); + for (auto &&out_nh: nh->outNodes()) { + traceDown(g, out_nh, this_desync_id); + } + } // if (desync) + } // if(OP) + } // for(nodes) + + // Tracing is done for all desync ops in the graph now. + // Stage 1. Drop the desync operations from the graph, but mark + // the desynchronized edges a special way. + // The desynchronized edge is the edge which connects a main + // subgraph data with a desynchronized subgraph data. 
+ std::vector nodes(g.nodes().begin(), g.nodes().end()); + for (auto &&nh : nodes) { + if (nh == nullptr) { + // Some nodes could be dropped already during the procedure + // thanks ADE their NodeHandles updated automatically + continue; + } + if (g.metadata(nh).get().t == NodeType::OP) { + const auto &op = g.metadata(nh).get(); + if (op.k.name == cv::gapi::streaming::detail::GDesync::id()) { + auto index = g.metadata(nh).get().index; + auto new_links = drop(g, nh); + for (auto &&eh : new_links) { + g.metadata(eh).set(DesyncEdge{index}); + } + } // if (desync) + } // if (Op) + } // for(nodes) + + // Stage 2. Put a synchronized tag if there were changes applied + if (total_desync > 0) { + g.metadata().set(Desynchronized{}); + } +} + +// Probably the simplest case: desync makes no sense in the regular +// compilation process, so just drop all its occurences in the graph, +// reconnecting nodes properly. +void drop(cv::gimpl::GModel::Graph &g) { + // FIXME: LOG here that we're dropping the desync operations as + // they have no sense when compiling in the regular mode. + using namespace cv::gimpl; + std::vector nodes(g.nodes().begin(), g.nodes().end()); + for (auto &&nh : nodes) { + if (nh == nullptr) { + // Some nodes could be dropped already during the procedure + // thanks ADE their NodeHandles updated automatically + continue; + } + if (g.metadata(nh).get().t == NodeType::OP) { + const auto &op = g.metadata(nh).get(); + if (op.k.name == cv::gapi::streaming::detail::GDesync::id()) { + drop(g, nh); + } // if (desync) + } // if (Op) + } // for(nodes) +} + +} // anonymous namespace +} // namespace desync + +void cv::gimpl::passes::intrinDesync(ade::passes::PassContext &ctx) { + GModel::Graph gr(ctx.graph); + if (!gr.metadata().contains()) + return; + + gr.metadata().contains() + ? desync::apply(gr) // Streaming compilation + : desync::drop(gr); // Regular compilation +} + +// Clears the HasIntrinsics flag if all intrinsics have been handled. 
+void cv::gimpl::passes::intrinFinalize(ade::passes::PassContext &ctx) { + GModel::Graph gr(ctx.graph); + for (auto &&nh : gr.nodes()) { + if (gr.metadata(nh).get().t == NodeType::OP) { + const auto &op = gr.metadata(nh).get(); + if (is_intrinsic(op.k.name)) { + return; + } + } + } + // If reached here, really clear the flag + gr.metadata().erase(); +} diff --git a/modules/gapi/src/compiler/passes/kernels.cpp b/modules/gapi/src/compiler/passes/kernels.cpp index 69b339fb1e..100a32ec57 100644 --- a/modules/gapi/src/compiler/passes/kernels.cpp +++ b/modules/gapi/src/compiler/passes/kernels.cpp @@ -14,6 +14,7 @@ #include // compound::backend() #include // GKernelPackage #include // GNetPackage +#include // GDesync intrinsic #include "compiler/gmodel.hpp" #include "compiler/passes/passes.hpp" @@ -24,6 +25,20 @@ #include "logger.hpp" // GAPI_LOG #include "api/gproto_priv.hpp" // is_dynamic, rewrap +namespace +{ + // FIXME: This may be not the right design choice, but so far it works + const std::vector known_intrinsics = { + cv::gapi::streaming::detail::GDesync::id() + }; +} +bool cv::gimpl::is_intrinsic(const std::string &s) { + // FIXME: This search might be better in time once we start using string + return std::find(known_intrinsics.begin(), + known_intrinsics.end(), + s) != known_intrinsics.end(); +} + namespace { struct ImplInfo @@ -130,8 +145,13 @@ void cv::gimpl::passes::bindNetParams(ade::passes::PassContext &ctx, } } -// This pass, given the kernel package, selects a kernel implementation -// for every operation in the graph +// This pass, given the kernel package, selects a kernel +// implementation for every operation in the graph +// +// Starting OpenCV 4.3, G-API may have some special "intrinsic" +// operations. Those can be implemented by backends as regular +// kernels, but if not, they are handled by the framework itself in +// its optimization/execution passes. 
void cv::gimpl::passes::resolveKernels(ade::passes::PassContext &ctx, const gapi::GKernelPackage &kernels) { @@ -142,7 +162,25 @@ void cv::gimpl::passes::resolveKernels(ade::passes::PassContext &ctx, { if (gr.metadata(nh).get().t == NodeType::OP) { + // If the operation is known to be intrinsic and is NOT + // implemented in the package, just skip it - there should + // be some pass which handles it. auto &op = gr.metadata(nh).get(); + if (is_intrinsic(op.k.name) && !kernels.includesAPI(op.k.name)) { + gr.metadata().set(HasIntrinsics{}); + continue; + } + // FIXME: And this logic is terribly wrong. The right + // thing is to assign an intrinsic to a particular island + // if and only if it is: + // (a) surrounded by nodes of backend X, AND + // (b) is supported by backend X. + // Here we may have multiple backends supporting an + // intrinsic but only one of those gets selected. And + // this is exactly a situation we need multiple versions + // of the same kernel to be presented in the kernel + // package (as it was designed originally). + cv::gapi::GBackend selected_backend; cv::GKernelImpl selected_impl; std::tie(selected_backend, selected_impl) = kernels.lookup(op.k.name); @@ -181,6 +219,12 @@ void cv::gimpl::passes::expandKernels(ade::passes::PassContext &ctx, const gapi: if (gr.metadata(nh).get().t == NodeType::OP) { const auto& op = gr.metadata(nh).get(); + // FIXME: Essentially the same problem as in the above resolveKernels + if (is_intrinsic(op.k.name) && !kernels.includesAPI(op.k.name)) { + // Note: There's no need to set HasIntrinsics flag here + // since resolveKernels would do it later. 
+ continue; + } cv::gapi::GBackend selected_backend; cv::GKernelImpl selected_impl; diff --git a/modules/gapi/src/compiler/passes/passes.hpp b/modules/gapi/src/compiler/passes/passes.hpp index 84142fc055..8f187f6bb7 100644 --- a/modules/gapi/src/compiler/passes/passes.hpp +++ b/modules/gapi/src/compiler/passes/passes.hpp @@ -31,7 +31,11 @@ namespace gapi { struct GNetPackage; } // namespace gapi -namespace gimpl { namespace passes { +namespace gimpl { + +bool is_intrinsic(const std::string &op_name); + +namespace passes { void dumpDot(const ade::Graph &g, std::ostream& os); void dumpDot(ade::passes::PassContext &ctx, std::ostream& os); @@ -66,6 +70,9 @@ void applyTransformations(ade::passes::PassContext &ctx, void addStreaming(ade::passes::PassContext &ctx); +void intrinDesync(ade::passes::PassContext &ctx); +void intrinFinalize(ade::passes::PassContext &ctx); + }} // namespace gimpl::passes } // namespace cv diff --git a/modules/gapi/src/compiler/transactions.hpp b/modules/gapi/src/compiler/transactions.hpp index 54af8a6e69..bdc1723e19 100644 --- a/modules/gapi/src/compiler/transactions.hpp +++ b/modules/gapi/src/compiler/transactions.hpp @@ -14,6 +14,7 @@ #include +#include "opencv2/gapi/util/util.hpp" // Seq #include "opencv2/gapi/own/assert.hpp" enum class Direction: int {Invalid, In, Out}; @@ -21,8 +22,50 @@ enum class Direction: int {Invalid, In, Out}; //////////////////////////////////////////////////////////////////////////// //// // TODO: Probably it can be moved to ADE +template +class Preserved +{ + using S = typename cv::detail::MkSeq::type; + std::tuple...> m_data; -namespace Change + template + cv::util::optional get(ade::ConstTypedGraph g, H h) { + return g.metadata(h).template contains() + ? 
cv::util::make_optional(g.metadata(h).template get()) + : cv::util::optional{}; + } + template + int set(ade::TypedGraph &g, H &h) { + const auto &opt = std::get(m_data); + if (opt.has_value()) + g.metadata(h).set(opt.value()); + return 0; + } + template + void copyTo_impl(ade::TypedGraph &g, H h, cv::detail::Seq) { + int unused[] = {0, set(g, h)...}; + (void) unused; + } +public: + Preserved(const ade::Graph &g, H h) { + ade::ConstTypedGraph tg(g); + m_data = std::make_tuple(get(tg, h)...); + } + void copyTo(ade::Graph &g, H h) { + ade::TypedGraph tg(g); + copyTo_impl(tg, h, S{}); + } +}; +// Do nothing if there's no metadata +template +class Preserved { +public: + Preserved(const ade::Graph &, H) {} + void copyTo(ade::Graph &, H) {} +}; + +template +struct ChangeT { struct Base { @@ -31,6 +74,8 @@ namespace Change virtual ~Base() = default; }; + template using Preserved = ::Preserved; + class NodeCreated final: public Base { ade::NodeHandle m_node; @@ -39,11 +84,7 @@ namespace Change virtual void rollback(ade::Graph &g) override { g.erase(m_node); } }; - // NB: Drops all metadata stored in the EdgeHandle, - // which is not restored even in the rollback - - // FIXME: either add a way for users to preserve meta manually - // or extend ADE to manipulate with meta such way + // FIXME: maybe extend ADE to clone/copy the whole metadata? class DropLink final: public Base { ade::NodeHandle m_node; @@ -51,13 +92,15 @@ namespace Change ade::NodeHandle m_sibling; + Preserved m_meta; + public: DropLink(ade::Graph &g, const ade::NodeHandle &node, const ade::EdgeHandle &edge) - : m_node(node), m_dir(node == edge->srcNode() - ? Direction::Out - : Direction::In) + : m_node(node) + , m_dir(node == edge->srcNode() ? Direction::Out : Direction::In) + , m_meta(g, edge) { m_sibling = (m_dir == Direction::In ? edge->srcNode() @@ -67,12 +110,17 @@ namespace Change virtual void rollback(ade::Graph &g) override { + // FIXME: Need to preserve metadata here! 
+ // GIslandModel edges now have metadata + ade::EdgeHandle eh; switch(m_dir) { - case Direction::In: g.link(m_sibling, m_node); break; - case Direction::Out: g.link(m_node, m_sibling); break; + case Direction::In: eh = g.link(m_sibling, m_node); break; + case Direction::Out: eh = g.link(m_node, m_sibling); break; default: GAPI_Assert(false); } + GAPI_Assert(eh != nullptr); + m_meta.copyTo(g, eh); } }; @@ -82,10 +130,15 @@ namespace Change public: NewLink(ade::Graph &g, - const ade::NodeHandle &prod, - const ade::NodeHandle &cons) + const ade::NodeHandle &prod, + const ade::NodeHandle &cons, + const ade::EdgeHandle ©_from = ade::EdgeHandle()) : m_edge(g.link(prod, cons)) { + if (copy_from != nullptr) + { + Preserved(g, copy_from).copyTo(g, m_edge); + } } virtual void rollback(ade::Graph &g) override @@ -141,7 +194,7 @@ namespace Change } } }; -} // namespace Change +}; // struct Change //////////////////////////////////////////////////////////////////////////// #endif // OPENCV_GAPI_COMPILER_TRANSACTIONS_HPP diff --git a/modules/gapi/src/executor/conc_queue.hpp b/modules/gapi/src/executor/conc_queue.hpp index 5de50ef34b..9875e8245a 100644 --- a/modules/gapi/src/executor/conc_queue.hpp +++ b/modules/gapi/src/executor/conc_queue.hpp @@ -119,8 +119,7 @@ void concurrent_bounded_queue::set_capacity(std::size_t capacity) { // Clear the queue. Similar to the TBB version, this method is not // thread-safe. 
template -void concurrent_bounded_queue::clear() -{ +void concurrent_bounded_queue::clear() { m_data = std::queue{}; } diff --git a/modules/gapi/src/executor/gstreamingexecutor.cpp b/modules/gapi/src/executor/gstreamingexecutor.cpp index afdebee020..41cb83f710 100644 --- a/modules/gapi/src/executor/gstreamingexecutor.cpp +++ b/modules/gapi/src/executor/gstreamingexecutor.cpp @@ -6,6 +6,7 @@ #include "precomp.hpp" +#include // make_shared #include #include @@ -60,14 +61,23 @@ public: struct DataQueue { static const char *name() { return "StreamingDataQueue"; } + enum tag { DESYNC }; // Enum of 1 element: purely a syntax sugar explicit DataQueue(std::size_t capacity) { - if (capacity) { - q.set_capacity(capacity); + // Note: `ptr` is shared, while the `q` is a shared + auto ptr = std::make_shared(); + if (capacity != 0) { + ptr->set_capacity(capacity); } + q = std::move(ptr); + } + explicit DataQueue(tag t) + : q(new cv::gimpl::stream::DesyncQueue()) { + GAPI_Assert(t == DESYNC); } - cv::gimpl::stream::Q q; + // FIXME: ADE metadata requires types to be copiable + std::shared_ptr q; }; std::vector reader_queues( ade::Graph &g, @@ -77,7 +87,7 @@ std::vector reader_queues( ade::Graph &g, std::vector result; for (auto &&out_eh : obj->outEdges()) { - result.push_back(&qgr.metadata(out_eh).get().q); + result.push_back(qgr.metadata(out_eh).get().q.get()); } return result; } @@ -90,7 +100,7 @@ std::vector input_queues( ade::Graph &g, for (auto &&in_eh : obj->inEdges()) { result.push_back(qgr.metadata(in_eh).contains() - ? &qgr.metadata(in_eh).get().q + ? qgr.metadata(in_eh).get().q.get() : nullptr); } return result; @@ -133,6 +143,77 @@ void sync_data(cv::GRunArgs &results, cv::GRunArgsP &outputs) } } +// FIXME: Is there a way to derive function from its GRunArgsP version? 
+template using O = cv::util::optional; +void sync_data(cv::gimpl::stream::Result &r, cv::GOptRunArgsP &outputs) +{ + namespace own = cv::gapi::own; + + for (auto && it : ade::util::zip(ade::util::toRange(outputs), + ade::util::toRange(r.args), + ade::util::toRange(r.flags))) + { + auto &out_obj = std::get<0>(it); + auto &res_obj = std::get<1>(it); + bool available = std::get<2>(it); + + using T = cv::GOptRunArgP; +#define HANDLE_CASE(Type) \ + case T::index_of*>(): \ + if (available) { \ + *cv::util::get*>(out_obj) \ + = cv::util::make_optional(std::move(cv::util::get(res_obj))); \ + } else { \ + cv::util::get*>(out_obj)->reset(); \ + } + + // FIXME: this conversion should be unified + switch (out_obj.index()) + { + HANDLE_CASE(cv::Scalar); break; + HANDLE_CASE(cv::RMat); break; + + case T::index_of*>(): { + // Mat: special handling. + auto &mat_opt = *cv::util::get*>(out_obj); + if (available) { + auto q_map = cv::util::get(res_obj).access(cv::RMat::Access::R); + // FIXME: Copy! Maybe we could do some optimization for this case! + // e.g. don't handle RMat for last ilsand in the graph. + // It is not always possible though. + mat_opt = cv::util::make_optional(cv::gimpl::asMat(q_map).clone()); + } else { + mat_opt.reset(); + } + } break; + case T::index_of(): { + // std::vector<>: special handling + auto &vec_opt = cv::util::get(out_obj); + if (available) { + vec_opt.mov(cv::util::get(res_obj)); + } else { + vec_opt.reset(); + } + } break; + case T::index_of(): { + // std::vector<>: special handling + auto &opq_opt = cv::util::get(out_obj); + if (available) { + opq_opt.mov(cv::util::get(res_obj)); + } else { + opq_opt.reset(); + } + } break; + default: + // ...maybe because of STANDALONE mode. + GAPI_Assert(false && "This value type is not supported!"); + break; + } + } +#undef HANDLE_CASE +} + + // Pops an item from every input queue and combine it to the final // result. Blocks the current thread. 
Returns true if the vector has // been obtained successfully and false if a Stop message has been @@ -206,12 +287,39 @@ class QueueReader bool m_finishing = false; // Set to true once a "soft" stop is received std::vector m_cmd; + void rewindToStop(std::vector &in_queues, + const std::size_t this_id); + public: - bool getInputVector(std::vector &in_queues, - cv::GRunArgs &in_constants, - cv::GRunArgs &isl_inputs); + bool getInputVector (std::vector &in_queues, + cv::GRunArgs &in_constants, + cv::GRunArgs &isl_inputs); + + bool getResultsVector(std::vector &in_queues, + const std::vector &in_mapping, + const std::size_t out_size, + cv::GRunArgs &out_results); }; +// This method handles a stop sign got from some input +// island. Reiterate through all _remaining valid_ queues (some of +// them can be set to nullptr already -- see handling in +// getInputVector) and rewind data to every Stop sign per queue. +void QueueReader::rewindToStop(std::vector &in_queues, + const std::size_t this_id) +{ + for (auto &&qit : ade::util::indexed(in_queues)) + { + auto id2 = ade::util::index(qit); + auto &q2 = ade::util::value(qit); + if (this_id == id2) continue; + + Cmd cmd; + while (q2 && !cv::util::holds_alternative(cmd)) + q2->pop(cmd); + } +} + bool QueueReader::getInputVector(std::vector &in_queues, cv::GRunArgs &in_constants, cv::GRunArgs &isl_inputs) @@ -271,20 +379,7 @@ bool QueueReader::getInputVector(std::vector &in_queues, else { GAPI_Assert(stop.kind == Stop::Kind::HARD); - // Just got a stop sign. 
Reiterate through all - // _remaining valid_ queues (some of them can be - // set to nullptr already -- see above) and rewind - // data to every Stop sign per queue - for (auto &&qit : ade::util::indexed(in_queues)) - { - auto id2 = ade::util::index(qit); - auto &q2 = ade::util::value(qit); - if (id == id2) continue; - - Cmd cmd2; - while (q2 && !cv::util::holds_alternative(cmd2)) - q2->pop(cmd2); - } + rewindToStop(in_queues, id); // After queues are read to the proper indicator, // indicate end-of-stream return false; @@ -303,6 +398,60 @@ bool QueueReader::getInputVector(std::vector &in_queues, return true; // A regular case - there is data to process. } +// This is a special method to obtain a result vector +// for the entire pipeline's outputs. +// +// After introducing desync(), the pipeline output's vector +// can be produced just partially. Also, if a desynchronized +// path has multiple outputs for the pipeline, _these_ outputs +// should still come synchronized to the end user (via pull()) +// +// +// This method handles all this. +// It takes a number of input queues, which may or may not be +// equal to the number of pipeline outputs (<=). +// It also takes indexes saying which queue produces which +// output in the resulting pipeline. +// +// `out_results` is always produced with the size of full output +// vector. In the desync case, the number of in_queues will +// be less than this size and some of the items won't be produced. +// In the sync case, there will be a 1-1 mapping. +// +// In the desync case, there _will be_ multiple collector threads +// calling this method, and pushing their whole-pipeline outputs +// (_may be_ partially filled) to the same final output queue. +// The receiver part at the GStreamingExecutor level won't change +// because of that. 
+bool QueueReader::getResultsVector(std::vector &in_queues, + const std::vector &in_mapping, + const std::size_t out_size, + cv::GRunArgs &out_results) +{ + m_cmd.resize(out_size); + for (auto &&it : ade::util::indexed(in_queues)) + { + auto ii = ade::util::index(it); + auto oi = in_mapping[ii]; + auto &q = ade::util::value(it); + q->pop(m_cmd[oi]); + if (!cv::util::holds_alternative(m_cmd[oi])) + { + out_results[oi] = std::move(cv::util::get(m_cmd[oi])); + } + else // A Stop sign + { + // In theory, the CNST should never reach here. + // Collector thread never handles the inputs directly + // (collector's input queues are always produced by + // islands in the graph). + rewindToStop(in_queues, ii); + return false; + } // if(Stop) + } // for(in_queues) + return true; +} + // This thread is a plain dump source actor. What it do is just: // - Check input queue (the only one) for a control command @@ -603,22 +752,78 @@ void islandActorThread(std::vector in_rcs, // // and then put the resulting vector into one single queue. While it // looks redundant, it simplifies dramatically the way how try_pull() // is implemented - we need to check one queue instead of many. -void collectorThread(std::vector in_queues, - Q& out_queue) +// +// After desync() is added, there may be multiple collector threads +// running, every thread producing its own part of the partial +// pipeline output (optional...). All partial outputs are pushed +// to the same output queue and then picked by GStreamingExecutor +// in the end. +void collectorThread(std::vector in_queues, + std::vector in_mapping, + const std::size_t out_size, + Q& out_queue) { + // These flags are static now: regardless if the sync or + // desync branch is collected by this thread, all in_queue + // data should come in sync. 
+ std::vector flags(out_size, false); + for (auto idx : in_mapping) { + flags[idx] = true; + } + QueueReader qr; while (true) { - cv::GRunArgs this_result(in_queues.size()); - cv::GRunArgs this_const(in_queues.size()); - if (!qr.getInputVector(in_queues, this_const, this_result)) + cv::GRunArgs this_result(out_size); + if (!qr.getResultsVector(in_queues, in_mapping, out_size, this_result)) { out_queue.push(Cmd{Stop{}}); return; } - out_queue.push(Cmd{this_result}); + out_queue.push(Cmd{Result{std::move(this_result), flags}}); } } + +void check_DesyncObjectConsumedByMultipleIslands(const cv::gimpl::GIslandModel::Graph &gim) { + using namespace cv::gimpl; + + // Since the limitation exists only in this particular + // implementation, the check is also done only here but not at the + // graph compiler level. + // + // See comment in desync(GMat) src/api/kernels_streaming.cpp for details. + for (auto &&nh : gim.nodes()) { + if (gim.metadata(nh).get().k == NodeKind::SLOT) { + // SLOTs are read by ISLANDs, so look for the metadata + // of the outbound edges + std::unordered_map out_desync_islands; + for (auto &&out_eh : nh->outEdges()) { + if (gim.metadata(out_eh).contains()) { + // This is a desynchronized edge + // Look what Island it leads to + const auto out_desync_idx = gim.metadata(out_eh) + .get().index; + const auto out_island = gim.metadata(out_eh->dstNode()) + .get().object; + + auto it = out_desync_islands.find(out_desync_idx); + if (it != out_desync_islands.end()) { + // If there's already an edge with this desync + // id, it must point to the same island object + GAPI_Assert(it->second == out_island.get() + && "A single desync object may only be used by a single island!"); + } else { + // Store the island pointer for the further check + out_desync_islands[out_desync_idx] = out_island.get(); + } + } // if(desync) + } // for(out_eh) + // There must be only one backend in the end of the day + // (under this desync path) + } // if(SLOT) + } // for(nodes) +} + } 
// anonymous namespace // GStreamingExecutor expects compile arguments as input to have possibility to do @@ -630,20 +835,28 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr && .get().model) , m_comp_args(comp_args) , m_gim(*m_island_graph) + , m_desync(GModel::Graph(*m_orig_graph).metadata() + .contains()) { GModel::Graph gm(*m_orig_graph); // NB: Right now GIslandModel is acyclic, and all the below code assumes that. - // NB: This naive execution code is taken from GExecutor nearly "as-is" + // NB: This naive execution code is taken from GExecutor nearly + // "as-is" + + if (m_desync) { + check_DesyncObjectConsumedByMultipleIslands(m_gim); + } const auto proto = gm.metadata().get(); m_emitters .resize(proto.in_nhs.size()); m_emitter_queues.resize(proto.in_nhs.size()); m_sinks .resize(proto.out_nhs.size()); - m_sink_queues .resize(proto.out_nhs.size()); + m_sink_queues .resize(proto.out_nhs.size(), nullptr); + m_sink_sync .resize(proto.out_nhs.size(), -1); // Very rough estimation to limit internal queue sizes. // Pipeline depth is equal to number of its (pipeline) steps. 
- const auto queue_capacity = std::count_if + const auto queue_capacity = 3*std::count_if (m_gim.nodes().begin(), m_gim.nodes().end(), [&](ade::NodeHandle nh) { @@ -728,8 +941,12 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr && { // ...only if the data is not compile-const if (const_ins.count(eh->srcNode()) == 0) { - qgr.metadata(eh).set(DataQueue(queue_capacity)); - m_internal_queues.insert(&qgr.metadata(eh).get().q); + if (m_gim.metadata(eh).contains()) { + qgr.metadata(eh).set(DataQueue(DataQueue::DESYNC)); + } else { + qgr.metadata(eh).set(DataQueue(queue_capacity)); + } + m_internal_queues.insert(qgr.metadata(eh).get().q.get()); } } } @@ -760,7 +977,14 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr && ade::TypedGraph qgr(*m_island_graph); GAPI_Assert(nh->inEdges().size() == 1u); qgr.metadata(nh->inEdges().front()).set(DataQueue(queue_capacity)); - m_sink_queues[sink_idx] = &qgr.metadata(nh->inEdges().front()).get().q; + m_sink_queues[sink_idx] = qgr.metadata(nh->inEdges().front()).get().q.get(); + + // Assign a desync tag + const auto sink_out_nh = gm.metadata().get().out_nhs[sink_idx]; + if (gm.metadata(sink_out_nh).contains()) { + // metadata().get_or<> could make this thing better + m_sink_sync[sink_idx] = gm.metadata(sink_out_nh).get().index; + } } break; default: @@ -768,7 +992,23 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr && break; } // switch(kind) } // for(gim nodes) - m_out_queue.set_capacity(queue_capacity); + + // If there are desynchronized parts in the graph, there may be + // multiple theads polling every separate (desynchronized) + // branch in the graph individually. 
Prepare a mapping information + // for any such thread + for (auto &&idx : ade::util::iota(m_sink_queues.size())) { + auto path_id = m_sink_sync[idx]; + auto &info = m_collector_map[path_id]; + info.queues.push_back(m_sink_queues[idx]); + info.mapping.push_back(static_cast(idx)); + } + + // Reserve space in the final queue based on the number + // of desync parts (they can generate output individually + // per the same input frame, so the output traffic multiplies) + GAPI_Assert(m_collector_map.size() > 0u); + m_out_queue.set_capacity(queue_capacity * m_collector_map.size()); } cv::gimpl::GStreamingExecutor::~GStreamingExecutor() @@ -938,6 +1178,9 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins) real_video_completion_cb); } + for (auto &&op : m_ops) { + op.isl_exec->handleNewStream(); + } // Now do this for every island (in a topological order) for (auto &&op : m_ops) @@ -974,10 +1217,17 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins) out_queues); } - // Finally, start a collector thread. - m_threads.emplace_back(collectorThread, - m_sink_queues, - std::ref(m_out_queue)); + // Finally, start collector thread(s). + // If there are desynchronized parts in the graph, there may be + // multiple theads polling every separate (desynchronized) + // branch in the graph individually. + for (auto &&info : m_collector_map) { + m_threads.emplace_back(collectorThread, + info.second.queues, + info.second.mapping, + m_sink_queues.size(), + std::ref(m_out_queue)); + } state = State::READY; } @@ -1018,15 +1268,25 @@ void cv::gimpl::GStreamingExecutor::wait_shutdown() for (auto &q : m_internal_queues) q->clear(); m_out_queue.clear(); + for (auto &&op : m_ops) { + op.isl_exec->handleStopStream(); + } + state = State::STOPPED; } bool cv::gimpl::GStreamingExecutor::pull(cv::GRunArgsP &&outs) { + // This pull() can only be called when there's no desynchronized + // parts in the graph. + GAPI_Assert(!m_desync && + "This graph has desynchronized parts! 
Please use another pull()"); + if (state == State::STOPPED) return false; GAPI_Assert(state == State::RUNNING); - GAPI_Assert(m_sink_queues.size() == outs.size()); + GAPI_Assert(m_sink_queues.size() == outs.size() && + "Number of data objects in cv::gout() must match the number of graph outputs in cv::GOut()"); Cmd cmd; m_out_queue.pop(cmd); @@ -1036,12 +1296,39 @@ bool cv::gimpl::GStreamingExecutor::pull(cv::GRunArgsP &&outs) return false; } - GAPI_Assert(cv::util::holds_alternative(cmd)); - cv::GRunArgs &this_result = cv::util::get(cmd); + GAPI_Assert(cv::util::holds_alternative(cmd)); + cv::GRunArgs &this_result = cv::util::get(cmd).args; sync_data(this_result, outs); return true; } +bool cv::gimpl::GStreamingExecutor::pull(cv::GOptRunArgsP &&outs) +{ + // This pull() can only be called in both cases: if there are + // desyncrhonized parts or not. + + // FIXME: so far it is a full duplicate of standard pull except + // the sync_data version called. + if (state == State::STOPPED) + return false; + GAPI_Assert(state == State::RUNNING); + GAPI_Assert(m_sink_queues.size() == outs.size() && + "Number of data objects in cv::gout() must match the number of graph outputs in cv::GOut()"); + + Cmd cmd; + m_out_queue.pop(cmd); + if (cv::util::holds_alternative(cmd)) + { + wait_shutdown(); + return false; + } + + GAPI_Assert(cv::util::holds_alternative(cmd)); + sync_data(cv::util::get(cmd), outs); + return true; +} + + bool cv::gimpl::GStreamingExecutor::try_pull(cv::GRunArgsP &&outs) { if (state == State::STOPPED) @@ -1059,8 +1346,8 @@ bool cv::gimpl::GStreamingExecutor::try_pull(cv::GRunArgsP &&outs) return false; } - GAPI_Assert(cv::util::holds_alternative(cmd)); - cv::GRunArgs &this_result = cv::util::get(cmd); + GAPI_Assert(cv::util::holds_alternative(cmd)); + cv::GRunArgs &this_result = cv::util::get(cmd).args; sync_data(this_result, outs); return true; } diff --git a/modules/gapi/src/executor/gstreamingexecutor.hpp b/modules/gapi/src/executor/gstreamingexecutor.hpp 
index d10f9eddd0..b6093ac1ef 100644 --- a/modules/gapi/src/executor/gstreamingexecutor.hpp +++ b/modules/gapi/src/executor/gstreamingexecutor.hpp @@ -14,6 +14,8 @@ #include // unique_ptr, shared_ptr #include // thread +#include +#include #if defined(HAVE_TBB) # include // FIXME: drop it from here! @@ -22,6 +24,7 @@ template using QueueClass = tbb::concurrent_bounded_queue; # include "executor/conc_queue.hpp" template using QueueClass = cv::gapi::own::concurrent_bounded_queue; #endif // TBB +#include "executor/last_value.hpp" #include @@ -40,14 +43,61 @@ struct Stop { cv::GRunArg cdata; // const data for CNST stop }; +struct Result { + cv::GRunArgs args; // Full results vector + std::vector flags; // Availability flags (in case of desync) +}; + using Cmd = cv::util::variant < cv::util::monostate , Start // Tells emitters to start working. Not broadcasted to workers. , Stop // Tells emitters to stop working. Broadcasted to workers. , cv::GRunArg // Workers data payload to process. - , cv::GRunArgs // Full results vector + , Result // Pipeline's data for gout() >; -using Q = QueueClass; + +// Interface over a queue. The underlying queue implementation may be +// different. This class is mainly introduced to bring some +// abstraction over the real queues (bounded in-order) and a +// desynchronized data slots (see required to implement +// cv::gapi::desync) + +class Q { +public: + virtual void push(const Cmd &cmd) = 0; + virtual void pop(Cmd &cmd) = 0; + virtual bool try_pop(Cmd &cmd) = 0; + virtual void clear() = 0; + virtual ~Q() = default; +}; + +// A regular queue implementation +class SyncQueue final: public Q { + QueueClass m_q; // FIXME: OWN or WRAP?? 
+ +public: + virtual void push(const Cmd &cmd) override { m_q.push(cmd); } + virtual void pop(Cmd &cmd) override { m_q.pop(cmd); } + virtual bool try_pop(Cmd &cmd) override { return m_q.try_pop(cmd); } + virtual void clear() override { m_q.clear(); } + + void set_capacity(std::size_t c) { m_q.set_capacity(c);} +}; + +// Desynchronized "queue" implementation +// Every push overwrites value which is not yet popped +// This container can hold 0 or 1 element +// Special handling for Stop is implemented (FIXME: not really) +class DesyncQueue final: public Q { + cv::gapi::own::last_written_value m_v; + +public: + virtual void push(const Cmd &cmd) override { m_v.push(cmd); } + virtual void pop(Cmd &cmd) override { m_v.pop(cmd); } + virtual bool try_pop(Cmd &cmd) override { return m_v.try_pop(cmd); } + virtual void clear() override { m_v.clear(); } +}; + } // namespace stream // FIXME: Currently all GExecutor comments apply also @@ -87,6 +137,7 @@ protected: util::optional m_reshapable; cv::gimpl::GIslandModel::Graph m_gim; // FIXME: make const? + const bool m_desync; // FIXME: Naive executor details are here for now // but then it should be moved to another place @@ -117,11 +168,27 @@ protected: std::vector m_sinks; std::vector m_threads; - std::vector m_emitter_queues; - std::vector m_const_emitter_queues; // a view over m_emitter_queues - std::vector m_sink_queues; - std::unordered_set m_internal_queues; - stream::Q m_out_queue; + std::vector m_emitter_queues; + + // a view over m_emitter_queues + std::vector m_const_emitter_queues; + + std::vector m_sink_queues; + + // desync path tags for outputs. 
-1 means that output + // doesn't belong to a desync path + std::vector m_sink_sync; + + std::unordered_set m_internal_queues; + stream::SyncQueue m_out_queue; + + // Describes mapping from desync paths to collector threads + struct CollectorThreadInfo { + std::vector queues; + std::vector mapping; + }; + std::unordered_map m_collector_map; + void wait_shutdown(); @@ -132,6 +199,7 @@ public: void setSource(GRunArgs &&args); void start(); bool pull(cv::GRunArgsP &&outs); + bool pull(cv::GOptRunArgsP &&outs); bool try_pull(cv::GRunArgsP &&outs); void stop(); bool running() const; diff --git a/modules/gapi/src/executor/last_value.hpp b/modules/gapi/src/executor/last_value.hpp new file mode 100644 index 0000000000..152449a879 --- /dev/null +++ b/modules/gapi/src/executor/last_value.hpp @@ -0,0 +1,105 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_EXECUTOR_LAST_VALUE_HPP +#define OPENCV_GAPI_EXECUTOR_LAST_VALUE_HPP + +#include +#include + +#include +#include + +namespace cv { +namespace gapi { +namespace own { + +// This class implements a "Last Written Value" thing. Writer threads +// (in our case, it is just one) can write as many values there as it +// can. +// +// The reader thread gets only a value it gets at the time (or blocks +// if there was no value written since the last read). +// +// Again, the implementation is highly inefficient right now. +template +class last_written_value { + cv::util::optional m_data; + + std::mutex m_mutex; + std::condition_variable m_cond_empty; + + void unsafe_pop(T &t); + +public: + last_written_value() {} + last_written_value(const last_written_value &cc) + : m_data(cc.m_data) { + // FIXME: what to do with all that locks, etc? 
+ } + last_written_value(last_written_value &&cc) + : m_data(std::move(cc.m_data)) { + // FIXME: what to do with all that locks, etc? + } + + // FIXME: && versions + void push(const T &t); + void pop(T &t); + bool try_pop(T &t); + + // Not thread-safe + void clear(); +}; + +// Internal: do shared pop things assuming the lock is already there +template +void last_written_value::unsafe_pop(T &t) { + GAPI_Assert(m_data.has_value()); + t = std::move(m_data.value()); + m_data.reset(); +} + +// Push an element to the queue. Blocking if there's no space left +template +void last_written_value::push(const T& t) { + std::unique_lock lock(m_mutex); + m_data = cv::util::make_optional(t); + lock.unlock(); + m_cond_empty.notify_one(); +} + +// Pop an element from the queue. Blocking if there's no items +template +void last_written_value::pop(T &t) { + std::unique_lock lock(m_mutex); + if (!m_data.has_value()) { + // if there is no data, wait + m_cond_empty.wait(lock, [&](){return m_data.has_value();}); + } + unsafe_pop(t); +} + +// Try pop an element from the queue. Returns false if queue is empty +template +bool last_written_value::try_pop(T &t) { + std::unique_lock lock(m_mutex); + if (!m_data.has_value()) { + // if there is no data, return + return false; + } + unsafe_pop(t); + return true; +} + +// Clear the value holder. This method is not thread-safe. +template +void last_written_value::clear() { + m_data.reset(); +} + +}}} // namespace cv::gapi::own + +#endif // OPENCV_GAPI_EXECUTOR_CONC_QUEUE_HPP diff --git a/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp b/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp index f6543e59f7..c9d9926542 100644 --- a/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp +++ b/modules/gapi/test/internal/gapi_int_gmodel_builder_test.cpp @@ -2,7 +2,7 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018-2020 Intel Corporation #include "../test_precomp.hpp" @@ -29,7 +29,9 @@ namespace , "" , nullptr , { GShape::GMAT } - , { D::OpaqueKind::CV_UNKNOWN } }).pass(m).yield(0); + , { D::OpaqueKind::CV_UNKNOWN } + , { cv::detail::HostCtor{cv::util::monostate{}} } + }).pass(m).yield(0); } cv::GMat binaryOp(cv::GMat m1, cv::GMat m2) @@ -38,7 +40,9 @@ namespace , "" , nullptr , { GShape::GMAT } - , { D::OpaqueKind::CV_UNKNOWN, D::OpaqueKind::CV_UNKNOWN } }).pass(m1, m2).yield(0); + , { D::OpaqueKind::CV_UNKNOWN, D::OpaqueKind::CV_UNKNOWN } + , { cv::detail::HostCtor{cv::util::monostate{}} } + }).pass(m1, m2).yield(0); } std::vector collectOperations(const cv::gimpl::GModel::Graph& gr) diff --git a/modules/gapi/test/internal/gapi_int_island_fusion_tests.cpp b/modules/gapi/test/internal/gapi_int_island_fusion_tests.cpp index c247cc7b79..723e42a6df 100644 --- a/modules/gapi/test/internal/gapi_int_island_fusion_tests.cpp +++ b/modules/gapi/test/internal/gapi_int_island_fusion_tests.cpp @@ -513,7 +513,65 @@ TEST(IslandFusion, Regression_ShouldFuseAll) EXPECT_EQ(1u, isl_nhs.size()); // 1 island } -// FIXME: add more tests on mixed (hetero) graphs +TEST(IslandFusion, Test_Desync_NoFuse) +{ + cv::GMat in; + cv::GMat tmp1 = in*0.5f; + cv::GMat tmp2 = tmp1 + in; + + cv::GMat tmp3 = cv::gapi::streaming::desync(tmp1); + cv::GMat tmp4 = tmp3*0.1f; + + const auto in_meta = cv::GMetaArg(cv::GMatDesc{CV_8U,1,cv::Size(32,32)}); + cv::GComputation comp(cv::GIn(in), cv::GOut(tmp2, tmp4)); + + ////////////////////////////////////////////////////////////////// + // Compile the graph in "regular" mode, it should produce a single island + { + using namespace cv::gimpl; + + GCompiler compiler(comp, {in_meta}, cv::compile_args()); + GCompiler::GPtr graph = compiler.generateGraph(); + compiler.runPasses(*graph); + + auto isl_model = GModel::ConstGraph(*graph).metadata() + .get().model; + GIslandModel::ConstGraph gim(*isl_model); + + 
const auto is_island = [&](ade::NodeHandle nh) { + return (NodeKind::ISLAND == gim.metadata(nh).get().k); + }; + const auto num_isl = std::count_if(gim.nodes().begin(), + gim.nodes().end(), + is_island); + EXPECT_EQ(1, num_isl); + } + ////////////////////////////////////////////////////////////////// + // Now compile the graph in the streaming mode. + // It has to produce two islands + { + using namespace cv::gimpl; + + GCompiler compiler(comp, {in_meta}, cv::compile_args()); + GCompiler::GPtr graph = compiler.generateGraph(); + GModel::Graph(*graph).metadata().set(Streaming{}); + compiler.runPasses(*graph); + + auto isl_model = GModel::ConstGraph(*graph).metadata() + .get().model; + GIslandModel::ConstGraph gim(*isl_model); + + const auto is_island = [&](ade::NodeHandle nh) { + return (NodeKind::ISLAND == gim.metadata(nh).get().k); + }; + const auto num_isl = std::count_if(gim.nodes().begin(), + gim.nodes().end(), + is_island); + EXPECT_EQ(2, num_isl); + } +} + +// Fixme: add more tests on mixed (hetero) graphs // ADE-222, ADE-223 // FIXME: add test on combination of user-specified island diff --git a/modules/gapi/test/internal/gapi_transactions_test.cpp b/modules/gapi/test/internal/gapi_transactions_test.cpp index ac77c33d13..9d36401a71 100644 --- a/modules/gapi/test/internal/gapi_transactions_test.cpp +++ b/modules/gapi/test/internal/gapi_transactions_test.cpp @@ -2,11 +2,14 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2018 Intel Corporation +// Copyright (C) 2018 - 2020 Intel Corporation #include "../test_precomp.hpp" + #include +#include + #include "compiler/transactions.hpp" namespace opencv_test @@ -33,10 +36,11 @@ struct SimpleGraph enum { node_nums = 5 }; ade::Graph graph; - ade::NodeHandle fused_nh; /* For check that fusion node is connected to the - inputs of the prod and the outputs of the cons */ + ade::NodeHandle fused_nh; // For check that fusion node is connected to the + // inputs of the prod and the outputs of the cons std::array nhs; std::array ehs; + using Change = ChangeT<>; Change::List changes; SimpleGraph() @@ -192,8 +196,6 @@ TEST_F(Transactions, DropNode_Commit) TEST_F(Transactions, Fusion_Commit) { - namespace C = Change; - fuse(); commit(); @@ -204,8 +206,6 @@ TEST_F(Transactions, Fusion_Commit) TEST_F(Transactions, Fusion_RollBack) { - namespace C = Change; - fuse(); rollback(); @@ -219,4 +219,151 @@ TEST_F(Transactions, Fusion_RollBack) } } +namespace +{ + struct MetaInt { + static const char *name() { return "int_meta"; } + int x; + }; + + struct MetaStr { + static const char *name() { return "string_meta"; } + std::string s; + }; +} + +TEST(PreservedMeta, TestMetaCopy_Full) +{ + ade::Graph g; + ade::TypedGraph tg(g); + + auto src_nh = tg.createNode(); + tg.metadata(src_nh).set(MetaInt{42}); + tg.metadata(src_nh).set(MetaStr{"hi"}); + + auto dst_nh = tg.createNode(); + + EXPECT_FALSE(tg.metadata(dst_nh).contains()); + EXPECT_FALSE(tg.metadata(dst_nh).contains()); + + // Here we specify all the meta types we know about the src node + // Assume Preserved copies its all for us + Preserved(g, src_nh).copyTo(g, dst_nh); + + ASSERT_TRUE(tg.metadata(dst_nh).contains()); + ASSERT_TRUE(tg.metadata(dst_nh).contains()); + + EXPECT_EQ(42, tg.metadata(dst_nh).get().x); + EXPECT_EQ("hi", tg.metadata(dst_nh).get().s); +} + + +TEST(PreservedMeta, TestMetaCopy_Partial_Dst) +{ + ade::Graph g; + ade::TypedGraph tg(g); + + auto tmp_nh1 = 
tg.createNode(); + auto tmp_nh2 = tg.createNode(); + auto src_eh = tg.link(tmp_nh1, tmp_nh2); + + tg.metadata(src_eh).set(MetaInt{42}); + tg.metadata(src_eh).set(MetaStr{"hi"}); + + auto tmp_nh3 = tg.createNode(); + auto tmp_nh4 = tg.createNode(); + auto dst_eh = tg.link(tmp_nh3, tmp_nh4); + + EXPECT_FALSE(tg.metadata(dst_eh).contains()); + EXPECT_FALSE(tg.metadata(dst_eh).contains()); + + // Here we specify just a single meta type for the src node + // Assume Preserved copies only this type and nothing else + Preserved(g, src_eh).copyTo(g, dst_eh); + + ASSERT_FALSE(tg.metadata(dst_eh).contains()); + ASSERT_TRUE (tg.metadata(dst_eh).contains()); + + EXPECT_EQ("hi", tg.metadata(dst_eh).get().s); +} + +TEST(PreservedMeta, TestMetaCopy_Partial_Src) +{ + ade::Graph g; + ade::TypedGraph tg(g); + + auto src_nh = tg.createNode(); + tg.metadata(src_nh).set(MetaInt{42}); + + auto dst_nh = tg.createNode(); + + EXPECT_FALSE(tg.metadata(dst_nh).contains()); + EXPECT_FALSE(tg.metadata(dst_nh).contains()); + + // Here we specify all the meta types we know about the src node + // but the src node has just one of them. + // A valid situation, only MetaInt to be copied. + Preserved(g, src_nh).copyTo(g, dst_nh); + + ASSERT_TRUE (tg.metadata(dst_nh).contains()); + ASSERT_FALSE(tg.metadata(dst_nh).contains()); + + EXPECT_EQ(42, tg.metadata(dst_nh).get().x); +} + +TEST(PreservedMeta, TestMetaCopy_Nothing) +{ + ade::Graph g; + ade::TypedGraph tg(g); + + auto src_nh = tg.createNode(); + auto dst_nh = tg.createNode(); + + EXPECT_FALSE(tg.metadata(src_nh).contains()); + EXPECT_FALSE(tg.metadata(src_nh).contains()); + + EXPECT_FALSE(tg.metadata(dst_nh).contains()); + EXPECT_FALSE(tg.metadata(dst_nh).contains()); + + // Here we specify all the meta types we know about the src node + // but the src node has none of those. 
See how it works now + Preserved(g, src_nh).copyTo(g, dst_nh); + + ASSERT_FALSE(tg.metadata(dst_nh).contains()); + ASSERT_FALSE(tg.metadata(dst_nh).contains()); +} + +TEST(PreservedMeta, DropEdge) +{ + ade::Graph g; + ade::TypedGraph tg(g); + + auto nh1 = tg.createNode(); + auto nh2 = tg.createNode(); + auto eh = tg.link(nh1, nh2); + + tg.metadata(eh).set(MetaInt{42}); + tg.metadata(eh).set(MetaStr{"hi"}); + + // Drop an edge using the transaction API + using Change = ChangeT; + Change::List changes; + changes.enqueue(g, nh1, eh); + + EXPECT_EQ(0u, nh1->outNodes().size()); + EXPECT_EQ(nullptr, eh); + + // Now restore the edge and check if it's meta was restored + changes.rollback(g); + + ASSERT_EQ(1u, nh1->outNodes().size()); + eh = *nh1->outEdges().begin(); + + ASSERT_TRUE(tg.metadata(eh).contains()); + ASSERT_TRUE(tg.metadata(eh).contains()); + + EXPECT_EQ(42, tg.metadata(eh).get().x); + EXPECT_EQ("hi", tg.metadata(eh).get().s); +} + } // opencv_test diff --git a/modules/gapi/test/own/conc_queue_tests.cpp b/modules/gapi/test/own/conc_queue_tests.cpp index c3e6fd6e08..6e268f318c 100644 --- a/modules/gapi/test/own/conc_queue_tests.cpp +++ b/modules/gapi/test/own/conc_queue_tests.cpp @@ -55,7 +55,7 @@ TEST(ConcQueue, Clear) EXPECT_FALSE(q.try_pop(x)); } -// In this test, every writer thread produce its own range of integer +// In this test, every writer thread produces its own range of integer // numbers, writing those to a shared queue. // // Every reader thread pops elements from the queue (until -1 is @@ -64,12 +64,12 @@ TEST(ConcQueue, Clear) // Finally, the master thread waits for completion of all other // threads and verifies that all the necessary data is // produced/obtained. 
+namespace +{ using StressParam = std::tuple; // Queue capacity -namespace -{ constexpr int STOP_SIGN = -1; constexpr int BASE = 1000; } diff --git a/modules/gapi/test/own/last_written_value_tests.cpp b/modules/gapi/test/own/last_written_value_tests.cpp new file mode 100644 index 0000000000..4bfb27f15f --- /dev/null +++ b/modules/gapi/test/own/last_written_value_tests.cpp @@ -0,0 +1,156 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#include "../test_precomp.hpp" + +#include +#include + +#include "executor/last_value.hpp" + +namespace opencv_test { +using namespace cv::gapi; + +TEST(LastValue, PushPop) { + own::last_written_value v; + for (int i = 0; i < 100; i++) { + v.push(i); + + int x = 1; + v.pop(x); + EXPECT_EQ(x, i); + } +} + +TEST(LastValue, TryPop) { + own::last_written_value v; + int x = 0; + EXPECT_FALSE(v.try_pop(x)); + + v.push(1); + EXPECT_TRUE(v.try_pop(x)); + EXPECT_EQ(1, x); +} + +TEST(LastValue, Clear) { + own::last_written_value v; + v.push(42); + v.clear(); + + int x = 0; + EXPECT_FALSE(v.try_pop(x)); +} + +TEST(LastValue, Overwrite) { + own::last_written_value v; + v.push(42); + v.push(0); + + int x = -1; + EXPECT_TRUE(v.try_pop(x)); + EXPECT_EQ(0, x); +} + +// In this test, every writer thread produces its own range of integer +// numbers, writing those to a shared queue. +// +// Every reader thread pops elements from the queue (until -1 is +// reached) and stores those in its own associated set. +// +// Finally, the master thread waits for completion of all other +// threads and verifies that all the necessary data is +// produced/obtained. 
+namespace { +using StressParam = std::tuple; // Num reader threads +constexpr int STOP_SIGN = -1; +constexpr int BASE = 1000; +} +struct LastValue_: public ::testing::TestWithParam { + using V = own::last_written_value; + using S = std::unordered_set; + + static void writer(int base, int writes, V& v) { + for (int i = 0; i < writes; i++) { + if (i % 2) { + std::this_thread::sleep_for(std::chrono::milliseconds{1}); + } + v.push(base + i); + } + v.push(STOP_SIGN); + } + + static void reader(V& v, S& s) { + int x = 0; + while (true) { + v.pop(x); + if (x == STOP_SIGN) { + // If this thread was lucky enough to read this STOP_SIGN, + // push it back to v to make other possible readers able + // to read it again (note due to the last_written_value + // semantic, those STOP_SIGN could be simply lost i.e. + // overwritten. + v.push(STOP_SIGN); + return; + } + s.insert(x); + } + } +}; + +TEST_P(LastValue_, Test) +{ + int num_writers = 0; + int num_writes = 0; + int num_readers = 0; + std::tie(num_writers, num_writes, num_readers) = GetParam(); + + CV_Assert(num_writers < 20); + CV_Assert(num_writes < BASE); + + V v; + + // Start reader threads + std::vector storage(num_readers); + std::vector readers; + for (S& s : storage) { + readers.emplace_back(reader, std::ref(v), std::ref(s)); + } + + // Start writer threads, also pre-generate reference numbers + S reference; + std::vector writers; + for (int w = 0; w < num_writers; w++) { + writers.emplace_back(writer, w*BASE, num_writes, std::ref(v)); + for (int r = 0; r < num_writes; r++) { + reference.insert(w*BASE + r); + } + } + + // Wait for completions + for (auto &t : readers) t.join(); + for (auto &t : writers) t.join(); + + // Validate the result. Some values are read, and the values are + // correct (i.e. 
such values have been written) + std::size_t num_values_read = 0u; + for (const auto &s : storage) { + num_values_read += s.size(); + for (auto &x : s) { + EXPECT_TRUE(reference.count(x) > 0); + } + } + // NOTE: Some combinations may end-up in 0 values read + // it is normal, the main thing is that the test shouldn't hang! + EXPECT_LE(0u, num_values_read); +} + +INSTANTIATE_TEST_CASE_P(LastValueStress, LastValue_, + Combine( Values(1, 2, 4, 8, 16) // writers + , Values(32, 96, 256) // writes + , Values(1, 2, 10))); // readers +} // namespace opencv_test diff --git a/modules/gapi/test/streaming/gapi_streaming_tests.cpp b/modules/gapi/test/streaming/gapi_streaming_tests.cpp index dfd2331bfd..69b85c0d34 100644 --- a/modules/gapi/test/streaming/gapi_streaming_tests.cpp +++ b/modules/gapi/test/streaming/gapi_streaming_tests.cpp @@ -2,11 +2,13 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html. 
// -// Copyright (C) 2019 Intel Corporation +// Copyright (C) 2019-2020 Intel Corporation #include "../test_precomp.hpp" +#include // sleep_for (Delay) + #include #include @@ -18,6 +20,7 @@ #include #include +#include namespace opencv_test { @@ -100,6 +103,16 @@ struct GAPI_Streaming: public ::testing::TestWithParam { } }; +G_API_OP(Delay, , "org.opencv.test.delay") { + static cv::GMatDesc outMeta(const cv::GMatDesc &in, int) { return in; } +}; +GAPI_OCV_KERNEL(OCVDelay, Delay) { + static void run(const cv::Mat &in, int ms, cv::Mat &out) { + std::this_thread::sleep_for(std::chrono::milliseconds{ms}); + in.copyTo(out); + } +}; + } // anonymous namespace TEST_P(GAPI_Streaming, SmokeTest_ConstInput_GMat) @@ -794,6 +807,104 @@ TEST(GAPI_Streaming_Types, OutputVector) EXPECT_LT(0u, num_frames); } +G_API_OP(DimsChans, + , cv::GOpaque>(cv::GMat)>, + "test.streaming.dims_chans") { + static std::tuple outMeta(const cv::GMatDesc &) { + return std::make_tuple(cv::empty_array_desc(), + cv::empty_gopaque_desc()); + } +}; + +GAPI_OCV_KERNEL(OCVDimsChans, DimsChans) { + static void run(const cv::Mat &in, std::vector &ov, int &oi) { + ov = {in.cols, in.rows}; + oi = in.channels(); + } +}; + +struct GAPI_Streaming_TemplateTypes: ::testing::Test { + // There was a problem in GStreamingExecutor + // when outputs were formally not used by the graph + // but still should be in place as operation need + // to produce them, and host data type constructors + // were missing for GArray and GOpaque in this case. + // This test tests exactly this. 
+ + GAPI_Streaming_TemplateTypes() { + // Prepare everything for the test: + // Graph itself + blur = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + cv::GMat blur_d = cv::gapi::streaming::desync(blur); + std::tie(vec, opq) = DimsChans::on(blur_d); + + // Kernel package + pkg = cv::gapi::kernels(); + + // Input mat + in_mat = cv::Mat::eye(cv::Size(320,240), CV_8UC3); + } + + cv::GMat in; + cv::GMat blur; + cv::GArray vec; + cv::GOpaque opq; + cv::gapi::GKernelPackage pkg; + cv::Mat in_mat; +}; + +TEST_F(GAPI_Streaming_TemplateTypes, UnusedVectorIsOK) +{ + // Declare graph without listing vec as output + auto sc = cv::GComputation(cv::GIn(in), cv::GOut(blur, opq)) + .compileStreaming(cv::compile_args(pkg)); + sc.setSource(cv::gin(in_mat)); + sc.start(); + + cv::optional out_mat; + cv::optional out_int; + + int counter = 0; + while (sc.pull(cv::gout(out_mat, out_int))) { + if (counter++ == 10) { + // Stop the test after 10 iterations + sc.stop(); + break; + } + GAPI_Assert(out_mat || out_int); + if (out_int) { + EXPECT_EQ( 3, out_int.value()); + } + } +} + +TEST_F(GAPI_Streaming_TemplateTypes, UnusedOpaqueIsOK) +{ + // Declare graph without listing opq as output + auto sc = cv::GComputation(cv::GIn(in), cv::GOut(blur, vec)) + .compileStreaming(cv::compile_args(pkg)); + sc.setSource(cv::gin(in_mat)); + sc.start(); + + cv::optional out_mat; + cv::optional > out_vec; + + int counter = 0; + while (sc.pull(cv::gout(out_mat, out_vec))) { + if (counter++ == 10) { + // Stop the test after 10 iterations + sc.stop(); + break; + } + GAPI_Assert(out_mat || out_vec); + if (out_vec) { + EXPECT_EQ(320, out_vec.value()[0]); + EXPECT_EQ(240, out_vec.value()[1]); + } + } +} + struct GAPI_Streaming_Unit: public ::testing::Test { cv::Mat m; @@ -882,7 +993,7 @@ TEST_F(GAPI_Streaming_Unit, StartStopStart_NoSetSource) EXPECT_NO_THROW(sc.setSource(cv::gin(m, m))); EXPECT_NO_THROW(sc.start()); EXPECT_NO_THROW(sc.stop()); - EXPECT_ANY_THROW(sc.start()); // Should fails since setSource was not 
called + EXPECT_ANY_THROW(sc.start()); // Should fail since setSource was not called } TEST_F(GAPI_Streaming_Unit, StartStopStress_Const) @@ -1018,4 +1129,380 @@ TEST(Streaming, Python_Pull_Overload) EXPECT_FALSE(ccomp.running()); } +TEST(GAPI_Streaming_Desync, SmokeTest_Regular) +{ + cv::GMat in; + cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + cv::GMat out1 = cv::gapi::Canny(tmp1, 32, 128, 3); + + // FIXME: Unary desync should not require tie! + cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1); + cv::GMat out2 = tmp2 / cv::gapi::Sobel(tmp2, CV_8U, 1, 1);; + + cv::Mat test_in = cv::Mat::eye(cv::Size(32,32), CV_8UC3); + cv::Mat test_out1, test_out2; + cv::GComputation(cv::GIn(in), cv::GOut(out1, out2)) + .apply(cv::gin(test_in), cv::gout(test_out1, test_out2)); +} + +TEST(GAPI_Streaming_Desync, SmokeTest_Streaming) +{ + initTestDataPath(); + + cv::GMat in; + cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + cv::GMat out1 = cv::gapi::Canny(tmp1, 32, 128, 3); + + cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1); + cv::GMat out2 = Delay::on(tmp2,10) / cv::gapi::Sobel(tmp2, CV_8U, 1, 1); + + auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2)) + .compileStreaming(cv::compile_args(cv::gapi::kernels())); + auto sc_file = findDataFile("cv/video/768x576.avi"); + auto sc_src = gapi::wip::make_src(sc_file); + sc.setSource(cv::gin(sc_src)); + sc.start(); + + std::size_t out1_hits = 0u; + std::size_t out2_hits = 0u; + cv::optional test_out1, test_out2; + while (sc.pull(cv::gout(test_out1, test_out2))) { + GAPI_Assert(test_out1 || test_out2); + if (test_out1) out1_hits++; + if (test_out2) out2_hits++; + } + EXPECT_EQ(100u, out1_hits); // out1 must be available for all frames + EXPECT_LE(out2_hits, out1_hits); // out2 must appear less times than out1 + std::cout << "Got " << out1_hits << " out1's and " << out2_hits << " out2's" << std::endl; +} + +TEST(GAPI_Streaming_Desync, SmokeTest_Streaming_TwoParts) +{ + initTestDataPath(); + + cv::GMat 
in; + cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + cv::GMat out1 = cv::gapi::Canny(tmp1, 32, 128, 3); + + // Desynchronized path 1 + cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1); + cv::GMat out2 = tmp2 / cv::gapi::Sobel(tmp2, CV_8U, 1, 1); + + // Desynchronized path 2 + cv::GMat tmp3 = cv::gapi::streaming::desync(tmp1); + cv::GMat out3 = 0.5*tmp3 + 0.5*cv::gapi::medianBlur(tmp3, 7); + + // The code should compile and execute well (desynchronized parts don't cross) + auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2, out3)) + .compileStreaming(); + auto sc_file = findDataFile("cv/video/768x576.avi"); + auto sc_src = gapi::wip::make_src(sc_file); + sc.setSource(cv::gin(sc_src)); + sc.start(); + + std::size_t test_frames = 0u; + cv::optional test_out1, test_out2, test_out3; + while (sc.pull(cv::gout(test_out1, test_out2, test_out3))) { + GAPI_Assert(test_out1 || test_out2 || test_out3); + if (test_out1) { + // count frames only for synchronized output + test_frames++; + } + } + EXPECT_EQ(100u, test_frames); +} + +TEST(GAPI_Streaming_Desync, Negative_NestedDesync_Tier0) +{ + cv::GMat in; + cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + // Desynchronized path 1 + cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1); + cv::GMat out1 = cv::gapi::medianBlur(tmp2, 3); + + // Desynchronized path 2, nested from 1 (directly from desync) + cv::GMat tmp3 = cv::gapi::streaming::desync(tmp2); + cv::GMat out2 = 0.5*tmp3; + + // This shouldn't compile + EXPECT_ANY_THROW(cv::GComputation(cv::GIn(in), cv::GOut(out1, out2)) + .compileStreaming()); +} + +TEST(GAPI_Streaming_Desync, Negative_NestedDesync_Tier1) +{ + cv::GMat in; + cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + // Desynchronized path 1 + cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1); + cv::GMat out1 = cv::gapi::medianBlur(tmp2, 3); + + // Desynchronized path 2, nested from 1 (indirectly from desync) + cv::GMat tmp3 = cv::gapi::streaming::desync(out1); + 
cv::GMat out2 = 0.5*tmp3; + + // This shouldn't compile + EXPECT_ANY_THROW(cv::GComputation(cv::GIn(in), cv::GOut(out1, out2)) + .compileStreaming()); +} + +TEST(GAPI_Streaming_Desync, Negative_CrossMainPart_Tier0) +{ + cv::GMat in; + cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + // Desynchronized path: depends on both tmp1 and tmp2 + cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1); + cv::GMat out1 = 0.5*tmp1 + 0.5*tmp2; + + // This shouldn't compile + EXPECT_ANY_THROW(cv::GComputation(in, out1).compileStreaming()); +} + +TEST(GAPI_Streaming_Desync, Negative_CrossMainPart_Tier1) +{ + cv::GMat in; + cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + // Desynchronized path: depends on both tmp1 and tmp2 + cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1); + cv::GMat out1 = 0.5*tmp1 + 0.5*cv::gapi::medianBlur(tmp2, 3); + + // This shouldn't compile + EXPECT_ANY_THROW(cv::GComputation(in, out1).compileStreaming()); +} + +TEST(GAPI_Streaming_Desync, Negative_CrossOtherDesync_Tier0) +{ + cv::GMat in; + cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + // Desynchronized path 1 + cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1); + cv::GMat out1 = 0.5*tmp2; + + // Desynchronized path 2 (depends on 1) + cv::GMat tmp3 = cv::gapi::streaming::desync(tmp1); + cv::GMat out2 = 0.5*tmp3 + tmp2; + + // This shouldn't compile + EXPECT_ANY_THROW(cv::GComputation(cv::GIn(in), cv::GOut(out1, out2)) + .compileStreaming()); +} + +TEST(GAPI_Streaming_Desync, Negative_CrossOtherDesync_Tier1) +{ + cv::GMat in; + cv::GMat tmp1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + // Desynchronized path 1 + cv::GMat tmp2 = cv::gapi::streaming::desync(tmp1); + cv::GMat out1 = 0.5*tmp2; + + // Desynchronized path 2 (depends on 1) + cv::GMat tmp3 = cv::gapi::streaming::desync(tmp1); + cv::GMat out2 = 0.5*cv::gapi::medianBlur(tmp3,3) + 1.0*tmp2; + + // This shouldn't compile + EXPECT_ANY_THROW(cv::GComputation(cv::GIn(in), cv::GOut(out1, out2)) + 
.compileStreaming()); +} + +TEST(GAPI_Streaming_Desync, Negative_SynchronizedPull) +{ + initTestDataPath(); + + cv::GMat in; + cv::GMat out1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + cv::GMat tmp1 = cv::gapi::streaming::desync(out1); + cv::GMat out2 = 0.5*tmp1; + + auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2)) + .compileStreaming(); + + auto sc_file = findDataFile("cv/video/768x576.avi"); + auto sc_src = gapi::wip::make_src(sc_file); + sc.setSource(cv::gin(sc_src)); + sc.start(); + + cv::Mat o1, o2; + EXPECT_ANY_THROW(sc.pull(cv::gout(o1, o2))); +} + +TEST(GAPI_Streaming_Desync, UseSpecialPull) +{ + initTestDataPath(); + + cv::GMat in; + cv::GMat out1 = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + cv::GMat tmp1 = cv::gapi::streaming::desync(out1); + cv::GMat out2 = 0.5*tmp1; + + auto sc = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2)) + .compileStreaming(); + + auto sc_file = findDataFile("cv/video/768x576.avi"); + auto sc_src = gapi::wip::make_src(sc_file); + sc.setSource(cv::gin(sc_src)); + sc.start(); + + cv::optional o1, o2; + std::size_t num_frames = 0u; + + while (sc.pull(cv::gout(o1, o2))) { + if (o1) num_frames++; + } + EXPECT_EQ(100u, num_frames); +} + +G_API_OP(ProduceVector, (cv::GMat)>, "test.desync.vector") { + static cv::GArrayDesc outMeta(const cv::GMatDesc &) { + return cv::empty_array_desc(); + } +}; + +G_API_OP(ProduceOpaque, (cv::GMat)>, "test.desync.opaque") { + static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) { + return cv::empty_gopaque_desc(); + } +}; + +GAPI_OCV_KERNEL(OCVVector, ProduceVector) { + static void run(const cv::Mat& in, std::vector &out) { + out = {in.cols, in.rows}; + } +}; + +GAPI_OCV_KERNEL(OCVOpaque, ProduceOpaque) { + static void run(const cv::Mat &in, int &v) { + v = in.channels(); + } +}; + +namespace { +cv::GStreamingCompiled desyncTestObject() { + cv::GMat in; + cv::GMat blur = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + cv::GMat blur_d = 
cv::gapi::copy(cv::gapi::streaming::desync(blur)); + cv::GMat d1 = Delay::on(blur_d, 10); + cv::GMat d2 = Delay::on(blur_d, 30); + + cv::GArray vec = ProduceVector::on(d1); + cv::GOpaque opq = ProduceOpaque::on(d2); + + auto pkg = cv::gapi::kernels(); + return cv::GComputation(cv::GIn(in), cv::GOut(blur, vec, opq)) + .compileStreaming(cv::compile_args(pkg)); +} +} // anonymous namespace + +TEST(GAPI_Streaming_Desync, MultipleDesyncOutputs_1) { + auto sc = desyncTestObject(); + const cv::Mat in_mat = cv::Mat::eye(cv::Size(320,240), CV_8UC3); + + sc.setSource(cv::gin(in_mat)); + sc.start(); + + cv::optional out_mat; + cv::optional > out_vec; + cv::optional out_int; + + int counter = 0; + while (sc.pull(cv::gout(out_mat, out_vec, out_int))) { + if (counter++ == 1000) { + // Stop the test after 1000 iterations + sc.stop(); + break; + } + GAPI_Assert(out_mat || out_vec || out_int); + + // out_vec and out_int are on the same desynchronized path + // they MUST arrive together. If one is available, the other + // also must be available. 
+ if (out_vec) { ASSERT_TRUE(out_int.has_value()); } + if (out_int) { ASSERT_TRUE(out_vec.has_value()); } + + if (out_vec || out_int) { + EXPECT_EQ(320, out_vec.value()[0]); + EXPECT_EQ(240, out_vec.value()[1]); + EXPECT_EQ( 3, out_int.value()); + } + } +} + +TEST(GAPI_Streaming_Desync, StartStop_Stress) { + auto sc = desyncTestObject(); + const cv::Mat in_mat = cv::Mat::eye(cv::Size(320,240), CV_8UC3); + + cv::optional out_mat; + cv::optional > out_vec; + cv::optional out_int; + + for (int i = 0; i < 10; i++) { + sc.setSource(cv::gin(in_mat)); + sc.start(); + int counter = 0; + while (counter++ < 100) { + sc.pull(cv::gout(out_mat, out_vec, out_int)); + GAPI_Assert(out_mat || out_vec || out_int); + if (out_vec) { ASSERT_TRUE(out_int.has_value()); } + if (out_int) { ASSERT_TRUE(out_vec.has_value()); } + } + sc.stop(); + } +} + +GAPI_FLUID_KERNEL(FluidCopy, cv::gapi::core::GCopy, false) { + static const int Window = 1; + + static void run(const cv::gapi::fluid::View &in, + cv::gapi::fluid::Buffer &out) { + const uint8_t *in_ptr = in.InLineB(0); + uint8_t *out_ptr = out.OutLineB(0); + + const auto in_type = CV_MAKETYPE(in.meta().depth, in.meta().chan); + const auto out_type = CV_MAKETYPE(out.meta().depth, out.meta().chan); + GAPI_Assert(in_type == out_type); + std::copy_n(in_ptr, in.length()*CV_ELEM_SIZE(in_type), out_ptr); + } +}; + + +TEST(GAPI_Streaming_Desync, DesyncObjectConsumedByTwoIslandsViaSeparateDesync) { + // See comment in the implementation of cv::gapi::streaming::desync (.cpp) + cv::GMat in; + cv::GMat tmp = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + cv::GMat tmp1 = cv::gapi::streaming::desync(tmp); + cv::GMat out1 = cv::gapi::copy(tmp1); // ran via Fluid backend + + cv::GMat tmp2 = cv::gapi::streaming::desync(tmp); + cv::GMat out2 = tmp2 * 0.5; // ran via OCV backend + + auto c = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2)); + auto p = cv::gapi::kernels(); + + EXPECT_NO_THROW(c.compileStreaming(cv::compile_args(p))); +} + 
+TEST(GAPI_Streaming_Desync, DesyncObjectConsumedByTwoIslandsViaSameDesync) { + // See comment in the implementation of cv::gapi::streaming::desync (.cpp) + cv::GMat in; + cv::GMat tmp = cv::gapi::boxFilter(in, -1, cv::Size(3,3)); + + cv::GMat tmp1 = cv::gapi::streaming::desync(tmp); + cv::GMat out1 = cv::gapi::copy(tmp1); // ran via Fluid backend + cv::GMat out2 = out1 - 0.5*tmp1; // ran via OCV backend + + auto c = cv::GComputation(cv::GIn(in), cv::GOut(out1, out2)); + auto p = cv::gapi::kernels(); + + EXPECT_NO_THROW(c.compileStreaming(cv::compile_args(p))); +} + } // namespace opencv_test From 48ccbe39b4024190c2be362f800363ad5160f912 Mon Sep 17 00:00:00 2001 From: AsyaPronina <155jj@mail.ru> Date: Mon, 2 Nov 2020 18:54:19 +0300 Subject: [PATCH 060/152] Changed behaviour of cv::gapi::serialize, cv::gapi::deserialize for GCompileArgs - cv::gapi::serialize bypasses compile arguments which have no S11N specialization with serialize/deserialize callbacks for underlying types - cv::gapi::deserialize can accept arbitraty number of serialized compile args in a stream but will return only those which are requested by user via template parameter pack if they are presented in the stream. 
If some or all of them are not presented cv::gapi::deserialize will ignore and return only those which are presented - cv::gapi::deserialize can accept only types which can be deserialized (have S11N specialization with the user callbacks) - Added cv::gapi::s11n::detail::has_S11N_spec trait to separate compile arguments which have S11N specialization with the user callbacks --- modules/gapi/include/opencv2/gapi/gcommon.hpp | 13 +- modules/gapi/include/opencv2/gapi/s11n.hpp | 48 +++- .../gapi/include/opencv2/gapi/s11n/base.hpp | 16 +- .../src/backends/common/serialization.cpp | 7 +- modules/gapi/test/s11n/gapi_s11n_tests.cpp | 271 ++++++++++++++++-- 5 files changed, 310 insertions(+), 45 deletions(-) diff --git a/modules/gapi/include/opencv2/gapi/gcommon.hpp b/modules/gapi/include/opencv2/gapi/gcommon.hpp index 2b260ed07c..0242020f6a 100644 --- a/modules/gapi/include/opencv2/gapi/gcommon.hpp +++ b/modules/gapi/include/opencv2/gapi/gcommon.hpp @@ -161,7 +161,9 @@ public: template::value, int>::type = 0> explicit GCompileArg(T &&t) : tag(detail::CompileArgTag::type>::tag()) - , serializeF(&cv::gapi::s11n::detail::wrap_serialize::serialize) + , serializeF(cv::gapi::s11n::detail::has_S11N_spec::value ? + &cv::gapi::s11n::detail::wrap_serialize::serialize : + nullptr) , arg(t) { } @@ -178,7 +180,10 @@ public: void serialize(cv::gapi::s11n::IOStream& os) const { - serializeF(os, *this); + if (serializeF) + { + serializeF(os, *this); + } } private: @@ -222,8 +227,8 @@ template struct wrap_serialize { static void serialize(IOStream& os, const GCompileArg& arg) { - using decayed_type = typename std::decay::type; - S11N::serialize(os, arg.get()); + using DT = typename std::decay::type; + S11N

::serialize(os, arg.get
()); } }; } // namespace detail diff --git a/modules/gapi/include/opencv2/gapi/s11n.hpp b/modules/gapi/include/opencv2/gapi/s11n.hpp index 2fa4e51176..0e3e382328 100644 --- a/modules/gapi/include/opencv2/gapi/s11n.hpp +++ b/modules/gapi/include/opencv2/gapi/s11n.hpp @@ -265,23 +265,25 @@ void getRunArgByIdx (IIStream& is, cv::util::variant &v, uint32_t idx) { namespace detail { -template struct deserialize_arg; +template struct try_deserialize_comparg; -template<> struct deserialize_arg> { -static GCompileArg exec(cv::gapi::s11n::IIStream&, const std::string&) { - throw std::logic_error("Passed arg can't be deserialized!"); +template<> struct try_deserialize_comparg> { +static cv::util::optional exec(const std::string&, cv::gapi::s11n::IIStream&) { + return { }; } }; template -struct deserialize_arg> { -static GCompileArg exec(cv::gapi::s11n::IIStream& is, const std::string& tag) { +struct try_deserialize_comparg> { +static cv::util::optional exec(const std::string& tag, cv::gapi::s11n::IIStream& is) { if (tag == cv::detail::CompileArgTag::tag()) { - return GCompileArg { - cv::gapi::s11n::detail::S11N::deserialize(is) - }; + static_assert(cv::gapi::s11n::detail::has_S11N_spec::value, + "cv::gapi::deserialize expects Types to have S11N " + "specializations with deserialization callbacks!"); + return cv::util::optional( + GCompileArg { cv::gapi::s11n::detail::S11N::deserialize(is) }); } - return deserialize_arg>::exec(is, tag); + return try_deserialize_comparg>::exec(tag, is); } }; @@ -303,17 +305,35 @@ static GRunArg exec(cv::gapi::s11n::IIStream& is, uint32_t idx) { }; template -cv::GCompileArgs getCompileArgs(const std::vector &p) { - std::unique_ptr pIs = cv::gapi::s11n::detail::getInStream(p); - cv::gapi::s11n::IIStream& is = *pIs; +inline cv::util::optional tryDeserializeCompArg(const std::string& tag, + const std::vector& sArg) { + std::unique_ptr pArgIs = cv::gapi::s11n::detail::getInStream(sArg); + return try_deserialize_comparg>::exec(tag, *pArgIs); +} + 
+template +cv::GCompileArgs getCompileArgs(const std::vector &sArgs) { cv::GCompileArgs args; + std::unique_ptr pIs = cv::gapi::s11n::detail::getInStream(sArgs); + cv::gapi::s11n::IIStream& is = *pIs; + uint32_t sz = 0; is >> sz; for (uint32_t i = 0; i < sz; ++i) { std::string tag; is >> tag; - args.push_back(cv::gapi::detail::deserialize_arg>::exec(is, tag)); + + std::vector sArg; + is >> sArg; + + cv::util::optional dArg = + cv::gapi::detail::tryDeserializeCompArg(tag, sArg); + + if (dArg.has_value()) + { + args.push_back(dArg.value()); + } } return args; diff --git a/modules/gapi/include/opencv2/gapi/s11n/base.hpp b/modules/gapi/include/opencv2/gapi/s11n/base.hpp index 6bf5d5fb0f..d9335ee9f7 100644 --- a/modules/gapi/include/opencv2/gapi/s11n/base.hpp +++ b/modules/gapi/include/opencv2/gapi/s11n/base.hpp @@ -8,6 +8,7 @@ #define OPENCV_GAPI_S11N_BASE_HPP #include +#include namespace cv { namespace gapi { @@ -16,10 +17,14 @@ struct IOStream; struct IIStream; namespace detail { -// Will be used along with default types if possible in specific cases (compile args, etc) -// Note: actual implementation is defined by user + +struct NotImplemented { +}; + +// The default S11N for custom types is NotImplemented +// Don't! sublass from NotImplemented if you actually implement S11N. 
template -struct S11N { +struct S11N: public NotImplemented { static void serialize(IOStream &, const T &) { GAPI_Assert(false && "No serialization routine is provided!"); } @@ -28,6 +33,11 @@ struct S11N { } }; +template struct has_S11N_spec { + static constexpr bool value = !std::is_base_of::type>>::value; +}; + } // namespace detail } // namespace s11n } // namespace gapi diff --git a/modules/gapi/src/backends/common/serialization.cpp b/modules/gapi/src/backends/common/serialization.cpp index bb1864823f..592c03cfed 100644 --- a/modules/gapi/src/backends/common/serialization.cpp +++ b/modules/gapi/src/backends/common/serialization.cpp @@ -338,8 +338,13 @@ IIStream& operator>> (IIStream& is, cv::gapi::wip::draw::Line &l) { IOStream& operator<< (IOStream& os, const cv::GCompileArg& arg) { + ByteMemoryOutStream tmpS; + arg.serialize(tmpS); + std::vector data = tmpS.data(); + os << arg.tag; - arg.serialize(os); + os << data; + return os; } diff --git a/modules/gapi/test/s11n/gapi_s11n_tests.cpp b/modules/gapi/test/s11n/gapi_s11n_tests.cpp index 3fe632e449..2fc1e46253 100644 --- a/modules/gapi/test/s11n/gapi_s11n_tests.cpp +++ b/modules/gapi/test/s11n/gapi_s11n_tests.cpp @@ -4,32 +4,86 @@ #include namespace { - struct MyCustomType { - int val; - std::string name; - std::vector vec; - std::map mmap; - bool operator==(const MyCustomType& other) const { - return val == other.val && name == other.name && - vec == other.vec && mmap == other.mmap; - } - }; -} +struct EmptyCustomType { }; + +struct SimpleCustomType { + bool val; + bool operator==(const SimpleCustomType& other) const { + return val == other.val; + } +}; + +struct SimpleCustomType2 { + int id; + bool operator==(const SimpleCustomType2& other) const { + return id == other.id; + } +}; + +struct MyCustomType { + int val; + std::string name; + std::vector vec; + std::map mmap; + bool operator==(const MyCustomType& other) const { + return val == other.val && name == other.name && + vec == other.vec && mmap == 
other.mmap; + } +}; + +struct MyCustomTypeNoS11N { + char sym; + int id; + std::string name; + + bool operator==(const MyCustomTypeNoS11N& other) const { + return sym == other.sym && id == other.id && + name == other.name; + } +}; +} // anonymous namespace namespace cv { namespace gapi { namespace s11n { namespace detail { - template<> struct S11N { - static void serialize(IOStream &os, const MyCustomType &p) { - os << p.val << p.name << p.vec << p.mmap; - } - static MyCustomType deserialize(IIStream &is) { - MyCustomType p; - is >> p.val >> p.name >> p.vec >> p.mmap; - return p; - } - }; +template<> struct S11N { + static void serialize(IOStream &, const EmptyCustomType &) { } + static EmptyCustomType deserialize(IIStream &) { return EmptyCustomType { }; } +}; + +template<> struct S11N { + static void serialize(IOStream &os, const SimpleCustomType &p) { + os << p.val; + } + static SimpleCustomType deserialize(IIStream &is) { + SimpleCustomType p; + is >> p.val; + return p; + } +}; + +template<> struct S11N { + static void serialize(IOStream &os, const SimpleCustomType2 &p) { + os << p.id; + } + static SimpleCustomType2 deserialize(IIStream &is) { + SimpleCustomType2 p; + is >> p.id; + return p; + } +}; + +template<> struct S11N { + static void serialize(IOStream &os, const MyCustomType &p) { + os << p.val << p.name << p.vec << p.mmap; + } + static MyCustomType deserialize(IIStream &is) { + MyCustomType p; + is >> p.val >> p.name >> p.vec >> p.mmap; + return p; + } +}; } // namespace detail } // namespace s11n } // namespace gapi @@ -38,9 +92,33 @@ namespace detail { namespace cv { namespace detail { +template<> struct CompileArgTag { + static const char* tag() { + return "org.opencv.test.empty_custom_type"; + } +}; + +template<> struct CompileArgTag { + static const char* tag() { + return "org.opencv.test.simple_custom_type"; + } +}; + +template<> struct CompileArgTag { + static const char* tag() { + return "org.opencv.test.simple_custom_type_2"; + } +}; + 
template<> struct CompileArgTag { static const char* tag() { - return "org.opencv.test.mycustomtype"; + return "org.opencv.test.my_custom_type"; + } +}; + +template<> struct CompileArgTag { + static const char* tag() { + return "org.opencv.test.my_custom_type_no_s11n"; } }; } // namespace detail @@ -586,7 +664,7 @@ TEST_F(S11N_Basic, Test_Custom_Type) { EXPECT_EQ(var, new_var); } -TEST_F(S11N_Basic, Test_Custom_CompileArg) { +TEST_F(S11N_Basic, Test_CompileArg) { MyCustomType customVar{1248, "World", {1280, 720, 640, 480}, {{5, 32434142342}, {7, 34242432}}}; std::vector sArgs = cv::gapi::serialize(cv::compile_args(customVar)); @@ -596,4 +674,151 @@ TEST_F(S11N_Basic, Test_Custom_CompileArg) { MyCustomType dCustomVar = cv::gapi::getCompileArg(dArgs).value(); EXPECT_EQ(customVar, dCustomVar); } + +TEST_F(S11N_Basic, Test_CompileArg_Without_UserCallback) { + SimpleCustomType customVar1 { false }; + MyCustomTypeNoS11N customVar2 { 'z', 189, "Name" }; + MyCustomType customVar3 { 1248, "World", {1280, 720, 640, 480}, + {{5, 32434142342}, {7, 34242432}} }; + + EXPECT_NO_THROW(cv::gapi::serialize(cv::compile_args(customVar1, customVar2, customVar3))); + + std::vector sArgs = cv::gapi::serialize( + cv::compile_args(customVar1, customVar2, customVar3)); + + GCompileArgs dArgs = cv::gapi::deserialize(sArgs); + + SimpleCustomType dCustomVar1 = cv::gapi::getCompileArg(dArgs).value(); + MyCustomType dCustomVar3 = cv::gapi::getCompileArg(dArgs).value(); + + EXPECT_EQ(customVar1, dCustomVar1); + EXPECT_EQ(customVar3, dCustomVar3); +} + +TEST_F(S11N_Basic, Test_Deserialize_Only_Requested_CompileArgs) { + MyCustomType myCustomVar { 1248, "World", {1280, 720, 640, 480}, + {{5, 32434142342}, {7, 34242432}} }; + SimpleCustomType simpleCustomVar { false }; + + std::vector sArgs = cv::gapi::serialize(cv::compile_args(myCustomVar, simpleCustomVar)); + + GCompileArgs dArgs = cv::gapi::deserialize(sArgs); + EXPECT_EQ(1u, dArgs.size()); + EXPECT_EQ(myCustomVar, 
cv::gapi::getCompileArg(dArgs).value()); + + dArgs.clear(); + dArgs = cv::gapi::deserialize(sArgs); + EXPECT_EQ(1u, dArgs.size()); + EXPECT_EQ(simpleCustomVar, cv::gapi::getCompileArg(dArgs).value()); + + dArgs.clear(); + dArgs = cv::gapi::deserialize(sArgs); + EXPECT_EQ(0u, dArgs.size()); + + dArgs.clear(); + dArgs = cv::gapi::deserialize(sArgs); + EXPECT_EQ(2u, dArgs.size()); + EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg(dArgs).value()); + EXPECT_EQ(simpleCustomVar, cv::gapi::getCompileArg(dArgs).value()); + + SimpleCustomType2 simpleCustomVar2 { 5 }; + std::vector sArgs2 = cv::gapi::serialize( + cv::compile_args(myCustomVar, simpleCustomVar, simpleCustomVar2)); + GCompileArgs dArgs2 = cv::gapi::deserialize(sArgs2); + EXPECT_EQ(2u, dArgs2.size()); + EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg(dArgs2).value()); + EXPECT_EQ(simpleCustomVar2, cv::gapi::getCompileArg(dArgs2).value()); +} + +TEST_F(S11N_Basic, Test_Deserialize_CompileArgs_RandomOrder) { + SimpleCustomType simpleCustomVar { false }; + SimpleCustomType2 simpleCustomVar2 { 5 }; + + std::vector sArgs = cv::gapi::serialize( + cv::compile_args(simpleCustomVar, simpleCustomVar2)); + GCompileArgs dArgs = cv::gapi::deserialize(sArgs); + + EXPECT_EQ(simpleCustomVar, cv::gapi::getCompileArg(dArgs).value()); + EXPECT_EQ(simpleCustomVar2, cv::gapi::getCompileArg(dArgs).value()); +} + +TEST_F(S11N_Basic, Test_CompileArgs_With_EmptyCompileArg) { + MyCustomType myCustomVar { 1248, "World", {1280, 720, 640, 480}, + {{5, 32434142342}, {7, 34242432}} }; + SimpleCustomType simpleCustomVar { false }; + EmptyCustomType emptyCustomVar { }; + + //----{ emptyCustomVar, myCustomVar }---- + std::vector sArgs1 = cv::gapi::serialize(cv::compile_args(emptyCustomVar, myCustomVar)); + GCompileArgs dArgsEmptyVar1 = cv::gapi::deserialize(sArgs1); + GCompileArgs dArgsMyVar1 = cv::gapi::deserialize(sArgs1); + GCompileArgs dArgsEmptyAndMyVars1 = cv::gapi::deserialize(sArgs1); + EXPECT_EQ(1u, dArgsEmptyVar1.size()); + 
EXPECT_TRUE(cv::gapi::getCompileArg(dArgsEmptyVar1).has_value()); + EXPECT_EQ(1u, dArgsMyVar1.size()); + EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg(dArgsMyVar1).value()); + EXPECT_EQ(2u, dArgsEmptyAndMyVars1.size()); + EXPECT_TRUE(cv::gapi::getCompileArg(dArgsEmptyAndMyVars1).has_value()); + EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg(dArgsEmptyAndMyVars1).value()); + + //----{ myCustomVar, emptyCustomVar }---- + std::vector sArgs2 = cv::gapi::serialize(cv::compile_args(myCustomVar, emptyCustomVar)); + GCompileArgs dArgsMyVar2 = cv::gapi::deserialize(sArgs2); + GCompileArgs dArgsEmptyVar2 = cv::gapi::deserialize(sArgs2); + GCompileArgs dArgsMyAndEmptyVars2 = cv::gapi::deserialize(sArgs2); + EXPECT_EQ(1u, dArgsMyVar2.size()); + EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg(dArgsMyVar2).value()); + EXPECT_EQ(1u, dArgsEmptyVar2.size()); + EXPECT_TRUE(cv::gapi::getCompileArg(dArgsEmptyVar2).has_value()); + EXPECT_EQ(2u, dArgsMyAndEmptyVars2.size()); + EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg(dArgsMyAndEmptyVars2).value()); + EXPECT_TRUE(cv::gapi::getCompileArg(dArgsMyAndEmptyVars2).has_value()); + + //----{ myCustomVar, emptyCustomVar, simpleCustomVar }---- + std::vector sArgs3 = cv::gapi::serialize( + cv::compile_args(myCustomVar, emptyCustomVar, simpleCustomVar)); + GCompileArgs dArgsMyVar3 = cv::gapi::deserialize(sArgs3); + GCompileArgs dArgsEmptyVar3 = cv::gapi::deserialize(sArgs3); + GCompileArgs dArgsSimpleVar3 = cv::gapi::deserialize(sArgs3); + GCompileArgs dArgsMyAndSimpleVars3 = cv::gapi::deserialize(sArgs3); + GCompileArgs dArgs3 = cv::gapi::deserialize(sArgs3); + EXPECT_EQ(1u, dArgsMyVar3.size()); + EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg(dArgsMyVar3).value()); + EXPECT_EQ(1u, dArgsEmptyVar3.size()); + EXPECT_TRUE(cv::gapi::getCompileArg(dArgsEmptyVar3).has_value()); + EXPECT_EQ(1u, dArgsSimpleVar3.size()); + EXPECT_EQ(simpleCustomVar, cv::gapi::getCompileArg(dArgsSimpleVar3).value()); + EXPECT_EQ(2u, dArgsMyAndSimpleVars3.size()); + 
EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg(dArgsMyAndSimpleVars3).value()); + EXPECT_EQ(simpleCustomVar, + cv::gapi::getCompileArg(dArgsMyAndSimpleVars3).value()); + EXPECT_EQ(3u, dArgs3.size()); + EXPECT_EQ(myCustomVar, cv::gapi::getCompileArg(dArgs3).value()); + EXPECT_TRUE(cv::gapi::getCompileArg(dArgs3).has_value()); + EXPECT_EQ(simpleCustomVar, cv::gapi::getCompileArg(dArgs3).value()); + + //----{ emptyCustomVar }---- + std::vector sArgs4 = cv::gapi::serialize(cv::compile_args(emptyCustomVar)); + GCompileArgs dArgsEmptyVar4 = cv::gapi::deserialize(sArgs4); + EXPECT_EQ(1u, dArgsEmptyVar4.size()); + EXPECT_TRUE(cv::gapi::getCompileArg(dArgsEmptyVar4).has_value()); +} + } // namespace opencv_test From 099ad1a259f1f83af6da6826e78ebd645ee94004 Mon Sep 17 00:00:00 2001 From: Dmitry Matveev Date: Wed, 28 Oct 2020 16:35:38 +0300 Subject: [PATCH 061/152] G-API: Desync -- fix the queue saturation problem Set queue size = 1 to Copy island right after the desync. In this case, Copy won't read more data from a "last_written" container than required, while feeding the desynchronized path. Sometimes Copy don't get fused into an island and behaves on its own -- in this case, it reads more data in advance so the slow (desync) part actually processes some data in-sync (more than actually required) --- .../gapi/src/executor/gstreamingexecutor.cpp | 44 ++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) diff --git a/modules/gapi/src/executor/gstreamingexecutor.cpp b/modules/gapi/src/executor/gstreamingexecutor.cpp index 41cb83f710..653d20e712 100644 --- a/modules/gapi/src/executor/gstreamingexecutor.cpp +++ b/modules/gapi/src/executor/gstreamingexecutor.cpp @@ -13,6 +13,10 @@ #include +#if !defined(GAPI_STANDALONE) +#include // GCopy -- FIXME - to be removed! 
+#endif // GAPI_STANDALONE + #include "api/gproto_priv.hpp" // ptr(GRunArgP) #include "compiler/passes/passes.hpp" #include "backends/common/gbackend.hpp" // createMat @@ -80,6 +84,10 @@ struct DataQueue { std::shared_ptr q; }; +struct DesyncSpecialCase { + static const char *name() { return "DesyncSpecialCase"; } +}; + std::vector reader_queues( ade::Graph &g, const ade::NodeHandle &obj) { @@ -936,19 +944,53 @@ cv::gimpl::GStreamingExecutor::GStreamingExecutor(std::unique_ptr && , isl_exec }); // Initialize queues for every operation's input - ade::TypedGraph qgr(*m_island_graph); + ade::TypedGraph qgr(*m_island_graph); + bool is_desync_start = false; for (auto eh : nh->inEdges()) { // ...only if the data is not compile-const if (const_ins.count(eh->srcNode()) == 0) { if (m_gim.metadata(eh).contains()) { qgr.metadata(eh).set(DataQueue(DataQueue::DESYNC)); + is_desync_start = true; + } else if (qgr.metadata(eh).contains()) { + // See comment below + // Limit queue size to 1 in this case + qgr.metadata(eh).set(DataQueue(1u)); } else { qgr.metadata(eh).set(DataQueue(queue_capacity)); } m_internal_queues.insert(qgr.metadata(eh).get().q.get()); } } + // WORKAROUND: + // Since now we always know desync() is followed by copy(), + // copy is always the island with DesyncIslEdge. 
+ // Mark the node's outputs a special way so then its following + // queue sizes will be limited to 1 (to avoid copy reading more + // data in advance - as there's no other way for the underlying + // "slow" part to control it) + if (is_desync_start) { + auto isl = m_gim.metadata(nh).get().object; + // In the current implementation, such islands + // _must_ start with copy + GAPI_Assert(isl->in_ops().size() == 1u); +#if !defined(GAPI_STANDALONE) + GAPI_Assert(GModel::Graph(*m_orig_graph) + .metadata(*isl->in_ops().begin()) + .get() + .k.name == cv::gapi::core::GCopy::id()); +#endif // GAPI_STANDALONE + for (auto out_nh : nh->outNodes()) { + for (auto out_eh : out_nh->outEdges()) { + qgr.metadata(out_eh).set(DesyncSpecialCase{}); + } + } + } + // It is ok to do it here since the graph is visited in + // a topologic order and its consumers (those checking + // their input edges & initializing queues) are yet to be + // visited } break; case NodeKind::SLOT: From 2a3cdba724aaf9871b988f8d7887c1899afb0f6d Mon Sep 17 00:00:00 2001 From: Anatoliy Talamanov Date: Tue, 3 Nov 2020 20:47:05 +0300 Subject: [PATCH 062/152] Merge pull request #18701 from TolyaTalamanov:at/introduce-config-for-ie-params Expand ie::Params to support config * Add config to IE params * Add test * Remove comments from tests * Rename to pluginConfig * Add one more overloads for pluginConfig * Add more tests --- .../gapi/include/opencv2/gapi/infer/ie.hpp | 34 +++++- modules/gapi/src/backends/ie/giebackend.cpp | 4 +- .../gapi/test/infer/gapi_infer_ie_test.cpp | 102 ++++++++++++++++++ 3 files changed, 135 insertions(+), 5 deletions(-) diff --git a/modules/gapi/include/opencv2/gapi/infer/ie.hpp b/modules/gapi/include/opencv2/gapi/infer/ie.hpp index a8bc0bb05d..53e31fbb09 100644 --- a/modules/gapi/include/opencv2/gapi/infer/ie.hpp +++ b/modules/gapi/include/opencv2/gapi/infer/ie.hpp @@ -11,6 +11,7 @@ #include #include #include // tuple, tuple_size +#include #include #include @@ -42,6 +43,8 @@ enum class 
TraitAs: int IMAGE //!< G-API traits an associated cv::Mat as an image so creates an "image" blob (NCHW/NHWC, etc) }; +using IEConfig = std::map; + namespace detail { struct ParamDesc { std::string model_path; @@ -63,6 +66,7 @@ namespace detail { enum class Kind { Load, Import }; Kind kind; bool is_generic; + IEConfig config; }; } // namespace detail @@ -86,7 +90,8 @@ public: , std::tuple_size::value // num_in , std::tuple_size::value // num_out , detail::ParamDesc::Kind::Load - , false} { + , false + , {}} { }; Params(const std::string &model, @@ -95,7 +100,8 @@ public: , std::tuple_size::value // num_in , std::tuple_size::value // num_out , detail::ParamDesc::Kind::Import - , false} { + , false + , {}} { }; Params& cfgInputLayers(const typename PortCfg::In &ll) { @@ -121,6 +127,16 @@ public: return *this; } + Params& pluginConfig(IEConfig&& cfg) { + desc.config = std::move(cfg); + return *this; + } + + Params& pluginConfig(const IEConfig& cfg) { + desc.config = cfg; + return *this; + } + // BEGIN(G-API's network parametrization API) GBackend backend() const { return cv::gapi::ie::backend(); } std::string tag() const { return Net::tag(); } @@ -138,15 +154,25 @@ public: const std::string &model, const std::string &weights, const std::string &device) - : desc{ model, weights, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Load, true}, m_tag(tag) { + : desc{ model, weights, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Load, true, {}}, m_tag(tag) { }; Params(const std::string &tag, const std::string &model, const std::string &device) - : desc{ model, {}, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Import, true}, m_tag(tag) { + : desc{ model, {}, device, {}, {}, {}, 0u, 0u, detail::ParamDesc::Kind::Import, true, {}}, m_tag(tag) { }; + Params& pluginConfig(IEConfig&& cfg) { + desc.config = std::move(cfg); + return *this; + } + + Params& pluginConfig(const IEConfig& cfg) { + desc.config = cfg; + return *this; + } + // BEGIN(G-API's network 
parametrization API) GBackend backend() const { return cv::gapi::ie::backend(); } std::string tag() const { return m_tag; } diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp index c66fa44361..85c0236ff1 100644 --- a/modules/gapi/src/backends/ie/giebackend.cpp +++ b/modules/gapi/src/backends/ie/giebackend.cpp @@ -185,7 +185,8 @@ struct IEUnit { inputs = net.getInputsInfo(); outputs = net.getOutputsInfo(); } else if (params.kind == cv::gapi::ie::detail::ParamDesc::Kind::Import) { - this_plugin = cv::gimpl::ie::wrap::getPlugin(params); + this_plugin = cv::gimpl::ie::wrap::getPlugin(params); + this_plugin.SetConfig(params.config); this_network = cv::gimpl::ie::wrap::importNetwork(this_plugin, params); // FIXME: ICNNetwork returns InputsDataMap/OutputsDataMap, // but ExecutableNetwork returns ConstInputsDataMap/ConstOutputsDataMap @@ -225,6 +226,7 @@ struct IEUnit { // FIXME: In case importNetwork for fill inputs/outputs need to obtain ExecutableNetwork, but // for loadNetwork they can be obtained by using readNetwork non_const_this->this_plugin = cv::gimpl::ie::wrap::getPlugin(params); + non_const_this->this_plugin.SetConfig(params.config); non_const_this->this_network = cv::gimpl::ie::wrap::loadNetwork(non_const_this->this_plugin, net, params); } diff --git a/modules/gapi/test/infer/gapi_infer_ie_test.cpp b/modules/gapi/test/infer/gapi_infer_ie_test.cpp index 3125705365..547c7c7d33 100644 --- a/modules/gapi/test/infer/gapi_infer_ie_test.cpp +++ b/modules/gapi/test/infer/gapi_infer_ie_test.cpp @@ -403,6 +403,108 @@ TEST(TestAgeGenderIE, GenericInfer) normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output"); } +TEST(TestAgeGenderIE, InvalidConfigGeneric) +{ + initDLDTDataPath(); + + std::string model_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml"); + std::string weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin"); + std::string device_id = 
"CPU"; + + // Configure & run G-API + cv::GMat in; + GInferInputs inputs; + inputs["data"] = in; + + auto outputs = cv::gapi::infer("age-gender-generic", inputs); + auto age = outputs.at("age_conv3"); + auto gender = outputs.at("prob"); + cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender)); + + auto pp = cv::gapi::ie::Params{"age-gender-generic", + model_path, + weights_path, + device_id}.pluginConfig({{"unsupported_config", "some_value"}}); + + EXPECT_ANY_THROW(comp.compile(cv::GMatDesc{CV_8U,3,cv::Size{320, 240}}, + cv::compile_args(cv::gapi::networks(pp)))); +} + +TEST(TestAgeGenderIE, CPUConfigGeneric) +{ + initDLDTDataPath(); + + std::string model_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml"); + std::string weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin"); + std::string device_id = "CPU"; + + // Configure & run G-API + cv::GMat in; + GInferInputs inputs; + inputs["data"] = in; + + auto outputs = cv::gapi::infer("age-gender-generic", inputs); + auto age = outputs.at("age_conv3"); + auto gender = outputs.at("prob"); + cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender)); + + auto pp = cv::gapi::ie::Params{"age-gender-generic", + model_path, + weights_path, + device_id}.pluginConfig({{"ENFORCE_BF16", "NO"}}); + + EXPECT_NO_THROW(comp.compile(cv::GMatDesc{CV_8U,3,cv::Size{320, 240}}, + cv::compile_args(cv::gapi::networks(pp)))); +} + +TEST(TestAgeGenderIE, InvalidConfig) +{ + initDLDTDataPath(); + + std::string model_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml"); + std::string weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin"); + std::string device_id = "CPU"; + + using AGInfo = std::tuple; + G_API_NET(AgeGender, , "test-age-gender"); + + cv::GMat in; + cv::GMat age, gender; + std::tie(age, gender) = cv::gapi::infer(in); + cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender)); + + auto pp = cv::gapi::ie::Params { + model_path, weights_path, 
device_id + }.cfgOutputLayers({ "age_conv3", "prob" }).pluginConfig({{"unsupported_config", "some_value"}}); + + EXPECT_ANY_THROW(comp.compile(cv::GMatDesc{CV_8U,3,cv::Size{320, 240}}, + cv::compile_args(cv::gapi::networks(pp)))); +} + +TEST(TestAgeGenderIE, CPUConfig) +{ + initDLDTDataPath(); + + std::string model_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml"); + std::string weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin"); + std::string device_id = "CPU"; + + using AGInfo = std::tuple; + G_API_NET(AgeGender, , "test-age-gender"); + + cv::GMat in; + cv::GMat age, gender; + std::tie(age, gender) = cv::gapi::infer(in); + cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender)); + + auto pp = cv::gapi::ie::Params { + model_path, weights_path, device_id + }.cfgOutputLayers({ "age_conv3", "prob" }).pluginConfig({{"ENFORCE_BF16", "NO"}}); + + EXPECT_NO_THROW(comp.compile(cv::GMatDesc{CV_8U,3,cv::Size{320, 240}}, + cv::compile_args(cv::gapi::networks(pp)))); +} + } // namespace opencv_test #endif // HAVE_INF_ENGINE From a110ede0a253a88c3b925a40b42d8c1013fbdfe4 Mon Sep 17 00:00:00 2001 From: Dmitry Matveev Date: Tue, 3 Nov 2020 21:39:16 +0300 Subject: [PATCH 063/152] Merge pull request #18716 from dmatveev:dm/upstream_onnx * G-API: Introduce ONNX backend for Inference - Basic operations are implemented (Infer, -ROI, -List, -List2); - Implemented automatic preprocessing for ONNX models; - Test suite is extended with `OPENCV_GAPI_ONNX_MODEL_PATH` env for test data (test data is an ONNX Model Zoo repo snapshot); - Fixed kernel lookup logic in core G-API: - Lookup NN kernels not in the default package, but in the associated backend's aux package. Now two NN backends can work in the same graph. 
- Added Infer SSD demo and a combined ONNX/IE demo; * G-API/ONNX: Fix some of CMake issues Co-authored-by: Pashchenkov, Maxim --- CMakeLists.txt | 17 + cmake/FindONNX.cmake | 36 + modules/gapi/CMakeLists.txt | 13 + .../gapi/include/opencv2/gapi/infer/onnx.hpp | 138 +++ modules/gapi/samples/infer_ie_onnx_hybrid.cpp | 195 ++++ modules/gapi/samples/infer_ssd_onnx.cpp | 213 ++++ .../gapi/src/backends/onnx/gonnxbackend.cpp | 955 ++++++++++++++++++ .../gapi/src/backends/onnx/gonnxbackend.hpp | 56 + modules/gapi/src/compiler/passes/kernels.cpp | 25 +- .../gapi/test/infer/gapi_infer_onnx_test.cpp | 278 +++++ 10 files changed, 1920 insertions(+), 6 deletions(-) create mode 100644 cmake/FindONNX.cmake create mode 100644 modules/gapi/include/opencv2/gapi/infer/onnx.hpp create mode 100644 modules/gapi/samples/infer_ie_onnx_hybrid.cpp create mode 100644 modules/gapi/samples/infer_ssd_onnx.cpp create mode 100644 modules/gapi/src/backends/onnx/gonnxbackend.cpp create mode 100644 modules/gapi/src/backends/onnx/gonnxbackend.hpp create mode 100644 modules/gapi/test/infer/gapi_infer_onnx_test.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 4350b2fe2a..f3ca52fd4b 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -439,6 +439,9 @@ OCV_OPTION(WITH_ANDROID_MEDIANDK "Use Android Media NDK for Video I/O (Android)" OCV_OPTION(WITH_TENGINE "Include Arm Inference Tengine support" OFF VISIBLE_IF (ARM OR AARCH64) AND (UNIX OR ANDROID) AND NOT IOS VERIFY HAVE_TENGINE) +OCV_OPTION(WITH_ONNX "Include Microsoft ONNX Runtime support" OFF + VISIBLE_IF TRUE + VERIFY HAVE_ONNX) # OpenCV build components # =================================================== @@ -775,6 +778,11 @@ if(WITH_QUIRC) add_subdirectory(3rdparty/quirc) set(HAVE_QUIRC TRUE) endif() + +if(WITH_ONNX) + include(cmake/FindONNX.cmake) +endif() + # ---------------------------------------------------------------------------- # OpenCV HAL # ---------------------------------------------------------------------------- @@ -1556,6 
+1564,15 @@ if(WITH_OPENCL OR HAVE_OPENCL) endif() endif() +if(WITH_ONNX OR HAVE_ONNX) + status("") + status(" ONNX:" HAVE_ONNX THEN "YES" ELSE "NO") + if(HAVE_ONNX) + status(" Include path:" ONNX_INCLUDE_DIR THEN "${ONNX_INCLUDE_DIR}" ELSE "NO") + status(" Link libraries:" ONNX_LIBRARIES THEN "${ONNX_LIBRARIES}" ELSE "NO") + endif() +endif() + # ========================== python ========================== if(BUILD_opencv_python2) status("") diff --git a/cmake/FindONNX.cmake b/cmake/FindONNX.cmake new file mode 100644 index 0000000000..51aa77b460 --- /dev/null +++ b/cmake/FindONNX.cmake @@ -0,0 +1,36 @@ +ocv_clear_vars(HAVE_ONNX) + +set(ONNXRT_ROOT_DIR "" CACHE PATH "ONNX Runtime install directory") + +# For now, check the old name ORT_INSTALL_DIR +if(ORT_INSTALL_DIR AND NOT ONNXRT_ROOT_DIR) + set(ONNXRT_ROOT_DIR ORT_INSTALL_DIR) +endif() + +if(ONNXRT_ROOT_DIR) + find_library(ORT_LIB onnxruntime + ${ONNXRT_ROOT_DIR}/lib + CMAKE_FIND_ROOT_PATH_BOTH) + find_path(ORT_INCLUDE onnxruntime_cxx_api.h + ${ONNXRT_ROOT_DIR}/include/onnxruntime/core/session + CMAKE_FIND_ROOT_PATH_BOTH) +endif() + +if(ORT_LIB AND ORT_INCLUDE) + set(HAVE_ONNX TRUE) + # For CMake output only + set(ONNX_LIBRARIES "${ORT_LIB}" CACHE STRING "ONNX Runtime libraries") + set(ONNX_INCLUDE_DIR "${ORT_INCLUDE}" CACHE STRING "ONNX Runtime include path") + + # Link target with associated interface headers + set(ONNX_LIBRARY "onnxruntime" CACHE STRING "ONNX Link Target") + ocv_add_library(${ONNX_LIBRARY} SHARED IMPORTED) + set_target_properties(${ONNX_LIBRARY} PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES ${ORT_INCLUDE} + IMPORTED_LOCATION ${ORT_LIB} + IMPORTED_IMPLIB ${ORT_LIB}) +endif() + +if(NOT HAVE_ONNX) + ocv_clear_vars(HAVE_ONNX ORT_LIB ORT_INCLUDE_DIR) +endif() diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt index acfbd1d70e..d95f255951 100644 --- a/modules/gapi/CMakeLists.txt +++ b/modules/gapi/CMakeLists.txt @@ -131,6 +131,9 @@ set(gapi_srcs src/backends/ie/giebackend.cpp 
src/backends/ie/giebackend/giewrapper.cpp + # ONNX Backend. + src/backends/onnx/gonnxbackend.cpp + # Render Backend. src/backends/render/grenderocv.cpp src/backends/render/ft_render.cpp @@ -205,10 +208,20 @@ if(HAVE_PLAIDML) ocv_target_include_directories(${the_module} SYSTEM PRIVATE ${PLAIDML_INCLUDE_DIRS}) endif() + if(WIN32) # Required for htonl/ntohl on Windows ocv_target_link_libraries(${the_module} PRIVATE wsock32 ws2_32) endif() +if(HAVE_ONNX) + ocv_target_link_libraries(${the_module} PRIVATE ${ONNX_LIBRARY}) + ocv_target_compile_definitions(${the_module} PRIVATE HAVE_ONNX=1) + if(TARGET opencv_test_gapi) + ocv_target_compile_definitions(opencv_test_gapi PRIVATE HAVE_ONNX=1) + ocv_target_link_libraries(opencv_test_gapi PRIVATE ${ONNX_LIBRARY}) + endif() +endif() + ocv_add_perf_tests() ocv_add_samples() diff --git a/modules/gapi/include/opencv2/gapi/infer/onnx.hpp b/modules/gapi/include/opencv2/gapi/infer/onnx.hpp new file mode 100644 index 0000000000..d61ceb3dca --- /dev/null +++ b/modules/gapi/include/opencv2/gapi/infer/onnx.hpp @@ -0,0 +1,138 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_INFER_ONNX_HPP +#define OPENCV_GAPI_INFER_ONNX_HPP + +#include +#include +#include +#include // tuple, tuple_size + +#include +#include + +#include // GAPI_EXPORTS +#include // GKernelPackage + +namespace cv { +namespace gapi { +namespace onnx { + +GAPI_EXPORTS cv::gapi::GBackend backend(); + +enum class TraitAs: int { + TENSOR, //!< G-API traits an associated cv::Mat as a raw tensor + // and passes dimensions as-is + IMAGE //!< G-API traits an associated cv::Mat as an image so + // creates an "image" blob (NCHW/NHWC, etc) +}; + +using PostProc = std::function &, + std::unordered_map &)>; + + +namespace detail { +struct ParamDesc { + std::string model_path; + + // NB: nun_* may differ from topology's real input/output port numbers + // (e.g. topology's partial execution) + std::size_t num_in; // How many inputs are defined in the operation + std::size_t num_out; // How many outputs are defined in the operation + + // NB: Here order follows the `Net` API + std::vector input_names; + std::vector output_names; + + using ConstInput = std::pair; + std::unordered_map const_inputs; + + std::vector mean; + std::vector stdev; + + std::vector out_metas; + PostProc custom_post_proc; + + std::vector normalize; +}; +} // namespace detail + +template +struct PortCfg { + using In = std::array + < std::string + , std::tuple_size::value >; + using Out = std::array + < std::string + , std::tuple_size::value >; + using NormCoefs = std::array + < cv::Scalar + , std::tuple_size::value >; + using Normalize = std::array + < bool + , std::tuple_size::value >; +}; + +template class Params { +public: + Params(const std::string &model) { + desc.model_path = model; + desc.num_in = std::tuple_size::value; + desc.num_out = std::tuple_size::value; + }; + + // BEGIN(G-API's network parametrization API) + GBackend backend() const { return cv::gapi::onnx::backend(); } + std::string tag() const { return Net::tag(); } + cv::util::any 
params() const { return { desc }; } + // END(G-API's network parametrization API) + + Params& cfgInputLayers(const typename PortCfg::In &ll) { + desc.input_names.assign(ll.begin(), ll.end()); + return *this; + } + + Params& cfgOutputLayers(const typename PortCfg::Out &ll) { + desc.output_names.assign(ll.begin(), ll.end()); + return *this; + } + + Params& constInput(const std::string &layer_name, + const cv::Mat &data, + TraitAs hint = TraitAs::TENSOR) { + desc.const_inputs[layer_name] = {data, hint}; + return *this; + } + + Params& cfgMeanStd(const typename PortCfg::NormCoefs &m, + const typename PortCfg::NormCoefs &s) { + desc.mean.assign(m.begin(), m.end()); + desc.stdev.assign(s.begin(), s.end()); + return *this; + } + + Params& cfgPostProc(const std::vector &outs, + const PostProc &pp) { + desc.out_metas = outs; + desc.custom_post_proc = pp; + return *this; + } + + Params& cfgNormalize(const typename PortCfg::Normalize &n) { + desc.normalize.assign(n.begin(), n.end()); + return *this; + } + +protected: + detail::ParamDesc desc; +}; + +} // namespace onnx +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_INFER_HPP diff --git a/modules/gapi/samples/infer_ie_onnx_hybrid.cpp b/modules/gapi/samples/infer_ie_onnx_hybrid.cpp new file mode 100644 index 0000000000..b8612a25ca --- /dev/null +++ b/modules/gapi/samples/infer_ie_onnx_hybrid.cpp @@ -0,0 +1,195 @@ +#include +#include + +#include "opencv2/imgproc.hpp" +#include "opencv2/highgui.hpp" + +#include "opencv2/gapi.hpp" +#include "opencv2/gapi/core.hpp" +#include "opencv2/gapi/imgproc.hpp" +#include "opencv2/gapi/infer.hpp" +#include "opencv2/gapi/infer/ie.hpp" +#include "opencv2/gapi/infer/onnx.hpp" +#include "opencv2/gapi/cpu/gcpukernel.hpp" +#include "opencv2/gapi/streaming/cap.hpp" + +namespace { +const std::string keys = + "{ h help | | print this help message }" + "{ input | | Path to an input video file }" + "{ fdm | | IE face detection model IR }" + "{ fdw | | IE face detection model weights }" 
+ "{ fdd | | IE face detection device }" + "{ emom | | ONNX emotions recognition model }" + "{ output | | (Optional) Path to an output video file }" + ; +} // namespace + +namespace custom { +G_API_NET(Faces, , "face-detector"); +G_API_NET(Emotions, , "emotions-recognition"); + +G_API_OP(PostProc, (cv::GMat, cv::GMat)>, "custom.fd_postproc") { + static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GMatDesc &) { + return cv::empty_array_desc(); + } +}; + +GAPI_OCV_KERNEL(OCVPostProc, PostProc) { + static void run(const cv::Mat &in_ssd_result, + const cv::Mat &in_frame, + std::vector &out_faces) { + const int MAX_PROPOSALS = 200; + const int OBJECT_SIZE = 7; + const cv::Size upscale = in_frame.size(); + const cv::Rect surface({0,0}, upscale); + + out_faces.clear(); + + const float *data = in_ssd_result.ptr(); + for (int i = 0; i < MAX_PROPOSALS; i++) { + const float image_id = data[i * OBJECT_SIZE + 0]; // batch id + const float confidence = data[i * OBJECT_SIZE + 2]; + const float rc_left = data[i * OBJECT_SIZE + 3]; + const float rc_top = data[i * OBJECT_SIZE + 4]; + const float rc_right = data[i * OBJECT_SIZE + 5]; + const float rc_bottom = data[i * OBJECT_SIZE + 6]; + + if (image_id < 0.f) { // indicates end of detections + break; + } + if (confidence < 0.5f) { + continue; + } + + cv::Rect rc; + rc.x = static_cast(rc_left * upscale.width); + rc.y = static_cast(rc_top * upscale.height); + rc.width = static_cast(rc_right * upscale.width) - rc.x; + rc.height = static_cast(rc_bottom * upscale.height) - rc.y; + out_faces.push_back(rc & surface); + } + } +}; +//! 
[Postproc] + +} // namespace custom + +namespace labels { +// Labels as defined in +// https://github.com/onnx/models/tree/master/vision/body_analysis/emotion_ferplus +// +const std::string emotions[] = { + "neutral", "happiness", "surprise", "sadness", "anger", "disgust", "fear", "contempt" +}; +namespace { +template +std::vector softmax(Iter begin, Iter end) { + std::vector prob(end - begin, 0.f); + std::transform(begin, end, prob.begin(), [](float x) { return std::exp(x); }); + float sum = std::accumulate(prob.begin(), prob.end(), 0.0f); + for (int i = 0; i < static_cast(prob.size()); i++) + prob[i] /= sum; + return prob; +} + +void DrawResults(cv::Mat &frame, + const std::vector &faces, + const std::vector &out_emotions) { + CV_Assert(faces.size() == out_emotions.size()); + + for (auto it = faces.begin(); it != faces.end(); ++it) { + const auto idx = std::distance(faces.begin(), it); + const auto &rc = *it; + + const float *emotions_data = out_emotions[idx].ptr(); + auto sm = softmax(emotions_data, emotions_data + 8); + const auto emo_id = std::max_element(sm.begin(), sm.end()) - sm.begin(); + + const int ATTRIB_OFFSET = 15; + cv::rectangle(frame, rc, {0, 255, 0}, 4); + cv::putText(frame, emotions[emo_id], + cv::Point(rc.x, rc.y - ATTRIB_OFFSET), + cv::FONT_HERSHEY_COMPLEX_SMALL, + 1, + cv::Scalar(0, 0, 255)); + + std::cout << emotions[emo_id] << " at " << rc << std::endl; + } +} +} // anonymous namespace +} // namespace labels + +int main(int argc, char *argv[]) +{ + cv::CommandLineParser cmd(argc, argv, keys); + if (cmd.has("help")) { + cmd.printMessage(); + return 0; + } + const std::string input = cmd.get("input"); + const std::string output = cmd.get("output"); + + // OpenVINO FD parameters here + auto det_net = cv::gapi::ie::Params { + cmd.get("fdm"), // read cmd args: path to topology IR + cmd.get("fdw"), // read cmd args: path to weights + cmd.get("fdd"), // read cmd args: device specifier + }; + + // ONNX Emotions parameters here + auto emo_net = 
cv::gapi::onnx::Params { + cmd.get("emom"), // read cmd args: path to the ONNX model + }.cfgNormalize({false}); // model accepts 0..255 range in FP32 + + auto kernels = cv::gapi::kernels(); + auto networks = cv::gapi::networks(det_net, emo_net); + + cv::GMat in; + cv::GMat bgr = cv::gapi::copy(in); + cv::GMat frame = cv::gapi::streaming::desync(bgr); + cv::GMat detections = cv::gapi::infer(frame); + cv::GArray faces = custom::PostProc::on(detections, frame); + cv::GArray emotions = cv::gapi::infer(faces, frame); + auto pipeline = cv::GComputation(cv::GIn(in), cv::GOut(bgr, faces, emotions)) + .compileStreaming(cv::compile_args(kernels, networks)); + + auto in_src = cv::gapi::wip::make_src(input); + pipeline.setSource(cv::gin(in_src)); + pipeline.start(); + + cv::util::optional out_frame; + cv::util::optional> out_faces; + cv::util::optional> out_emotions; + + cv::Mat last_mat; + std::vector last_faces; + std::vector last_emotions; + + cv::VideoWriter writer; + + while (pipeline.pull(cv::gout(out_frame, out_faces, out_emotions))) { + if (out_faces && out_emotions) { + last_faces = *out_faces; + last_emotions = *out_emotions; + } + if (out_frame) { + last_mat = *out_frame; + labels::DrawResults(last_mat, last_faces, last_emotions); + + if (!output.empty()) { + if (!writer.isOpened()) { + const auto sz = cv::Size{last_mat.cols, last_mat.rows}; + writer.open(output, cv::VideoWriter::fourcc('M','J','P','G'), 25.0, sz); + CV_Assert(writer.isOpened()); + } + writer << last_mat; + } + } + if (!last_mat.empty()) { + cv::imshow("Out", last_mat); + cv::waitKey(1); + } + } + return 0; +} diff --git a/modules/gapi/samples/infer_ssd_onnx.cpp b/modules/gapi/samples/infer_ssd_onnx.cpp new file mode 100644 index 0000000000..fc26ca1e36 --- /dev/null +++ b/modules/gapi/samples/infer_ssd_onnx.cpp @@ -0,0 +1,213 @@ +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace custom { + 
+G_API_NET(ObjDetector, , "object-detector"); + +using GDetections = cv::GArray; +using GSize = cv::GOpaque; +using GPrims = cv::GArray; + +G_API_OP(GetSize, , "sample.custom.get-size") { + static cv::GOpaqueDesc outMeta(const cv::GMatDesc &) { + return cv::empty_gopaque_desc(); + } +}; +G_API_OP(ParseSSD, , "sample.custom.parse-ssd") { + static cv::GArrayDesc outMeta(const cv::GMatDesc &, const cv::GOpaqueDesc &) { + return cv::empty_array_desc(); + } +}; +G_API_OP(BBoxes, , "sample.custom.b-boxes") { + static cv::GArrayDesc outMeta(const cv::GArrayDesc &) { + return cv::empty_array_desc(); + } +}; + +GAPI_OCV_KERNEL(OCVGetSize, GetSize) { + static void run(const cv::Mat &in, cv::Size &out) { + out = {in.cols, in.rows}; + } +}; +GAPI_OCV_KERNEL(OCVParseSSD, ParseSSD) { + static void run(const cv::Mat &in_ssd_result, + const cv::Size &in_parent_size, + std::vector &out_objects) { + const auto &in_ssd_dims = in_ssd_result.size; + CV_Assert(in_ssd_dims.dims() == 4u); + + const int MAX_PROPOSALS = in_ssd_dims[2]; + const int OBJECT_SIZE = in_ssd_dims[3]; + + CV_Assert(OBJECT_SIZE == 7); // fixed SSD object size + + const cv::Rect surface({0,0}, in_parent_size); + + out_objects.clear(); + + const float *data = in_ssd_result.ptr(); + for (int i = 0; i < MAX_PROPOSALS; i++) { + const float image_id = data[i * OBJECT_SIZE + 0]; + const float label = data[i * OBJECT_SIZE + 1]; + const float confidence = data[i * OBJECT_SIZE + 2]; + const float rc_left = data[i * OBJECT_SIZE + 3]; + const float rc_top = data[i * OBJECT_SIZE + 4]; + const float rc_right = data[i * OBJECT_SIZE + 5]; + const float rc_bottom = data[i * OBJECT_SIZE + 6]; + (void) label; // unused + + if (image_id < 0.f) { + break; // marks end-of-detections + } + if (confidence < 0.5f) { + continue; // skip objects with low confidence + } + + // map relative coordinates to the original image scale + cv::Rect rc; + rc.x = static_cast(rc_left * in_parent_size.width); + rc.y = static_cast(rc_top * 
in_parent_size.height); + rc.width = static_cast(rc_right * in_parent_size.width) - rc.x; + rc.height = static_cast(rc_bottom * in_parent_size.height) - rc.y; + out_objects.emplace_back(rc & surface); + } + } +}; +GAPI_OCV_KERNEL(OCVBBoxes, BBoxes) { + // This kernel converts the rectangles into G-API's + // rendering primitives + static void run(const std::vector &in_obj_rcs, + std::vector &out_prims) { + out_prims.clear(); + const auto cvt = [](const cv::Rect &rc, const cv::Scalar &clr) { + return cv::gapi::wip::draw::Rect(rc, clr, 2); + }; + for (auto &&rc : in_obj_rcs) { + out_prims.emplace_back(cvt(rc, CV_RGB(0,255,0))); // green + } + + std::cout << "Detections:"; + for (auto &&rc : in_obj_rcs) std::cout << ' ' << rc; + std::cout << std::endl; + } +}; + +} // namespace custom + +namespace { +void remap_ssd_ports(const std::unordered_map &onnx, + std::unordered_map &gapi) { + // Assemble ONNX-processed outputs back to a single 1x1x200x7 blob + // to preserve compatibility with OpenVINO-based SSD pipeline + const cv::Mat &num_detections = onnx.at("num_detections:0"); + const cv::Mat &detection_boxes = onnx.at("detection_boxes:0"); + const cv::Mat &detection_scores = onnx.at("detection_scores:0"); + const cv::Mat &detection_classes = onnx.at("detection_classes:0"); + + GAPI_Assert(num_detections.depth() == CV_32F); + GAPI_Assert(detection_boxes.depth() == CV_32F); + GAPI_Assert(detection_scores.depth() == CV_32F); + GAPI_Assert(detection_classes.depth() == CV_32F); + + cv::Mat &ssd_output = gapi.at("detection_output"); + + const int num_objects = static_cast(num_detections.ptr()[0]); + const float *in_boxes = detection_boxes.ptr(); + const float *in_scores = detection_scores.ptr(); + const float *in_classes = detection_classes.ptr(); + float *ptr = ssd_output.ptr(); + + for (int i = 0; i < num_objects; i++) { + ptr[0] = 0.f; // "image_id" + ptr[1] = in_classes[i]; // "label" + ptr[2] = in_scores[i]; // "confidence" + ptr[3] = in_boxes[4*i + 1]; // left + ptr[4] 
= in_boxes[4*i + 0]; // top + ptr[5] = in_boxes[4*i + 3]; // right + ptr[6] = in_boxes[4*i + 2]; // bottom + + ptr += 7; + in_boxes += 4; + } + if (num_objects < ssd_output.size[2]-1) { + // put a -1 mark at the end of output blob if there is space left + ptr[0] = -1.f; + } +} +} // anonymous namespace + + +const std::string keys = + "{ h help | | Print this help message }" + "{ input | | Path to the input video file }" + "{ output | | (Optional) path to output video file }" + "{ detm | | Path to an ONNX SSD object detection model (.onnx) }" + ; + +int main(int argc, char *argv[]) +{ + cv::CommandLineParser cmd(argc, argv, keys); + if (cmd.has("help")) { + cmd.printMessage(); + return 0; + } + + // Prepare parameters first + const std::string input = cmd.get("input"); + const std::string output = cmd.get("output"); + const auto obj_model_path = cmd.get("detm"); + + auto obj_net = cv::gapi::onnx::Params{obj_model_path} + .cfgOutputLayers({"detection_output"}) + .cfgPostProc({cv::GMatDesc{CV_32F, {1,1,200,7}}}, remap_ssd_ports); + auto kernels = cv::gapi::kernels< custom::OCVGetSize + , custom::OCVParseSSD + , custom::OCVBBoxes>(); + auto networks = cv::gapi::networks(obj_net); + + // Now build the graph + cv::GMat in; + auto blob = cv::gapi::infer(in); + auto rcs = custom::ParseSSD::on(blob, custom::GetSize::on(in)); + auto out = cv::gapi::wip::draw::render3ch(in, custom::BBoxes::on(rcs)); + cv::GStreamingCompiled pipeline = cv::GComputation(cv::GIn(in), cv::GOut(out)) + .compileStreaming(cv::compile_args(kernels, networks)); + + auto inputs = cv::gin(cv::gapi::wip::make_src(input)); + + // The execution part + pipeline.setSource(std::move(inputs)); + pipeline.start(); + + cv::VideoWriter writer; + + cv::Mat outMat; + while (pipeline.pull(cv::gout(outMat))) { + cv::imshow("Out", outMat); + cv::waitKey(1); + if (!output.empty()) { + if (!writer.isOpened()) { + const auto sz = cv::Size{outMat.cols, outMat.rows}; + writer.open(output, 
cv::VideoWriter::fourcc('M','J','P','G'), 25.0, sz); + CV_Assert(writer.isOpened()); + } + writer << outMat; + } + } + return 0; +} diff --git a/modules/gapi/src/backends/onnx/gonnxbackend.cpp b/modules/gapi/src/backends/onnx/gonnxbackend.cpp new file mode 100644 index 0000000000..c81e032969 --- /dev/null +++ b/modules/gapi/src/backends/onnx/gonnxbackend.cpp @@ -0,0 +1,955 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#include "precomp.hpp" +#include "backends/onnx/gonnxbackend.hpp" + +#ifdef HAVE_ONNX + +#include // any_of +#include +#include +#include + +#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK! + +namespace cv { +namespace gimpl { +namespace onnx { + +enum TensorPosition : int { + INPUT, + OUTPUT +}; + +struct TensorInfo { + TensorInfo() = default; + explicit TensorInfo(const Ort::TensorTypeAndShapeInfo& info) + : dims(info.GetShape()) + , type(info.GetElementType()) + , is_dynamic(std::find(dims.begin(), dims.end(), -1) != dims.end()) { + if (!is_dynamic) { + size = std::accumulate(dims.begin(), + dims.end(), + static_cast(1), + std::multiplies()); + } + // Heuristic: check if the tensor is grayscale input + if (dims.size() == 4u + && dims[0] == 1 + && dims[1] == 1 + && dims[2] > 1 + && dims[3] > 1) { + is_grayscale = true; + } + } + + std::string name; + std::vector dims; + ONNXTensorElementDataType type = ONNX_TENSOR_ELEMENT_DATA_TYPE_UNDEFINED; + int64_t size = -1; + + bool normalize = true; + + bool is_dynamic = false; + bool is_grayscale = false; + + struct MeanStdev { + cv::Scalar mean; + cv::Scalar stdev; + }; + cv::util::optional mstd; +}; + +class ONNXCompiled { + // ONNX Resources + // NOTE: Env must live with the session, otherwise segfaults. 
+ Ort::Env this_env{nullptr}; + Ort::Session this_session{nullptr}; + Ort::MemoryInfo this_memory_info{nullptr}; + + std::vector in_tensor_info; + std::vector out_tensor_info; + bool is_dynamic = false; + + // G-API description + gapi::onnx::detail::ParamDesc params; + + // Input/output tensor information + std::vector getTensorInfo(TensorPosition pos); + + // Run-time data structures + std::vector in_data; + std::vector out_data; + + void Run(const std::vector& ins, + const std::vector& outs); + +public: + explicit ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp); + + // Extract the information about output layer #i + cv::GMatDesc outMeta(int i) const; + + // Assign input/output info + std::size_t numInputs() const { return params.num_in; } + std::size_t numOutputs() const { return params.num_out; } + void setInput(int i, const cv::Mat &m); + void setOutput(int i, cv::Mat &m); + cv::Mat allocOutput(int i) const; + + // Run with the assigned inputs/outputs + void run(); +}; + +} // namespace onnx +} // namespace gimpl +} // namespace cv + +namespace { + +inline std::vector getCharNames(const std::vector& names) { + std::vector out_vec; + for (const auto& el : names) { + out_vec.push_back(el.data()); + } + return out_vec; +} + +inline int getIdxByName(const std::vector& info, const std::string& name) { + // FIXME: Cache the ordering + const auto it = std::find_if(info.begin(), info.end(), [&](const cv::gimpl::onnx::TensorInfo &i) { + return i.name == name; + }); + GAPI_Assert(it != info.end()); + return std::distance(info.begin(), it); +} + +inline int toCV(ONNXTensorElementDataType prec) { + switch (prec) { + case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return CV_8U; + case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return CV_32F; + default: GAPI_Assert(false && "Unsupported data type"); + } + return -1; +} + +inline std::vector toCV(const std::vector &vsz) { + std::vector result; + result.reserve(vsz.size()); + for (auto sz : vsz) { + 
result.push_back(ade::util::checked_cast(sz)); + } + return result; +} + +inline cv::Mat toCV(Ort::Value &v) { + auto info = v.GetTensorTypeAndShapeInfo(); + return cv::Mat(toCV(info.GetShape()), + toCV(info.GetElementType()), + reinterpret_cast(v.GetTensorMutableData())); +} + +inline std::vector toORT(const cv::MatSize &sz) { + return cv::to_own(sz); +} + +inline void preprocess(const cv::Mat& src, + const cv::gimpl::onnx::TensorInfo& ti, + cv::Mat& dst) { + GAPI_Assert(src.depth() == CV_32F || src.depth() == CV_8U); + + if (src.depth() == CV_32F) { + // Just pass the tensor as-is. + // No layout or dimension transformations done here! + // TODO: This needs to be aligned across all NN backends. + GAPI_Assert(toCV(ti.type) == CV_32F && "Only 32F model input is supported for 32F data"); + GAPI_Assert(toORT(src.size) == ti.dims && "32F tensor dimensions should match with NN input"); + GAPI_Assert(!ti.is_dynamic && "Dynamic inputs are not supported for this case"); + dst = src; + } else { + // 8U input: full preprocessing path + GAPI_Assert(src.depth() == CV_8U && "Only 8U data type is supported for preproc"); + GAPI_Assert(ti.dims.size() == 4u && "Only NCHW/NHWC layouts are supported for preproc"); + + const auto ddepth = toCV(ti.type); + GAPI_Assert((ddepth == CV_8U || ddepth == CV_32F) + && "Only 8U and 32F model input is supported for 8U data"); + + // Assess the expected input layout + const bool is_hwc = [&](int ch) { + if (ti.is_grayscale) return false; // 1,1,h,w + else if (ti.dims[3] == ch) return true; // _,_,_,c + else if (ti.dims[1] == ch) return false; // _,c,_,_ + else cv::util::throw_error(std::logic_error("Couldn't identify input tensor layout")); + } (src.channels()); + + int new_c = src.channels(); + cv::Mat csc; + if (ti.is_grayscale && new_c == 3) { + cv::cvtColor(src, csc, cv::COLOR_BGR2GRAY); + new_c = 1; + } else { + csc = src; + } + + // NHWC vs NCHW + int new_h = -1, new_w = -1; + if (ti.is_dynamic) { + // reuse h & w from the input image + 
new_h = src.rows; + new_w = src.cols; + } else { + // take h & w from the ONNX tensor info + new_h = ti.dims[is_hwc ? 1 : 2]; + new_w = ti.dims[is_hwc ? 2 : 3]; + } + GAPI_Assert(new_h != -1 && new_w != -1); + + cv::Mat rsz, pp; + cv::resize(csc, rsz, cv::Size(new_w, new_h)); + if (src.depth() == CV_8U && ddepth == CV_32F) { + rsz.convertTo(pp, ddepth, ti.normalize ? 1.f / 255 : 1.f); + if (ti.mstd.has_value()) { + pp -= ti.mstd->mean; + pp /= ti.mstd->stdev; + } + } else { + pp = rsz; + } + + if (!is_hwc && new_c > 1) { + // Convert to CHW + dst.create(cv::Size(new_w, new_h * new_c), ddepth); + std::vector planes(new_c); + for (int ch = 0; ch < new_c; ++ch) { + planes[ch] = dst.rowRange(ch * new_h, (ch + 1) * new_h); + } + cv::split(pp, planes); + } else { + // Keep HWC + dst = pp; + } + + // Ensure dst is a tensor shape (not a 2D image) + if (ti.is_dynamic) { + // Reshape to input dimensions + const std::vector out_dims = is_hwc + ? std::vector{1, new_h, new_w, new_c} + : std::vector{1, new_c, new_h, new_w}; + dst = dst.reshape(1, out_dims); + } else { + // Reshape to ONNX dimensions (no -1s there!) 
+ dst = dst.reshape(1, toCV(ti.dims)); + } + } +} + +template +inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info, + const cv::gimpl::onnx::TensorInfo& tensor_params, + const cv::Mat& data) { + (void) tensor_params; + auto ort_dims = toORT(data.size); + return Ort::Value::CreateTensor(memory_info, + const_cast(data.ptr()), + data.total(), + ort_dims.data(), + ort_dims.size()); +} + +inline Ort::Value createTensor(const Ort::MemoryInfo& memory_info, + const cv::gimpl::onnx::TensorInfo& tensor_params, + const cv::Mat& data) { + GAPI_Assert(data.isContinuous ()); + switch (tensor_params.type) { + case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: + return createTensor(memory_info, tensor_params, data); + case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: + return createTensor(memory_info, tensor_params, data); + default: + GAPI_Assert(false && "Unsupported data type"); + } + return Ort::Value{nullptr}; +} + +struct ONNXUnit { + static const char *name() { return "ONNXModelConfig"; } + + std::shared_ptr oc; + + explicit ONNXUnit(const cv::gapi::onnx::detail::ParamDesc &pp) + : oc(new cv::gimpl::onnx::ONNXCompiled(pp)) { + } +}; + +struct ONNXCallContext { + // Input parameters passed to an inference operation. + std::vector args; + + //FIXME: avoid conversion of arguments from internal representation to OpenCV one on each call + //to OCV kernel. 
(This can be achieved by a two single time conversions in GCPUExecutable::run, + //once on enter for input and output arguments, and once before return for output arguments only + //FIXME: check if the above applies to this backend (taken from CPU) + std::unordered_map results; + + // Generic accessor API + template + const T& inArg(std::size_t input) { return args.at(input).get(); } + + // Syntax sugar + const cv::Mat& inMat(std::size_t input) { + return inArg(input); + } + cv::Mat& outMatR(std::size_t output) { + return *cv::util::get(results.at(output)); + } + + template std::vector& outVecR(std::size_t output) { // FIXME: the same issue + return outVecRef(output).wref(); + } + cv::detail::VectorRef& outVecRef(std::size_t output) { + return cv::util::get(results.at(output)); + } +}; + +struct ONNXCallable { + static const char *name() { return "ONNXRequestCallable"; } + using Run = std::function; + Run run; +}; + +struct KImpl { + cv::gimpl::CustomMetaFunction::CM customMetaFunc; + ONNXCallable::Run run; +}; + +// FIXME: Is there a way to take a typed graph (our GModel), +// and create a new typed graph _ATOP_ of that (by extending with a couple of +// new types?). +// Alternatively, is there a way to compose types graphs? +// +// If not, we need to introduce that! 
+using GONNXModel = ade::TypedGraph + < cv::gimpl::Protocol + , cv::gimpl::Op + , cv::gimpl::NetworkParams + , cv::gimpl::CustomMetaFunction + , ONNXUnit + , ONNXCallable + >; + +// FIXME: Same issue with Typed and ConstTyped +using GConstGONNXModel = ade::ConstTypedGraph + < cv::gimpl::Protocol + , cv::gimpl::Op + , cv::gimpl::NetworkParams + , cv::gimpl::CustomMetaFunction + , ONNXUnit + , ONNXCallable + >; +} // anonymous namespace + +// GCPUExcecutable implementation ////////////////////////////////////////////// +cv::gimpl::onnx::GONNXExecutable::GONNXExecutable(const ade::Graph &g, + const std::vector &nodes) + : m_g(g), m_gm(m_g) { + // FIXME: Currently this backend is capable to run a single inference node only. + // Need to extend our island fusion with merge/not-to-merge decision making parametrization + GConstGONNXModel iem(g); + + for (auto &nh : nodes) { + switch (m_gm.metadata(nh).get().t) { + case NodeType::OP: + if (this_nh == nullptr) { + this_nh = nh; + } + else { + util::throw_error(std::logic_error("Multi-node inference is not supported!")); + } + break; + + case NodeType::DATA: { + m_dataNodes.push_back(nh); + const auto &desc = m_gm.metadata(nh).get(); + if (desc.storage == Data::Storage::CONST_VAL) { + util::throw_error(std::logic_error("No const data supported in backend!")); + } + if (desc.storage == Data::Storage::INTERNAL) { + util::throw_error(std::logic_error("No internal data supported in backend!")); + } + break; + } + default: util::throw_error(std::logic_error("Unsupported NodeType")); + } + } +} + +// FIXME: Document what it does +cv::GArg cv::gimpl::onnx::GONNXExecutable::packArg(const cv::GArg &arg) { + // No API placeholders allowed at this point + // FIXME: this check has to be done somewhere in compilation stage. 
+ GAPI_Assert( arg.kind != cv::detail::ArgKind::GMAT + && arg.kind != cv::detail::ArgKind::GSCALAR + && arg.kind != cv::detail::ArgKind::GARRAY + && arg.kind != cv::detail::ArgKind::GOPAQUE); + + if (arg.kind != cv::detail::ArgKind::GOBJREF) { + util::throw_error(std::logic_error("Inference supports G-types ONLY!")); + } + GAPI_Assert(arg.kind == cv::detail::ArgKind::GOBJREF); + + // Wrap associated CPU object (either host or an internal one) + // FIXME: object can be moved out!!! GExecutor faced that. + const cv::gimpl::RcDesc &ref = arg.get(); + switch (ref.shape) + { + case GShape::GMAT: return GArg(m_res.slot()[ref.id]); + + // Note: .at() is intentional for GArray as object MUST be already there + // (and constructed by either bindIn/Out or resetInternal) + case GShape::GARRAY: return GArg(m_res.slot().at(ref.id)); + + // Note: .at() is intentional for GOpaque as object MUST be already there + // (and constructed by either bindIn/Out or resetInternal) + case GShape::GOPAQUE: return GArg(m_res.slot().at(ref.id)); + + default: + util::throw_error(std::logic_error("Unsupported GShape type")); + break; + } +} + +void cv::gimpl::onnx::GONNXExecutable::run(std::vector &&input_objs, + std::vector &&output_objs) { + // Update resources with run-time information - what this Island + // has received from user (or from another Island, or mix...) + // FIXME: Check input/output objects against GIsland protocol + + for (auto& it : input_objs) magazine::bindInArg (m_res, it.first, it.second); + for (auto& it : output_objs) magazine::bindOutArg(m_res, it.first, it.second); + + // FIXME: Running just a single node now. + // Not sure if need to support many of them, though + // FIXME: Make this island-unmergeable? 
+ const auto &op = m_gm.metadata(this_nh).get(); + + // Initialize kernel's execution context: + // - Input parameters + ONNXCallContext context; + context.args.reserve(op.args.size()); + using namespace std::placeholders; + ade::util::transform(op.args, + std::back_inserter(context.args), + std::bind(&GONNXExecutable::packArg, this, _1)); + + // - Output parameters. + for (const auto &out_it : ade::util::indexed(op.outs)) { + // FIXME: Can the same GArg type resolution mechanism be reused here? + const auto out_port = ade::util::index(out_it); + const auto out_desc = ade::util::value(out_it); + context.results[out_port] = magazine::getObjPtr(m_res, out_desc); + } + + // And now trigger the execution + GConstGONNXModel giem(m_g); + const auto &uu = giem.metadata(this_nh).get(); + const auto &kk = giem.metadata(this_nh).get(); + kk.run(uu, context); + + for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second); +} + +namespace cv { +namespace gimpl { +namespace onnx { + +ONNXCompiled::ONNXCompiled(const gapi::onnx::detail::ParamDesc &pp) + : params(pp) { + + // Validate input parameters before allocating any resources + if (params.num_in > 1u && params.num_in != params.input_names.size()) { + cv::util::throw_error(std::logic_error("Please specify input layer names for " + + params.model_path)); + } + if (params.num_out > 1u && params.num_out != params.output_names.size()) { + cv::util::throw_error(std::logic_error("Please specify output layer names for " + + params.model_path)); + } + + // Create and initialize the ONNX session + Ort::SessionOptions session_options; + this_env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, ""); + this_session = Ort::Session(this_env, params.model_path.data(), session_options); + this_memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); + + in_tensor_info = getTensorInfo(INPUT); + out_tensor_info = getTensorInfo(OUTPUT); + + const auto is_dyn = [](const TensorInfo &ti) { + return ti.is_dynamic; + 
}; + is_dynamic = ade::util::any_of(in_tensor_info, is_dyn) + || ade::util::any_of(out_tensor_info, is_dyn); + if (is_dynamic && !params.custom_post_proc) { + util::throw_error(std::logic_error("This network has dynamic shapes. " + "Please provide a custom post-processing function " + "(.cfgPostProc) in network parameters")); + } + + // Update parameters based on session information + if (params.num_in == 1u && params.input_names.empty()) { + params.input_names = { in_tensor_info.front().name }; + } + if (params.num_out == 1u && params.output_names.empty()) { + params.output_names = { out_tensor_info.front().name }; + } + + // Validate what is supported currently + GAPI_Assert(params.const_inputs.empty() + && "Const inputs are not currently supported"); + GAPI_Assert(std::all_of(in_tensor_info.begin(), + in_tensor_info.end(), + [](const cv::gimpl::onnx::TensorInfo &p) { + return p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT + || p.type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8; + }) + && "Only FP32 and U8 inputs for NN are supported"); + + // Put mean and std in appropriate tensor params + if (!params.mean.empty() || !params.stdev.empty()) { + GAPI_Assert(params.mean.size() == params.stdev.size() && + params.mean.size() == params.input_names.size()); + for (auto idx : ade::util::iota(params.num_in)) { + const auto ort_idx = getIdxByName(in_tensor_info, params.input_names[idx]); + using M = TensorInfo::MeanStdev; + in_tensor_info[ort_idx].mstd = util::make_optional(M{ params.mean[idx] + , params.stdev[idx] }); + } + } + + // Update normalize flags for input tensors + if (!params.normalize.empty()) { + for (auto idx : ade::util::iota(params.num_in)) { + const auto ort_idx = getIdxByName(in_tensor_info, params.input_names[idx]); + in_tensor_info[ort_idx].normalize = params.normalize[idx]; + } + } + + // Pre-allocate vectors (not buffers) for runtime info + in_data.resize(params.num_in); + out_data.resize(params.num_out); +} + +std::vector 
ONNXCompiled::getTensorInfo(TensorPosition pos) { + GAPI_Assert(pos == INPUT || pos == OUTPUT); + + const auto num_nodes = pos == INPUT + ? this_session.GetInputCount() + : this_session.GetOutputCount(); + + std::vector tensor_info; + tensor_info.reserve(num_nodes); + + Ort::AllocatorWithDefaultOptions allocator; + for (auto i : ade::util::iota(num_nodes)) { + const auto info = pos == INPUT + ? this_session.GetInputTypeInfo(i) + : this_session.GetOutputTypeInfo(i); + tensor_info.emplace_back(info.GetTensorTypeAndShapeInfo()); + + char *name_p = pos == INPUT + ? this_session.GetInputName(i, allocator) + : this_session.GetOutputName(i, allocator); + tensor_info.back().name = name_p; + allocator.Free(name_p); + } + + return tensor_info; +} + +cv::GMatDesc ONNXCompiled::outMeta(int idx) const { + if (is_dynamic) { + GAPI_Assert(!params.out_metas.empty() + && "Metadata must be specified if NN has dynamic inputs!"); + return params.out_metas.at(idx); + } + const auto ort_idx = getIdxByName(out_tensor_info, params.output_names[idx]); + return cv::GMatDesc(toCV(out_tensor_info[ort_idx].type), + toCV(out_tensor_info[ort_idx].dims)); +} + +void ONNXCompiled::setInput(int i, const cv::Mat &m) { + const auto in_idx = i; + const auto in_name = params.input_names[in_idx]; + const auto ort_idx = getIdxByName(in_tensor_info, in_name); + preprocess(m, in_tensor_info[ort_idx], in_data[in_idx]); +} + +void ONNXCompiled::setOutput(int i, cv::Mat &m) { + // FIXME: No need in double-indexing? 
+ out_data[i] = m; +} + +cv::Mat ONNXCompiled::allocOutput(int i) const { + cv::Mat m; + m.create(toCV(out_tensor_info[i].dims), + toCV(out_tensor_info[i].type)); + return m; +} + +void ONNXCompiled::Run(const std::vector& ins, + const std::vector& outs) { + std::vector in_tensors, out_tensors; + + auto in_run_names = getCharNames(params.input_names); + + for (const auto it : ade::util::indexed(params.input_names)) { + auto i = ade::util::index(it); + auto in_name = ade::util::value(it); + const auto idx = getIdxByName(in_tensor_info, in_name); + in_tensors.emplace_back(createTensor(this_memory_info, + in_tensor_info[idx], + ins[i])); + } + + if (!is_dynamic) { + // Easy path - just run the session which is bound to G-API's + // internal data + for (auto i : ade::util::iota(params.output_names.size())) { + out_tensors.emplace_back(createTensor(this_memory_info, + out_tensor_info[i], + outs[i])); + } + auto out_run_names = getCharNames(params.output_names); + this_session.Run(Ort::RunOptions{nullptr}, + in_run_names.data(), + &in_tensors.front(), + params.input_names.size(), + out_run_names.data(), + &out_tensors.front(), + params.output_names.size()); + } else { + // Hard path - run session & user-defined post-processing + // NOTE: use another list of output names here + std::vector out_names; + for (auto &&ti : out_tensor_info) { + out_names.push_back(ti.name.c_str()); + } + + auto outputs = this_session.Run(Ort::RunOptions{nullptr}, + in_run_names.data(), + &in_tensors.front(), + params.input_names.size(), + out_names.data(), + out_names.size()); + std::unordered_map onnx_outputs; + std::unordered_map gapi_outputs; + + GAPI_Assert(outputs.size() == out_names.size()); + // Fill in ONNX tensors + for (auto &&iter : ade::util::zip(ade::util::toRange(out_tensor_info), + ade::util::toRange(outputs))) { + const auto &out_name = std::get<0>(iter).name; + auto &out_tensor = std::get<1>(iter); + onnx_outputs[out_name] = toCV(out_tensor); + } + + // Fill in G-API outputs + 
for (auto &&it: ade::util::indexed(params.output_names)) { + gapi_outputs[ade::util::value(it)] = outs[ade::util::index(it)]; + } + params.custom_post_proc(onnx_outputs, gapi_outputs); + } +} + +void ONNXCompiled::run() { + Run(in_data, out_data); +} + +struct Infer: public cv::detail::KernelTag { + using API = cv::GInferBase; + static cv::gapi::GBackend backend() { return cv::gapi::onnx::backend(); } + static KImpl kernel() { return KImpl{outMeta, run}; } + + static cv::GMetaArgs outMeta(const ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GMetaArgs &in_metas, + const cv::GArgs &/*in_args*/) { + cv::GMetaArgs result; + + GConstGONNXModel gm(gr); + const auto &uu = gm.metadata(nh).get(); + + GAPI_Assert(uu.oc->numInputs() == in_metas.size() + && "Known input layers count doesn't match input meta count"); + for (auto &&mm : in_metas) { + GAPI_Assert(util::holds_alternative(mm) + && "Non-GMat inputs are not supported"); + } + for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) { + result.emplace_back(uu.oc->outMeta(idx)); + } + return result; + } + + static void run(const ONNXUnit &uu, ONNXCallContext &ctx) { + for (auto &&idx : ade::util::iota(uu.oc->numInputs())) { + uu.oc->setInput(idx, ctx.inMat(idx)); + } + for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) { + uu.oc->setOutput(idx, ctx.outMatR(idx)); + } + uu.oc->run(); + } +}; + +struct InferROI: public cv::detail::KernelTag { + using API = cv::GInferROIBase; + static cv::gapi::GBackend backend() { return cv::gapi::onnx::backend(); } + static KImpl kernel() { return KImpl{outMeta, run}; } + + static cv::GMetaArgs outMeta(const ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GMetaArgs &in_metas, + const cv::GArgs &/*in_args*/) { + cv::GMetaArgs result; + + GConstGONNXModel gm(gr); + const auto &uu = gm.metadata(nh).get(); + GAPI_Assert(1u == uu.oc->numInputs()); + GAPI_Assert(2u == in_metas.size()); + + for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) { + 
result.emplace_back(uu.oc->outMeta(idx)); + } + return result; + } + + static void run(const ONNXUnit &uu, ONNXCallContext &ctx) { + // non-generic version for now, per the InferROI's definition + GAPI_Assert(uu.oc->numInputs() == 1u); + const auto& this_roi = ctx.inArg(0).rref(); + const auto this_mat = ctx.inMat(1); + + uu.oc->setInput(0, this_mat(this_roi)); + for (auto &&idx : ade::util::iota(uu.oc->numOutputs())) { + uu.oc->setOutput(idx, ctx.outMatR(idx)); + } + uu.oc->run(); + } +}; + +struct InferList: public cv::detail::KernelTag { + using API = cv::GInferListBase; + static cv::gapi::GBackend backend() { return cv::gapi::onnx::backend(); } + static KImpl kernel() { return KImpl{outMeta, run}; } + + static cv::GMetaArgs outMeta(const ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GMetaArgs &in_metas, + const cv::GArgs &/*in_args*/) { + GConstGONNXModel gm(gr); + const auto &uu = gm.metadata(nh).get(); + + // Note our input layers list order matches the API order and so + // meta order. + GAPI_Assert(uu.oc->numInputs() == (in_metas.size() - 1u) + && "Known input layers count doesn't match input meta count"); + + for (auto i : ade::util::iota(uu.oc->numInputs())) { + const auto & mm = in_metas[i + 1]; + + GAPI_Assert(util::holds_alternative(mm) + && "Non-GMat inputs are not supported"); + } + + // roi-list version is much easier at the moment. + // All our outputs are vectors which don't have + // metadata at the moment - so just create a vector of + // "empty" array metadatas of the required size. 
+ return cv::GMetaArgs(uu.oc->numOutputs(), + cv::GMetaArg{cv::empty_array_desc()}); + } + + static void run(const ONNXUnit &uu, ONNXCallContext &ctx) { + // non-generic version for now: + // - assumes input 0 is always ROI list + // - assumes all inputs/outputs are always Mats + GAPI_Assert(uu.oc->numInputs() == 1); // roi list is not counted in net's inputs + + const auto& in_roi_vec = ctx.inArg(0u).rref(); + const cv::Mat this_mat = ctx.inMat(1u); + + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + ctx.outVecR(i).clear(); + } + for (const auto &rc : in_roi_vec) { + uu.oc->setInput(0, this_mat(rc)); + std::vector out_mats(uu.oc->numOutputs()); + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + out_mats[i] = uu.oc->allocOutput(i); + uu.oc->setOutput(i, out_mats[i]); + } + uu.oc->run(); + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + std::vector &out_vec = ctx.outVecR(i); + out_vec.push_back(std::move(out_mats[i])); + } + } + } +}; + +struct InferList2: public cv::detail::KernelTag { + using API = cv::GInferList2Base; + static cv::gapi::GBackend backend() { return cv::gapi::onnx::backend(); } + static KImpl kernel() { return KImpl{outMeta, run}; } + + static cv::GMetaArgs outMeta(const ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GMetaArgs &in_metas, + const cv::GArgs &/*in_args*/) { + + GConstGONNXModel gm(gr); + const auto &uu = gm.metadata(nh).get(); + + // Note our input layers list order matches the API order and so + // meta order. + GAPI_Assert(uu.oc->numInputs() == (in_metas.size() - 1u) + && "Known input layers count doesn't match input meta count"); + + // In contrast to InferList, the InferList2 has only one + // "full-frame" image argument, and all the rest are arrays of + // either ROI or blobs. So here we set the 0th arg image format + // to all inputs which are ROI-based (skipping the + // "blob"-based ones) + // FIXME: this is filtering not done, actually! GArrayDesc has + // no hint for type! 
+ const auto &mm_0 = in_metas[0u]; + const auto &meta_0 = util::get(mm_0); + GAPI_Assert( !meta_0.isND() + && !meta_0.planar + && "Only images are supported as the 0th argument"); + for (auto i : ade::util::iota(uu.oc->numInputs())) { + const auto &mm = in_metas[i + 1]; + GAPI_Assert(util::holds_alternative(mm) + && "Non-array inputs are not supported"); + } + + // roi-list version is much easier at the moment. + // All our outputs are vectors which don't have + // metadata at the moment - so just create a vector of + // "empty" array metadatas of the required size. + return cv::GMetaArgs(uu.oc->numOutputs(), + cv::GMetaArg{cv::empty_array_desc()}); + } + + static void run(const ONNXUnit &uu, ONNXCallContext &ctx) { + GAPI_Assert(ctx.args.size() > 1u + && "This operation must have at least two arguments"); + + // Since we do a ROI list inference, always assume our input buffer is image + const cv::Mat mat_0 = ctx.inMat(0u); + // Take the next argument, which must be vector (of any kind). + // Use this only to obtain the ROI list size (sizes of all + // other vectors must be equal to this one) + const auto list_size = ctx.inArg(1u).size(); + + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + ctx.outVecR(i).clear(); + } + // For every ROI in the list {{{ + for (const auto &list_idx : ade::util::iota(list_size)) { + std::vector in_tensors, out_tensors; + std::vector in_mats(uu.oc->numInputs()); + // For every input of the net {{{ + for (auto in_idx : ade::util::iota(uu.oc->numInputs())) { + const auto &this_vec = ctx.inArg(in_idx+1u); + GAPI_Assert(this_vec.size() == list_size); + // Prepare input {{{ + // FIXME: Terrible run-time logic based on RTTI! + // FIXME: Will never work on non-RTTI systems! + // FIXME: Need to replace with a static type tags + // (like with serialization) instead! 
+ if (this_vec.holds()) { + // ROI case - create an ROI blob + const auto &vec = this_vec.rref(); + uu.oc->setInput(in_idx, mat_0(vec[list_idx])); + } else if (this_vec.holds()) { + // Mat case - create a regular blob + // FIXME: NOW Assume Mats are always BLOBS (not + // images) + const auto &vec = this_vec.rref(); + uu.oc->setInput(in_idx, vec[list_idx]); + } else { + GAPI_Assert(false && "Only Rect and Mat types are supported for infer list 2!"); + } + // }}} (Prepare input) + } // }}} (For every input of the net) + + std::vector out_mats(uu.oc->numOutputs()); + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + out_mats[i] = uu.oc->allocOutput(i); + uu.oc->setOutput(i, out_mats[i]); + } + uu.oc->run(); + + for (auto i : ade::util::iota(uu.oc->numOutputs())) { + std::vector &out_vec = ctx.outVecR(i); + out_vec.push_back(std::move(out_mats[i])); + } + } // }}} (For every ROI in the list) + } +}; + +} // namespace onnx +} // namespace gapi +} // namespace cv + +namespace { + class GONNXBackendImpl final: public cv::gapi::GBackend::Priv { + virtual void unpackKernel(ade::Graph &gr, + const ade::NodeHandle &nh, + const cv::GKernelImpl &ii) override { + using namespace cv::gimpl; + // FIXME: Introduce a DNNBackend interface which'd specify + // the framework for this??? 
+ GONNXModel gm(gr); + const auto &np = gm.metadata(nh).get(); + const auto &pp = cv::util::any_cast(np.opaque); + const auto &ki = cv::util::any_cast(ii.opaque); + gm.metadata(nh).set(ONNXUnit{pp}); + gm.metadata(nh).set(ONNXCallable{ki.run}); + gm.metadata(nh).set(CustomMetaFunction{ki.customMetaFunc}); + } + + virtual EPtr compile(const ade::Graph &graph, + const cv::GCompileArgs &, + const std::vector &nodes) const override { + return EPtr{new cv::gimpl::onnx::GONNXExecutable(graph, nodes)}; + } + + virtual cv::gapi::GKernelPackage auxiliaryKernels() const override { + return cv::gapi::kernels< cv::gimpl::onnx::Infer + , cv::gimpl::onnx::InferROI + , cv::gimpl::onnx::InferList + , cv::gimpl::onnx::InferList2 + >(); + } + }; +} + +cv::gapi::GBackend cv::gapi::onnx::backend() { + static cv::gapi::GBackend this_backend(std::make_shared()); + return this_backend; +} +#else // HAVE_ONNX + +cv::gapi::GBackend cv::gapi::onnx::backend() { + // Still provide this symbol to avoid linking issues + util::throw_error(std::runtime_error("G-API has been compiled without ONNX support")); +} +#endif // HAVE_ONNX diff --git a/modules/gapi/src/backends/onnx/gonnxbackend.hpp b/modules/gapi/src/backends/onnx/gonnxbackend.hpp new file mode 100644 index 0000000000..a3cc897030 --- /dev/null +++ b/modules/gapi/src/backends/onnx/gonnxbackend.hpp @@ -0,0 +1,56 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#ifndef OPENCV_GAPI_GONNXBACKEND_HPP +#define OPENCV_GAPI_GONNXBACKEND_HPP + +#include "opencv2/gapi/infer/onnx.hpp" +#ifdef HAVE_ONNX + +#include +#include // type_list_index + +#include "backends/common/gbackend.hpp" + +namespace cv { +namespace gimpl { +namespace onnx { + +class GONNXExecutable final: public GIslandExecutable +{ + const ade::Graph &m_g; + GModel::ConstGraph m_gm; + + // The only executable stuff in this graph + // (assuming it is always single-op) + ade::NodeHandle this_nh; + + // List of all resources in graph (both internal and external) + std::vector m_dataNodes; + + // Actual data of all resources in graph (both internal and external) + Mag m_res; + + // Execution helpers + GArg packArg(const GArg &arg); + +public: + GONNXExecutable(const ade::Graph &graph, + const std::vector &nodes); + + virtual inline bool canReshape() const override { return false; } + virtual inline void reshape(ade::Graph&, const GCompileArgs&) override { + GAPI_Assert(false); // Not implemented yet + } + + virtual void run(std::vector &&input_objs, + std::vector &&output_objs) override; +}; + +}}} // namespace cv::gimpl::onnx + +#endif // HAVE_ONNX +#endif // OPENCV_GAPI_GONNXBACKEND_HPP diff --git a/modules/gapi/src/compiler/passes/kernels.cpp b/modules/gapi/src/compiler/passes/kernels.cpp index 100a32ec57..837e21f19a 100644 --- a/modules/gapi/src/compiler/passes/kernels.cpp +++ b/modules/gapi/src/compiler/passes/kernels.cpp @@ -141,6 +141,7 @@ void cv::gimpl::passes::bindNetParams(ade::passes::PassContext &ctx, continue; pgr.metadata(nh).set(NetworkParams{it->params}); + op.backend = it->backend; } } } @@ -181,13 +182,25 @@ void cv::gimpl::passes::resolveKernels(ade::passes::PassContext &ctx, // of the same kernel to be presented in the kernel // package (as it was designed originally). 
- cv::gapi::GBackend selected_backend; - cv::GKernelImpl selected_impl; - std::tie(selected_backend, selected_impl) = kernels.lookup(op.k.name); + cv::GKernelImpl selected_impl; - selected_backend.priv().unpackKernel(ctx.graph, nh, selected_impl); - op.backend = selected_backend; - active_backends.insert(selected_backend); + if (op.backend == cv::gapi::GBackend()) { + std::tie(op.backend, selected_impl) = kernels.lookup(op.k.name); + } else { + // FIXME: This needs to be reworked properly + // Lookup for implementation from the pre-assigned backend + cv::gapi::GBackend dummy; + std::tie(dummy, selected_impl) = op.backend.priv() + .auxiliaryKernels().lookup(op.k.name); + // FIXME: Warning here! + // This situation may happen when NN (infer) backend was assigned + // by tag in bindNetParams (see above) but at this stage the operation + // lookup resulted in another backend (and it is perfectly valid when + // we have multiple NN backends available). + } + + op.backend.priv().unpackKernel(ctx.graph, nh, selected_impl); + active_backends.insert(op.backend); if (gr.metadata().contains()) { diff --git a/modules/gapi/test/infer/gapi_infer_onnx_test.cpp b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp new file mode 100644 index 0000000000..ebb8020e9a --- /dev/null +++ b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp @@ -0,0 +1,278 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#include "../test_precomp.hpp" + +#ifdef HAVE_ONNX + +#include +#include +#include + +#include + +namespace { + +struct ONNXInitPath { + ONNXInitPath() { + const char* env_path = getenv("OPENCV_GAPI_ONNX_MODEL_PATH"); + if (env_path) + cvtest::addDataSearchPath(env_path); + } +}; +static ONNXInitPath g_init_path; + +cv::Mat initMatrixRandU(int type, cv::Size sz_in) +{ + cv::Mat in_mat1 = cv::Mat(sz_in, type); + + if (CV_MAT_DEPTH(type) < CV_32F) + { + cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(255)); + } + else + { + const int fscale = 256; // avoid bits near ULP, generate stable test input + cv::Mat in_mat32s(in_mat1.size(), CV_MAKE_TYPE(CV_32S, CV_MAT_CN(type))); + cv::randu(in_mat32s, cv::Scalar::all(0), cv::Scalar::all(255 * fscale)); + in_mat32s.convertTo(in_mat1, type, 1.0f / fscale, 0); + } + return in_mat1; +} +} +namespace opencv_test +{ +namespace { +// FIXME: taken from the DNN module +void normAssert(cv::InputArray ref, cv::InputArray test, + const char *comment /*= ""*/, + double l1 = 0.00001, double lInf = 0.0001) +{ + double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total(); + EXPECT_LE(normL1, l1) << comment; + + double normInf = cvtest::norm(ref, test, cv::NORM_INF); + EXPECT_LE(normInf, lInf) << comment; +} + +std::string findModel(const std::string &model_name) +{ + return findDataFile("vision/classification/squeezenet/model/" + model_name + ".onnx", false); +} + +inline void preprocess(const cv::Mat& src, + cv::Mat& dst, + const cv::Scalar& mean, + const cv::Scalar& std) { + int new_h = 224; + int new_w = 224; + cv::Mat tmp, nmat, cvt; + cv::resize(src, dst, cv::Size(new_w, new_h)); + dst.convertTo(cvt, CV_32F, 1.f / 255); + nmat = cvt - mean; + tmp = nmat / std; + dst.create(cv::Size(new_w, new_h * src.channels()), CV_32F); + std::vector planes; + for (int i = 0; i < src.channels(); ++i) { + planes.push_back(dst.rowRange(i * new_h, (i + 1) * new_h)); + } + 
cv::split(tmp, planes); +} + +void InferONNX(const std::string& model_path, + const cv::Mat& in, + cv::Mat& out, + const cv::Scalar& mean, + const cv::Scalar& std) +{ + // FIXME: It must be a FIXTURE test! + Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test"); + Ort::SessionOptions session_options; + Ort::Session session(env, model_path.data(), session_options); + auto input_node_dims = // 0 - one input + session.GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape(); + auto output_node_dims = // 0 - one output + session.GetOutputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape(); + Ort::AllocatorWithDefaultOptions allocator; + char* in_node_name_p = session.GetInputName(0, allocator); + char* out_node_name_p = session.GetOutputName(0, allocator); + std::string in_node_name(in_node_name_p); + std::string out_node_name(out_node_name_p); + allocator.Free(in_node_name_p); + allocator.Free(out_node_name_p); + + auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); + cv::Mat dst; + preprocess(in, dst, mean, std); + + out.create(std::vector(output_node_dims.begin(), + output_node_dims.end()), CV_32F); // empty output Mat + auto in_tensor = Ort::Value::CreateTensor(memory_info, + dst.ptr(), + dst.total(), + input_node_dims.data(), + input_node_dims.size()); + auto out_tensor = Ort::Value::CreateTensor(memory_info, + out.ptr(), + out.total(), + output_node_dims.data(), + output_node_dims.size()); + std::vector in_names = {in_node_name.data()}; + std::vector out_names = {out_node_name.data()}; + session.Run(Ort::RunOptions{nullptr}, + in_names.data(), + &in_tensor, + session.GetInputCount(), + out_names.data(), + &out_tensor, + session.GetOutputCount()); +} + +} // anonymous namespace + +TEST(ONNX, Infer) +{ + cv::Mat in_mat1, out_gapi, out_onnx; + std::string model_path = findModel("squeezenet1.0-9"); + // NOTE: All tests check "random" image + // Ideally it should be a real image + in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480}); + + 
cv::Scalar mean = { 0.485, 0.456, 0.406 }; + cv::Scalar std = { 0.229, 0.224, 0.225 }; + + // ONNX_API code + InferONNX(model_path, in_mat1, out_onnx, mean, std); + + // G_API code + G_API_NET(SqueezNet, , "squeeznet"); + cv::GMat in; + cv::GMat out = cv::gapi::infer(in); + cv::GComputation comp(cv::GIn(in), cv::GOut(out)); + // NOTE: We have to normalize U8 tensor + // so cfgMeanStd() is here + auto net = cv::gapi::onnx::Params { model_path }.cfgMeanStd({mean},{std}); + comp.apply(cv::gin(in_mat1), + cv::gout(out_gapi), + cv::compile_args(cv::gapi::networks(net))); + + // Validate + ASSERT_EQ(1000u, out_onnx.total()); + ASSERT_EQ(1000u, out_gapi.total()); + normAssert(out_onnx, out_gapi, "Test classification output"); +} + +TEST(ONNX, InferROI) +{ + cv::Mat in_mat1, out_gapi, out_onnx; + std::string model_path = findModel("squeezenet1.0-9"); + in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480}); + + cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean + cv::Scalar std = { 0.229, 0.224, 0.225 }; // squeeznet std + + cv::Rect ROI(cv::Point{0, 0}, cv::Size{250, 250}); + // ONNX_API code + InferONNX(model_path, in_mat1(ROI), out_onnx, mean, std); + + // G_API code + G_API_NET(SqueezNet, , "squeeznet"); + cv::GMat in; + cv::GOpaque rect; + cv::GMat out = cv::gapi::infer(rect, in); + cv::GComputation comp(cv::GIn(in, rect), cv::GOut(out)); + auto net = cv::gapi::onnx::Params { model_path }.cfgMeanStd({mean},{std}); + comp.apply(cv::gin(in_mat1, ROI), + cv::gout(out_gapi), + cv::compile_args(cv::gapi::networks(net))); + + // Validate + ASSERT_EQ(1000u, out_onnx.total()); + ASSERT_EQ(1000u, out_gapi.total()); + normAssert(out_onnx, out_gapi, "Test classification output"); +} + +TEST(ONNX, InferROIList) +{ + cv::Mat in_mat1; + std::string model_path = findModel("squeezenet1.0-9"); + in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480}); + + cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean + cv::Scalar std = { 0.229, 0.224, 0.225 }; // squeeznet std 
+ + std::vector rois = { + cv::Rect(cv::Point{ 0, 0}, cv::Size{80, 120}), + cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}), + }; + std::vector out_gapi; + std::vector out_onnx(rois.size()); + // ONNX_API code + for (size_t i = 0; i < rois.size(); ++i) { + InferONNX(model_path, in_mat1(rois[i]), out_onnx[i], mean, std); + } + + // G_API code + G_API_NET(SqueezNet, , "squeeznet"); + cv::GMat in; + cv::GArray rr; + cv::GArray out = cv::gapi::infer(rr, in); + cv::GComputation comp(cv::GIn(in, rr), cv::GOut(out)); + auto net = cv::gapi::onnx::Params { model_path }.cfgMeanStd({mean},{std}); + comp.apply(cv::gin(in_mat1, rois), + cv::gout(out_gapi), + cv::compile_args(cv::gapi::networks(net))); + + // Validate + for (size_t i = 0; i < rois.size(); ++i) { + ASSERT_EQ(1000u, out_onnx[i].total()); + ASSERT_EQ(1000u, out_gapi[i].total()); + normAssert(out_onnx[i], out_gapi[i], "Test classification output"); + } +} + +TEST(ONNX, Infer2ROIList) +{ + cv::Mat in_mat1; + std::string model_path = findModel("squeezenet1.0-9"); + in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480}); + + cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean + cv::Scalar std = { 0.229, 0.224, 0.225 }; // squeeznet std + + std::vector rois = { + cv::Rect(cv::Point{ 0, 0}, cv::Size{80, 120}), + cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}), + }; + std::vector out_gapi; + std::vector out_onnx(rois.size()); + // ONNX_API code + for (size_t i = 0; i < rois.size(); ++i) { + InferONNX(model_path, in_mat1(rois[i]), out_onnx[i], mean, std); + } + + // G_API code + G_API_NET(SqueezNet, , "squeeznet"); + cv::GMat in; + cv::GArray rr; + cv::GArray out = cv::gapi::infer2(in,rr); + cv::GComputation comp(cv::GIn(in, rr), cv::GOut(out)); + auto net = cv::gapi::onnx::Params { model_path }.cfgMeanStd({mean},{std}); + comp.apply(cv::gin(in_mat1, rois), + cv::gout(out_gapi), + cv::compile_args(cv::gapi::networks(net))); + + // Validate + for (size_t i = 0; i < rois.size(); ++i) { + ASSERT_EQ(1000u, 
out_onnx[i].total()); + ASSERT_EQ(1000u, out_gapi[i].total()); + normAssert(out_onnx[i], out_gapi[i], "Test classification output"); + } +} + +} // namespace opencv_test + +#endif // HAVE_ONNX From 039795b4051456ee4f84f4b2359bdbcf491fba3f Mon Sep 17 00:00:00 2001 From: Mark Shachkov Date: Tue, 3 Nov 2020 21:54:56 +0300 Subject: [PATCH 064/152] Change naming of keypoints comparator --- modules/features2d/src/keypoint.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/features2d/src/keypoint.cpp b/modules/features2d/src/keypoint.cpp index 219634e5b4..bab1e22b45 100644 --- a/modules/features2d/src/keypoint.cpp +++ b/modules/features2d/src/keypoint.cpp @@ -44,9 +44,9 @@ namespace cv { -struct KeypointResponseGreaterThanThreshold +struct KeypointResponseGreaterThanOrEqualToThreshold { - KeypointResponseGreaterThanThreshold(float _value) : + KeypointResponseGreaterThanOrEqualToThreshold(float _value) : value(_value) { } @@ -83,7 +83,7 @@ void KeyPointsFilter::retainBest(std::vector& keypoints, int n_points) //use std::partition to grab all of the keypoints with the boundary response. std::vector::const_iterator new_end = std::partition(keypoints.begin() + n_points, keypoints.end(), - KeypointResponseGreaterThanThreshold(ambiguous_response)); + KeypointResponseGreaterThanOrEqualToThreshold(ambiguous_response)); //resize the keypoints, given this new end point. 
nth_element and partition reordered the points inplace keypoints.resize(new_end - keypoints.begin()); } From 21a8d9569d8de225509225193e9c02e622ff2702 Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Thu, 5 Nov 2020 13:11:31 +0300 Subject: [PATCH 065/152] videoio: added frameSize to MFX capture --- modules/videoio/src/cap_mfx_reader.cpp | 20 +++++++++++++++++--- modules/videoio/src/cap_mfx_reader.hpp | 1 + modules/videoio/test/test_mfx.cpp | 2 ++ 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/modules/videoio/src/cap_mfx_reader.cpp b/modules/videoio/src/cap_mfx_reader.cpp index 7df2cf56af..2f85bbe02a 100644 --- a/modules/videoio/src/cap_mfx_reader.cpp +++ b/modules/videoio/src/cap_mfx_reader.cpp @@ -111,6 +111,7 @@ VideoCapture_IntelMFX::VideoCapture_IntelMFX(const cv::String &filename) return; } + frameSize = Size(params.mfx.FrameInfo.CropW, params.mfx.FrameInfo.CropH); good = true; } @@ -126,10 +127,23 @@ VideoCapture_IntelMFX::~VideoCapture_IntelMFX() cleanup(deviceHandler); } -double VideoCapture_IntelMFX::getProperty(int) const +double VideoCapture_IntelMFX::getProperty(int prop) const { - MSG(cerr << "MFX: getProperty() is not implemented" << endl); - return 0; + if (!good) + { + MSG(cerr << "MFX: can not call getProperty(), backend has not been initialized" << endl); + return 0; + } + switch (prop) + { + case CAP_PROP_FRAME_WIDTH: + return frameSize.width; + case CAP_PROP_FRAME_HEIGHT: + return frameSize.height; + default: + MSG(cerr << "MFX: unsupported property" << endl); + return 0; + } } bool VideoCapture_IntelMFX::setProperty(int, double) diff --git a/modules/videoio/src/cap_mfx_reader.hpp b/modules/videoio/src/cap_mfx_reader.hpp index cad5297b8a..bd3673864c 100644 --- a/modules/videoio/src/cap_mfx_reader.hpp +++ b/modules/videoio/src/cap_mfx_reader.hpp @@ -34,6 +34,7 @@ private: MFXVideoDECODE *decoder; SurfacePool *pool; void *outSurface; + cv::Size frameSize; bool good; }; diff --git a/modules/videoio/test/test_mfx.cpp 
b/modules/videoio/test/test_mfx.cpp index f739cbda17..6613383fde 100644 --- a/modules/videoio/test/test_mfx.cpp +++ b/modules/videoio/test/test_mfx.cpp @@ -111,6 +111,8 @@ TEST_P(Videoio_MFX, read_write_raw) VideoCapture cap; cap.open(filename, CAP_INTEL_MFX); ASSERT_TRUE(cap.isOpened()); + EXPECT_EQ(FRAME_SIZE.width, cap.get(CAP_PROP_FRAME_WIDTH)); + EXPECT_EQ(FRAME_SIZE.height, cap.get(CAP_PROP_FRAME_HEIGHT)); for (int i = 0; i < FRAME_COUNT; ++i) { ASSERT_TRUE(cap.read(frame)); From a6e15b2f577b5d00bce2d8db624d95838e19609d Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Thu, 29 Oct 2020 23:38:30 +0000 Subject: [PATCH 066/152] cmake: prefer using Eigen configuration files - for better compatibility with Ceres 2.0.0 CMake scripts --- cmake/OpenCVFindLibsPerf.cmake | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cmake/OpenCVFindLibsPerf.cmake b/cmake/OpenCVFindLibsPerf.cmake index b9b1a95799..7b3874ff0e 100644 --- a/cmake/OpenCVFindLibsPerf.cmake +++ b/cmake/OpenCVFindLibsPerf.cmake @@ -51,7 +51,12 @@ endif(WITH_CUDA) # --- Eigen --- if(WITH_EIGEN AND NOT HAVE_EIGEN) - find_package(Eigen3 QUIET) + if(NOT OPENCV_SKIP_EIGEN_FIND_PACKAGE_CONFIG) + find_package(Eigen3 CONFIG QUIET) # Ceres 2.0.0 CMake scripts doesn't work with CMake's FindEigen3.cmake module (due to missing EIGEN3_VERSION_STRING) + endif() + if(NOT Eigen3_FOUND) + find_package(Eigen3 QUIET) + endif() if(Eigen3_FOUND) if(TARGET Eigen3::Eigen) From d9877efe1d9d8c12d2cc29aacd4b511af4345fa1 Mon Sep 17 00:00:00 2001 From: Ruslan Garnov Date: Fri, 6 Nov 2020 01:59:09 +0300 Subject: [PATCH 067/152] Added rmat.cpp and media.cpp to files being built in standalone --- modules/gapi/cmake/standalone.cmake | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/gapi/cmake/standalone.cmake b/modules/gapi/cmake/standalone.cmake index ca54697524..5cc57d8269 100644 --- a/modules/gapi/cmake/standalone.cmake +++ b/modules/gapi/cmake/standalone.cmake @@ -15,6 +15,8 @@ file(GLOB 
FLUID_includes "${FLUID_ROOT}/include/opencv2/*.hpp" "${FLUID_ROOT}/include/opencv2/gapi/own/*.hpp" "${FLUID_ROOT}/include/opencv2/gapi/fluid/*.hpp") file(GLOB FLUID_sources "${FLUID_ROOT}/src/api/g*.cpp" + "${FLUID_ROOT}/src/api/rmat.cpp" + "${FLUID_ROOT}/src/api/media.cpp" "${FLUID_ROOT}/src/compiler/*.cpp" "${FLUID_ROOT}/src/compiler/passes/*.cpp" "${FLUID_ROOT}/src/executor/*.cpp" From eb24575e2ce6ae56613fe4b9709ea55b4d8a228e Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Fri, 6 Nov 2020 09:51:40 +0300 Subject: [PATCH 068/152] Use explicit opset of Unsqueeze from nGraph The change is needed due to removing default opset namespace for Unsqueeze in the scope of this refactoring activity: https://github.com/openvinotoolkit/openvino/pull/2767 Signed-off-by: Roman Kazantsev --- modules/dnn/src/layers/prior_box_layer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp index dc1364a06b..f7340b1e67 100644 --- a/modules/dnn/src/layers/prior_box_layer.cpp +++ b/modules/dnn/src/layers/prior_box_layer.cpp @@ -607,7 +607,7 @@ public: auto priorBox = std::make_shared(slice_layer, slice_image, attrs); auto axis = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, std::vector{0}); - auto unsqueeze = std::make_shared(priorBox, axis); + auto unsqueeze = std::make_shared(priorBox, axis); return Ptr(new InfEngineNgraphNode(unsqueeze)); } else @@ -628,7 +628,7 @@ public: auto priorBox = std::make_shared(slice_layer, slice_image, attrs); auto axis = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, std::vector{0}); - auto unsqueeze = std::make_shared(priorBox, axis); + auto unsqueeze = std::make_shared(priorBox, axis); return Ptr(new InfEngineNgraphNode(unsqueeze)); } } From ad71a1633cff03f72cfcaa85b7c0ad35a8356d48 Mon Sep 17 00:00:00 2001 From: junxnone Date: Thu, 5 Nov 2020 12:25:38 +0800 Subject: [PATCH 069/152] fix truncate threshold example display 
issue in py_tutorials Signed-off-by: junxnone --- .../py_thresholding/images/threshold.jpg | Bin 15543 -> 18675 bytes .../py_thresholding/py_thresholding.markdown | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/py_tutorials/py_imgproc/py_thresholding/images/threshold.jpg b/doc/py_tutorials/py_imgproc/py_thresholding/images/threshold.jpg index e203927791e209945680447d515812c543ca76e9..c7053cc76d59c9545598274275b62f3f0a68a6ba 100644 GIT binary patch literal 18675 zcmeIZ3tUX=`!~Fjio!@IN(PmX3YAD2$C6|a(#dS6jxwk;F|`tksM$%ig(fq^}|688VAy zp32HBg{CRX$STW7DE-R`A8ZS6dM_vbHPdwTo6 z35G{Te+b9W@rg;jWFXn!)dJtYEB0UXQU-cWo3w$#q+T-90>MXCdAi(^HS+Ve>{B>; zR%PkhOEXlr#yrR^ovCB8A5lAYu1-;1*OaFxm{e^_v;RKD!u~I5_NQWh>D3O+l9d6* zlU0TYkVO12ayj(>#D_6ol1lKVsZov4H11Fvn+un%w+s;AtC}^VA7%7TBwS-XGwn3D zed^<1+-S^D@A|s)*u2j6vK!hZY~9#U)s-)e^I9%p^_)C*RfiO6*Lf|43h@J_fxL^f z`4nRLg`#jXniATu_jztZfkb0(;)gHYqLU>WYn|A$Yp<5XyfOSbD|_Wyap`SYm2wi| z0AFFMAW|qY8}@m%0Mno_USsPTN(}h+@*6ywx$&ZXmpmGDbCa4s*#^qBTj{um>5nM$ zDRqsqAKS4m7|PEgX;S*DV0Gv6tw?R*l@ZUxgz_k#-Oc`Q%{IODnf}x@gC2M`z^Ey_ zS?k1o7v*OgEm~snsD4JZ6k3QD5@{TD>OL+9B~tAKEoD|~1dC!*T|2GfQhF0-xf}Re zeDGR4uWOI2-I}DG$xeFrLl4Jkm=R9FZRCosd4hzBp$-jK^d;AL)?pV)H4mskH4f_D zg#qA>nLUw1sA>)n%I*BGokEw^JRf`#mZTgUhr z6(Z7=MG9y}U_kZ}G~p03c$dvHJk(wl>|uZFi?M$rGqK6g`8(m6tF7>?@oxf~o~8M_ zb3$w-v6!0p7fE5sJ^@}xYxn`nYFmkf&ykE9r+&b*za=<2FUv~rEtZe-@sr%S(EO%7 z@=465;?DUGBfWgT8sx5v6td+)fXy{5&5#;i#N{077G7)+m(?Cy!I@VOl>G8}+q#6z zZ(TYi;q$MDsu$1wCVca)bg${TOFDAE6ixirkh75^SHTby8 zqaF9p_j6s3ST>u^*0eP=T=qhf*pXUzHPys&P2RM~!*34E-7~yF0EdzgBCdm6&MBxa z0$oLoi)7+MOIF3D%LDjjU`UtoFsq=+De%4Qp-kpng~%t?_%pQ6?+xnlP@C}U$u zvM8vH9v^B-wUa_}TO~B6)?1&4R9EzB-~%5Yo;JQB5u=lNQI2~rZ*BOoWO3N{@88?& zctr&`)y%QA`Dd*UQp*>OvWZ+;2TOPajY_BF9Yg}73pN%Yhwl1j_Qwx#2M-w}zo7o^psN^U~Ay1B6AEtvmJR&AZ68wqb zU0R73YeM7Ov*YhFoZp(&seew(jXiKF?$@27L;H{KS2f=v+KyGh`8_wS7E@aI<^$sb z7i%ObBb$o*Sfc%aV<@S8Rbd|QT-o%c?9S3lQTZk=u3G*tZcsnwM=hstOypm61fy~)7_hS998&26JEUwRIe^u0%}>C9EX 
z_bzPpm(a%{i@+M8g;XzEhB!u2gEGViO2QjToonN1d>?JQGR=adG>=D}PkendBw_p} zqt7p6Yi}CnpEXUpXH@&i@vU>hfSlkimvEK20;`ATGgndYWe}PNR_KkAwMnZ{7T;La ztKIj*r63bK-^V@{jaxsv5PKeGmsw|>KEe5X4A;e@>PnKV^nty1rl4&CDYRQ)6F!em zYL_h!(L)ZUafu82)f455+a1HR{rhiRbh}V$NKU(aYNS+>MxIB@-&%R>*82CSWG3_w zn{G}StKFc?0d3%!Uo$fHT~)Bce{fI7zIvnl{!RMdim`WVLqS!iZ6^`#hU;KSD-~_yVnO0)v@9_Pbup&2#yu(6&$(_WIk72e zdRetPecegDMH_bOsl7PKXQ~3N38y!5W>9bdJ=N^?nw?Lj&=sF<{c9mQXhc<#Y~+R? zn@El)qGvB6<#{Jkn{N!)1o@rG$;tijG{^gFi>tAGAYX*`;LV z$h49O5iYXpL76`BVUY&NqbA#y%=f*S>T% z$J;k(-b3Ax134i!*AMNtMXK`T2DTH3r)!Zhc!@xlrTR{3x}1ch8`2 z_InpqvQPu#fm&o&`R-;aoJ?VIh>al|I;n;UdvEM6qMR}_@YGKV-pZAvtm&((BpZ}) zHFoT;ox8V!+9o<4Qi0FMRnVe1`DsH}2`7n>7lc^?jYD+|rYfa;C(1g0l-uu8L&SN- zfBfWQVcP!AaNrE7-DGEMU!eP8zM^bc^7#qE08YTFvRZ+pUl)|~HbrOi`dHpXZAxtQ znspDneRHTAvMAf!>g&7TjokRH=Q%UnSl9ie%E_36PN51-=dfleH1m|@4uQ`$!Stf` z19-X7ejh2M>D_*1P4>eM`;7bBYOlw&-PteaD~emaMfS|slAdxHIY15zF_m0-LLkEm z;?;sL7%h_$U0q&>&hHbh>slJ&Slsg9bwR3@9|~hA(+&gg~K;sX>a={QAfzcuoq}ZE5A@Z{~$DZsmTfT2?!Ec(tf= zQ*YS?nMNKm?qZMx8p7$4Vun943O~KX$>RZ~sGP25wVb+6Aj!!?c&W$}i<5)&>-m@X zGnPaLAM&a3BIJ!e50Fb*Q0nCj8=R_C3$===BXHC4idqhLH={Agjp{D>#!&{|D`}qK zYDDC!(XU`x9vj+UsN$8Nk>_;e(`H|z`~0G4ite35K{2|+m^*1dsw3J)F&1883ShB8 zt--B?GskkmcicevPR)cTG-MvN4490-( z;%81qIwKzQ`WnbK!;K0wpADHvITp}q)@r0 zze+eFyuU6^yCE)~!ufEp=|RFfNc`)bnR6a#jYkYV5G@V)D23ERbg_?#;C?9-!D?4e z+JJf?<-3r4R(o?yaN)>K!J-!zGi|QxIpE{SL8nBiij1$mTL+#zG`C9hSSWAjxh*;7X3?*+ zUpn0nmS-A){NV)l2A`i%%D8@E=$aJrb~7?7L_K((Om*?RR~Gr^=e`|LEqImqCBTbX z)v6{q`SswH8=5hIyViwZl)xs7+&*>b=kd7`~T-1ZjxX3=VbB!(hlE zFyl%_k)tn!yO3%ub;2IjKZ3(>Wub)Q`DAqCaZK^X<8Fc?Qzkvw zQZ1VUqPpFUBQ?7Qf*0zfQ(DHm{oAXdAJDLOnpZXtzUH@+D}7)}Jy)0V2Px&8P?$498m zL&|k^bV>SPIHTs35DA6{s1gy>6UNYB;Rl(@;W`1CF!+;CsG?@GFLXL@Gg!{~+fg7CzkGbbK zl!CiaF!r{GAEzgpC5Tnc53bY_#-gi%(i&NKNxQ)-$*qu;6nZyfAOorC4vflvvZR@M z5*gdkVn*?QzR2M4_nd*F&*P5TYIo}3bmR^N{_imAb@@K8C97PDI~Ve2GeFQg&rzQ?lgFY zIq)!_hDNzyig%^ZcAO9f+Yc94DfFc>WOLnSJcA?9EE21kW7qHlQV96N!WUAgCX&E6 zBcm(HlDUMo$*VJc!Tcqtjmz$ULRfHxHwQVunOF?xNg-@rdyf<{GWbSAb?D-S;5NDS 
zK%Y#k{XF5BdmfJ+iYbPTloyAm2h@-wikn1_3U?gE-)=6?q`7AGj@}%#yfz^hJb$n`nv@vNt!C8ED+M1ENaO(Q08WS|G^a zGs2k~R2TFpj{%b5s=~xMcf1&4qsD-XHJ9`wMx*xFrag~v&EHDxwBD}x^yVs`TNSY` z@K5JKNRmRgI3gRk%{^BNH5!!t7-*E~Cj9i?|2cDpkD?=P1k}hztXhuE_#!y|nm*gq z7~L0QMO;8R-{Wv=c!-yjm!ajL$~RuMvVVAp?fUt6JiW@&wJ=v^lB(vBfEm#it?-D8Wwm}RG`wY&M zLVaf#6`GLb7z$jA!X!90W3$j@)w~R_e%oY)$B~Q((}I8oO9GD(F_9e=l^mpLqr64_ zNp97G&QF8;;@$Oc*`NL?dWYpBAxVf7y4J+vK4Oh-mO>t2Dd`okG1q-kNI45Gb%1*> zVIUPp?R@Cs=ZrU+%Vj540iZ7jV$_m~SYlNgrfAG%wP|&e%V<|Dh~d;N$Fp3$&6#Sz z$t@0Kx<>1hw}bVITe_Xa>)w5rogR)aRue~oH#{N?-(`p6g>nKsc86>*@%iUb zB$UPNWsGm}sK$?#tdT;GtYS%TTfjtIMi~7X*qlfybX%QzRtjA#$4|uenThXH2q-}j z`e8gHfS3kcv(<30!8`GiCabt_NpnSSN0V_5NW}JumdBKBk6Yq^`wRAIvRu;SDce}G zq?wS(a6o7~CLH4_+n&Jtj%4r_-I#iUhWvh;&pWY>81MXpI3vIr`aw{bjJM#fIX8|t zalxV!8#IcS)PZ-2|BVxLE1DAMI+(?C@ViY`vcQT95Eho`n00st= z4@S0x+ZG6Hu*B0oZCOQ79^j*cq)==IikCtsq!4&vJ&$zFOceZY(EZ28MKT|lbwB*YoIZ;% zpu{Ig)^<7mcuRW25o^W*jY`m?Qs@_wGFTMx!>pWL|ECSlAMZew{w*f3T=?HXDH*pP zi8wIf2tYBUUthxbds6U&@Mn+@B3-f2Z+_)arhO=MlUnJHL$m5Dd^$)dg zAuv|fTq9S2i`>}{lPeZL>!z%9*l&5yXN*5d`X&2SY8p!wADj55zFvFR&k(k zR=R-z+^LKd0{#>@JNtD_9a9oLr=$A%;*S(B$KIRaFlw+@%I#Q^? z@C)o3_aCqFAFuManE8iC_>Wilk5~DRSNTt_^8dbEWn2SOdLc5Po|P2B+&Gdd%iXw* zRkp96X)4&*t7B9DwK028{V18?HRhmVSf1*sK0`0$uu75as?7v;Gf08uU%{o`{;yb` zl6e1y=$BSYdLza=TYkVhAlT@FhFA**o5SdOp(I5}ELxBCRLoYG=xXtL0GGamTlz+=(WjgkP(K~p@cOB1V(*y#-7 zbE9+4O4wQy^2E1*39^Xd1?opvnC4Wy7wADzAaxfK9xEX5zc8-;AkOxjP`L7a)o2Z2 zKzq+%SKUzl#v3sP>4_&7oqBNaKr-<@;S{|?Uzj0DWExUk5!_Y6Df|tKbQUsF zjwwg*3Zsn6+?I}Q5g6rW>P9+{nhZzRa~tCM@Ji6GIra3w9A`6K-^Gh&CxcYRjKxj0 zIKs{kslb+Qyf7e02q)xZxustf%?3Yi$$xECCz?Zf%7qnfhz%tH7bFW%GXcyefL+9C zViZY%YQvaG-NZ9iLM=u%H^}90cB-W&J}4%y*=xIO#23dors45U!;l^sG6yc*#X4oE zQ-RN8RS_atDvO(~w7g-+lsK1qrXY69v#Z?+2O5(~shgi|Q#7^p-B4h%{pwNmde>ch z=imM)H(N>gi==q87MSiz$*D6zZG5&O?B@Q`UV%p#q^HFQX{|c^nbl*K`*_Cv*)4X+ z4dGMC6W9y9-AeMCm>_s1Hh6+&RpK=Qh$_^r`G}Cd?txD->Qj~HS!fY_#pJax?7Ur; zsaJZ~v8DOvPX_6&#pN+)p;~?Dht`4j~QIAFIfVerajP9TER%2Y^_BHCPkcZH>e>$1`p?1W96qXRMk`-9Z)QB>np|jhgY? 
z7k(3cU{UD60!5-&eFTfTyaQBeqd+1&d=USWYq0C>y$Zu1$(@@DU_&PKwwjW}>)SyZ zs|PkS|Gelp3U=diCd8YVnk0s~m>PuQMSIY4URN&C!>hhlAhLsJds(P;7Bc*Ne;J!= z$p2RC+a2VI9G`1C{d(%B_vckdW(QW#3rOK5z6eohhqMgRyOks!-OCcP(7Pm*#)oI4 z=iIh!Z7|K+C~FpfM=m|V*Q1AcHnW3omuYm~I`;mP^Mep}k;PY~qlP^aE%_Uu2CW6O z{QOv?gUtX^Pd@L(3czD}Z!Q9M)ZDOJ;`bBh(f)lC>#s;G@u~xdOF2ab!mY^ED(QQTr zh^V~9N-r1b?1?n3wHR}BvD2PomdMB2hdm1OFWP>NHqN^r?xna`p_PL);k(2EqVuuc+gkn-g-1jKS!#Zo4xKT-Y#-yXL+uW8hGo6Ylv z{d@%rx9sea=GYyNQkJY9k13IJ`m}SMWXn`_Z}t|!ODyKC02DnHk`zHHFtTJ!ScKje z*;3C*Zez>DDoCJMg~9*i%(`i{sNZ>69N4N|qpJU^SKE2>qK zjDOs4s%z#~@pgKKE;)E<$)%hWO!IPgphPFO+Z@ALs!5?*wTouw2n+JbG|ir?+Hc`*X>hxY@g4j2|^3ltwo;|Ol!`t~{>ectBbWPS?wIQuam=(H^AU0E|8k(@t zuOf>VQ4B!&awUP6X=M)AZ?H0fxg(_{ah>A_cqW02>xdQ!*Q49O4z=1Mp^Pz)@=|!U zG2dQ=SABJqAnWaH7=JyptK-uV;wDLB@vHvMj#3XVzjbdOUi!TH$}HwDCw>^u26L9@ zPi&ti60St^`0Q|xs-(qekRP7|`YX!$L|N~dsc*IK52mF*@Vb~%^Ekijg!M#SObTn- z!TZrVYt+un>Hnwh|A0i+*H?Z*u7pL!@rdm%{QM*4=%#M=bgC>$qJBzi?;;JTy7D1iAQP`@#%vqTzbcdkc7U< zq8OJo2O?fl2v=gVYOm-1TEB(vzB_IBKFySMdAu!!8*b7~!XSkTI&#)SJP*{h6%T?p z`J)X;51_Jiiqw4C^=MZtjRVrZ?W1&8B`qk7cRKlEUepIT5gyKlzG%Skp zze?Pu^CIrhrmf3PSntp7D`e&42I-ItvDCgdR+W6OqOs{&> z!%Z`|`GH)1)%W2Zk#*PMCppJ7R>nVhHY0CN+nm?9uOrwJTj2P*;JT-tEc9$X3EP$O z&r6JN`a&-HqZ`+jA*9~rS6}k)TsO3jR%NA8e@NSTf$rtEjU`($`tk~O{C)dpf4UiQ zUiR^=`O`AK;+26%b!3{OI#r~Cz{{2%Tw_INxGV?hi8M35)Sg`QVmP2bO36BGVxoGl z1EJQ`H|XT@x%t+Ib?UR`0cj`esb-gM7xiJ?5(xq z#j&b4GTL2s)<0?B1zz%J<0*k6^_fR~J<0}{Z-Y(cy!|qRcPPOKBT!|-l_@eRx2he-C-uwH}~mYDvaBc&O14(gdKbgrej9^eAT(bByb?(vlQ`w6BK$uwN)d zN5>cTbd#^am~95E-V_Fm7Z)zlrkarELboqHB* zJWI01?<(#qdaT_l=Rz0YKsw@4rOBy$as+t=E|!CN8~b z_!@OOF6Z=NH|J{K)#kVeSx+~{|t7P!4z6LlwbNkZl)JA5U34s{PIg)XQJk_tH0Hf&dMpulFd(SVGpFrx9J=i!9_ zCSnwH_2NxsCg?~HHq#6-=iy)DBk0pHE%7gCT$;BOQZn=P+lx3>pg(GCskT1L7{OHJbXDF-NRfD*R_fLKz=vEr(=ifKqIfE8aJ_# z5DlXtR0q)L1MQTNhb&nF&jRI1AsY_?XmQ}!k?{C&8?ioVA)$8w+W`E_xPT~yR(Z;f zeTWo;Ufh-b?g)uCy_z5+3h+SGQ$tUy{61^}fV(-nr8n?|#fyHx~3lcYrb{h|{;RW>`9UViw{hAiI=9Y^n${mL9>uz9}fDtT_5omTTXwhpv^r 
zLB4t=43(YHS&MDwKRS7Gmi&Qe9A1zIQomq9R(Y3FA$dMZFT<9i0qg0nF$*MnVD7i} z?cuE*cKPgCg(EhrbW@+QXZ^~0w>xONf%r6B-a6G?y2Wk>-UPP{=mp%{&o0VHd{7ZK za|@Eew<#S`6R!~%4=M<@x{<0e{RT6}HT+y%X1zNg)k)~IXRU5FR`-PXw!VTb&!X$s z&nM5iVz)XLt|mP-z8+#A)@`=JiQObGLsp~l4f~q>4IS(n8U8~vI@~-1i}z&vJZ@+E z2sg>kzjyb>dfBuJzpA}+)oeRSZ;0YeH>P^VwyPw`1Y{Vrz2c5c_JXH_;(6F5xO5A1 zJ=Sb&)m!qI zsVm+B&!LR%61d+8;%_A5oNIgoetBP48f9#oZ*!^b8Lugo>l?G-J$@d{O=8I~S9sHv z1MtcuN@S;%?laF~K$&ZYjTz zP-!s8@|I*W*E28lboWF?3^)3Gq?#q8#b{MmecaI}fZ@KGl;qk7B&zMt;pe(=t~;4clQ1`QuL=|$_uR9Kx#pI`KHMkDS7 z<2_-vcy%$eL&%_!v`Cx$L|Z5?C%glCtu#@Yha5GLV--1yob*ESpyU@q{G-kI;5yP$ zijR8?gr)g|2Ffx?JY9sNu9Vz1v@+>xr^Ip_6RjU`;cJDO zrk=v$!X(D1s+OI+JW@FhkRDjGur4W7 z^h3xg0Ip}V)h6cId*V|9(1cv(m`w2yR!a&QVZjXHAUZr=G$JnkzaygXHY**V z-z)Opy?=j!aE=Yg#-JP<%&)(_9a}wG$m;)9~UmbW?{(2No*!uCyZOXQYeQPu)MS;F`FA= zF?qe{di8MQa#`Ws?jo@Y>ot9mWuQQ1f3u}WU>Zu|v&-Lz3EiAIUWdXqoDSINbh)p1 z@YM&gOP5QE?SkW~NuJRSXQwqC6tD*h1c}vENvdEK`;yvWOx-#GIcjXinCZD_CTf57 z_@=6dr#$j4;=a5pXc}$5|IPW6o9;g6ZPUol&pkB>Itt&Tp(}}lH6jXCR#1HO{NT*VO=mhdf$)OxZ7Y|_Hhr{w<`*jr_Z|so&QzmTC1au2n3)~ol$zb zosf#Mkc*f`Du!>d(xuum1rZUJ$31v;tGYDeM|NGdeNm{abyei*=VaJfLbr5Dd39#m z?OE_`7`+J6aC7XIlV~%{``khpUrGDtrBFgP=;a1tGc%Gy>gjW_xs+2|_sayXm?G6-MRjV=?Uvnzp76UZAr7n#vm5w#{u%CnAzMJF-lDhv9zxsMpYoS1 ztm82EH~esvaa%X`#^VZWwAPCheNY4%^3z`frh-=p8!y~usRvr@KD(eX3o(e;mwO0b zTj1~KLC;nW&@mjjI58gNAM0L9A-^JwyIdBL|AS%2OaWdA6g}VeflZwd0!hP1b*3u= zR6)2jc=&=dED1FRTr0U`T-(qLSfOQNlHWCCTwjt`hA;hr#evSVs0Q#EKsOpNAi!^r zTL94kFdzV5VUhtM#*6V+Phy>%VVdzI7r=5K5cEc3<5b%^K@Yu7OQ@3;!x1gH=|B|w15I^r~?5-{DL zM2G*17@yxshlfampklXtc#1!89PkG`0e>Jl6R_8@9W3rw+V~z&wIMLV;UDwseLe%y z8sJz49|gIxDf})F+aRHuNTJNCSd#uEd1n%}+I3c8uZX3cIunY+Y=ys|H6@<VuiRL);3n`a{?8c2-CkzMxox`MzG~u3~jPql>#-mn#m~SA~ zLzcXD9QqLgP*OTVAMs(0ZG^v13nf6F|2l7A@Qr}#aKuURbU@DmQ!0`1`SR{(5Q8L1 zNCXlLr(K`}7L>%G#`5pbf&i&%l1)noY}!da4ItV8%K@Sdu@sDq{scAI#3}xj6I>K1 zPN|vXsL%jM<>A**tFTEn%D6gMzlnq|8^IYyj0y6>#^bh(SisM+D)!J+ z4}^`SXxdZhlklCS=mc(`Pz2ple_AgP@NdwXlFu1%djPL&Qa-~;Ni*t0t$;TVYlPqK 
zM2saj#t0Ku%oU6R)_(<<=O4#dWk-)=vq4Gfb{BQE6nbiW4LIzYAX1ASXip&CL~;5E zFG6yOY6Cp4%RB7FU&oXFAo5JA#x?#o{>NaV^4~eFDaO*DT<2ldU;H@Ww5DbS5ahlU zd;#nv`X2;0<0%aFSWd>e-aj;e7_A(1nSwaQPDlYP#RWWLiHWmgcihy5cs1dRCMmC4 z@+*5L+?QA%y6MCJ7)$9Nv5b-MPkedG3NQXWqA^ze9aqa`0ih2V8ALmvPSTV*z-oW; z_5OjE0?VJrJ!nD^L|)uy7i^9vFxZgTZvyoIe72}%OyX1oZN?*hHsTDpo8D4Rn7HX> zHVjYVb`r9G#WWz?Uy%A;wyAh)6*no{l$3yajwbvwm@%9Gfvi6>4cz}$9H{@~O8@`O zm7dYiRiuG^0v$|IAEd#b(FoB(v5vqfA_cu%g-Z#J*e_U^r+05@gHtZ^_M4;8Hg6-_ zHzzNtQB#28@sZ91@d6N6c;kO`G7A74q#}c&4qcV#S3C(vG7fY;qOMFRxAJF0ylwU8d@i(hROv8 zx0Sf>e*<*myhC`=^_(o=jxWP7_rZlj1b!xJvW)O8peX|5^~(1A9%d)>cY?L53zu#k z&1Us+;Vd zMLqog-FsX9nH?H%*{I(n?_UH`Ul`$ebHu5Px}jDMNLa6fVm_lyiRbtluXUu=wckn} zB|ku6#~1Zqf(%a)K*1S?$kSgK%18S!=sNUfb2{iE>&TH9?PoT zECmg&aRr!3ikFqP@ikUKN9CA?=V?%Q7JA%TqAnc%Y}}gm>2QwuGT9TLYi$n>KGM)I zGB6M*$J!#m#mbtIu`Mas;=5o{uS%gha3mU>oT+t@Lce;-3X&ovz%67Ss0$1Lf!e8#Uaw!z>4*Ee;0F%%AM^MW(g%u|sxX%*&!kqQC2+MK@aCCYM z<1OXD5OdKc(FRn+HO0$>DD|$^+osR*}H-oN^TQnoq&5dS5b*2J)MK z1>I35*bK_||I0{~K8X8nxUn0|afPG9y1w7BVq(&2vNE!2|M3s+F(e~~Fhf)# z5sM)q83a-W!Fvi}zW!lFZ4aOpACp~kCLvu+eN*8Rxm^tm4ZVZM!{><0D=bh{TC!AAYuR#rgSCe1 z)*Eg9&3uc+R!ghhd-mGd?{mPp9&$VE?&0Zm!p}e8WZ_# zVWIJrQ91s3o&iax3~|Xf%OAXc?!MPMsN;IXIJ}7mrBQD1pfi?SeMGkTVIJh$=Q5(y z1T&UltK$bb65O*mU#^HIvtBe>MUrZ4?AcjO!QQ)SyZhMKS#RaPV6B%%PnWdbhsQVE zPg!l>?owaKgBB4Cd5};N8~Ad|;j+Xw?ulHugX4-9tP<68|z}GHf#3&s(>vJFrAG)CAP(#tU@*kAH zWmam0Z)%*^s;qqZk_K&`dp4eJ*m!_3NV#V4^`idTZR-rS#vF4QqOJ5*Pchi_c%Gci zZB3PKq$*^hMs;c|Fl{MjiHvJ}cpox&|4`U6?JuszMkAwgp2Yks>$C7>{#$o0aa~x6 z*)qb$|J7b~-Jm^kDY?bKTKYZCWOTH#YDwiIW9EU_Ky^hm$KLu?)u*bD2ij@t%+of} zb7Z{q_Ny?W42m?SJ-mXt^w+bowGyZWA=wX1jOtPMDn>6KtI@a`(3dEgsUIe~pblj` z{K|NLt8)B(iHqf&GIm9GK>L3Gnu^nHT_Iw0)45K|vg=1%UsPai?_QYudFPdskmkgv z4UZ~i&nL(Zt&VStSt}9{WPRb|py?vjpl;{Jl?(CRfl08};vI^sPK7#bR9u;M2>t#{ z2}V~XELc_Udg<6X1@2}+nl*69>2N)M2M>Cj#e?tP1A!F3CIA~gfRcQNQI%Bm(Zc?f zWcRo(3s0THu~v_t9oBjoW?Cs7khF^!XRP<3e>D&Cs`}trSf$hDFAXy}8hCXJxBWqVXd7lMnUq_0{T=e* zK~3dSSE6@sLcg}&c4kDU$=2U|3^qq@-(Wo8dEnjB+Qs{?c1ptwJb%{A9ZA8!#*L4 
zbqix+V!}7rVk0&De za>Yz(kDc%IeBrj&llu&A@t}@fa4h6$>p-3o%siX-Z(n&G=`0K1b}Z4&60tF^JZ@c@ zgzSIxC7ClRbzi9Z@XmzJ@o0R=PEU&C<%F&{We4}e`^JX6x8$2CU)5kmk*2SqBUE$W z+rVhYMqPsLh(xXTU_r-qiCE8%Ur)Wa6AMg^thw?QrD_iOE4(*lWGCm)KKbVb4i@8{ zf9Q%r)%ihO|8j-H_ z)N<{Y8I0KSD_1DjQzK8C1kWvMAQYT1%KgKksM`1PQREym+tJs{r8ZRBdubGshhImI zAqc7)4_hWWWXEam^BGJuKUS1bcr*TMf>{4i1fA9!XKa|UzsBdbm#9LZeE0Du>eUH) zKGL7=?3KNA>Y#`NX?SO}ze)LA=lrDo`*--L98iX@$)A2~ujurk=TB*i+`7b6VT+Tu z3{o!S6G6AE(XhumAIQQ{`KmlEK1u6IGznoD}^Y8el#6<=fqj=wCeXK%~A zcy&v&e&vy+t5er@*J>Tz5)i+lXFSIJ`mGe1yN*NiA3xG+$ZRBjJQ%%o??ach)j-X@ z&`F1n($7BbULX`^a-?d{y+;)l_e%b-G>OhXxg5c1T@wNQ`TNZ*H4}5E0`1Os#Y!u~ zcJCV&jWs2nG`<>iuHH%a%9r)eCH_>svo@p0U0gf!A2WDX^8G^36}dElxQFTIX4%;x zlFy(nZ5%L%LQnIcCmWf}Q4bUcBGhrsub0a=qc?uDuYjLy!uC`+@Sr#1?L279D`yE$ zGo>rFf1Zy1W=G1mz4`;T}94Fnnb{L z=Rc0!G^*pWiA+0EK~gpks$UCU=`!~^nzj%;8^c!6JeBM@$Q9(FO7P`)Tp!JXBa{y_ zRI!zLIJ;*=FDQj--Vmpk^#<-Rg3o)=`hu3C*p?$ah%IW($o1J^MQ;M zvYob>yNw#lgFfmV<3Z`yp)c%I+?o(itu&G6qnVnN#3*S0lsA@w*Bm9z#iE-lhsONs zZv>QgItOsu8u@L1C}Jz)KT4swVJw)7)MVNv%OY-Y&ZL8#!dL_8Jp}d>dsiRJLehBAx zV$xmW78C8dBXG~Pz;n!JxSr>Uw8|JUn z8CS?0M{Wkyt7$3^3VY6#%QpX?S9qY_Z@Y?zU`yev@_!JSxh#Og(?rd(58L{=Q)#59n5WI#Ba0UXkzw5Gp8c33mDEZA4`u;U0v zu==-oPkglBlRMHepPfOzd=&BJL79h%B1LIZ_#+vlK_i_*f>?SWajhh8oF4{PQ#|M> ze-lLsGawk*Ju9)@H+EC|U~4Y;z(nv2k~~3>;0s{}gF)idB5rcRB-cp=PY~KY?{Pa) zjuk{HB1Aj4#Ctl*SL7mbO(Z;S1oxazofYz7TJ(Pk5V1)|oogje0vY!L!%_5ixkfIE zU|%IF1J*y>&4V`UBYK-{%^}$-kI*D8pwG`fG&JY56Az+qraquXgwA}c9$_wP^Zfw@ z*{8hYLC@C=lR17Z@Sn+j7TKo;>)1)U;6l_L_TQeHss6L6wW zjQa6FAAYzF2-Kx#L!8r=fK8R4+f%NJFv~b&hAF&m$PQltQfC5)N>g7zr|0|%bHP-$ ztU7mFODZRfz=KA-TRMs_BM3gtKxWMIqXnS^xC|gf?>sxOZ%ly4dQlLGH^u$hYE9YR zj{dR$mJ_kNH=!DgdeI8vM%-9x<2{{;P+mq5Cje--Bh`yiLOaTGfaR72Ki@-zQbY0tSGHCp(zh^ zBmBmNyQ6F)4C!-LjqA;~`iN58^@`qg=mAG|zdTJkKK6*5kYiB#zN0qo}m72swfu_Nkca#Js? 
zC2OOF9MjtZ!fc+(3C`h$Yh_M-k%m5losBtS5DyAJ2R{$G52RO)%#8(QO(TF=X%w}n zkCY$?CLfqBg`V{xc(qAD0w_(es4LBn03r=Oz}I|+fDyS$5S7%Y?cBt&GZzFi%;4h$ z0-xUpV7sY>!~rG_wlm>V>Li96@Jj`Fgfb^%r10w)k+&Dcf78NIIcHbEBW^9Bhq;Y_ zN6G<*9N(ZR|tmYEX*YjxiD2OaRbfZAc!j38>>l9azIB)Mf#MgLDqjfTB7_o5}k9 zQ-rAWEC0UM>z^ty;5ins)&cn2+Cdce(MrHI=>zZZRpc4(cuZwMXomKUsLxQxG->Yl zsNDjxlpD`!VBa2@nmN#nXyM0J&8~6wOc8A;&DJap{pOW2ccVN!`YDwqmJvJUd6=p2 zFThqYz!jGP)k1$A_pu!hVy*RJI0C5x4C&mIK`|GE5dcikBIE}PkO;JKfAlj$UQ%>|WEgNM-1THNW^ zwK!)@Beo|U?=mJ?2=eXf-fuU?w*m?QqIKa2c0|uiEo7>?fafSWc2>O|cyiWT(-;sn zL;0M~%@D`Si1yYWaeD!ef=w*NCY$~!hxnz@+*tt&tya50RvAtGCg}_$^=T?gCQbbE z6xEg$*OEnyD`vvAi|e=*pcmLEYdiIY4eD{~EV%uPCXgcN`Dz8R1j;8UMM?3KSOh4V zpwGg=iK=F@u4D?RkOxUYc-jakYB}1A1;GTMoIu~9iP98kEx(eKn!Lru$ll5Rv^fz6;Xx)8P!@b~vtjnFEX?&UCXmY@0v8S~N z!(x{4pvDN|F{1RWx2EwQG*kJs>*HsrW7vlAAA!5(%gw-kwfLMftt1kf87|_Rs(Na7 z3Sh`Z(d{f|5)ay>kJvRuvxUx>gTBI5-}jp^sClTRaNpvnX)hq={9i!sD^XCTTK}k0 zbrj)ptPw<=grdRyTNba$H}SX^801Uq@&2&2K9iW-=8)d~6+oeV6NzwS;Q|iZ-TwUf(kDPglW^PrF@3aN}#%Y!V>!L1w3l#BUQ3llF&8G8x8c!&AQup<#h;}y#R zhm+5;J&>V-pzr4tr2oG9Myu~Qi`6{ zRD7M$d1KG^rS-6CYe^;UIUpe;-ash}_muHOY379j<2u`EFu z{4j3^-lChkl!9f>P32$heJuZLgVF3c*28Hc|D03VDXL|p@?q}7CD|_x#q?X{bmm`x zzFjpwWd9*`q9N+7ZlCOjpi`q$g%3Q(fN}O+GW$lrSh0=MqaNO6| zWetsDv)0V{bT4uHohm)@L3}H=#Lt=~=?U8Jd8yrIO8Ryu$38^@w8nz=TWB}igWJ}C z?OUA#+I=f9%JgUebLmn|j;KDEQGlgnj-UJuv?|r0H$Jlr45l;9-QgpBsUr&j;2BH! 
z&Q9BW?pY<6E1UzIoyb|`z(N1E`Iyh&fBQjBisB>rK$?e}rMjoVF4YBdw;cMUg{=SC zZM`U=`dKf=GH=lNwcd-rf{m?UQR&yS6x;2c3gKQl>4v0mcxwxpFtVu$Vyg zs-TNaZM5zp7;Jhc|bj7NiyKC?lY^B^%U zNDQ+bfT0k(*;+HdwJtuYT{7PKqM8Est6T&RipoJpxk$xZt1%Dppm)14+~r}0a3`i7 z?wfN9q{MRgye?avh2|ji+u0)4jQowmhMf)cZlYA`cqF*kv)vI-QzL3}V#|%;PwEOh zNX|?d%ZTs8jtGO{pd7aHp|6~aM+&Aze|&z&oKei6#2Fu7o$QK~>dRS)KBk9qEnXEMZ1>ETwmwK;SMm>2jCfJH5FJix@Dxu2aWe%2KhgYBr_a(OrCC(Py`4&u?H< zfZG|8D^z^5OLtBiGg9BvSaC_Fb{=%;?sKR6g~Os)08>U zRY9iZ@0BtT%5RqMNrpZwa^tk$rqw^$?gd9dv3 zc^eTG-P1iOkl}&hlT~#6S_90-b(CfK=5IFV)O--tTOlNrnQHjz_ZuO>)NN@vPh8@* zF`_W+W+Zd2ytsJDlTVU6yGxEsof@S?Fvdb$Z6B$b-o2px!b9#rT>lEkch6QFCsfX} zUeXyGl@PuW`xJk-zT6=L7b#8b=Adi>)ZH&PJkCWGk7=Y5>%}2D+^P02BMAs$XSpKi z$ajJ<$eBp4K3wVFQO8p9^JPl{3Im~DkuoymifS?& zc+ec`S!|zY1DUZJTOFp4ot8hCsb2%c=$}5i*4uADLJCx{g~g=ySnhVK8--2lRGR1Y z;WX}*-|J(YPF(-)^;qpT%&x6HO@2=BZxotaqk-dwFVJqSl4~x9KX$J(c1L|SIG@Y5 zVII5HUYFBvx`FZPW?T7%^=YYl&TGUzd1ANgt#FVM1f~b)g3W&7O%|>+NrUU>?*K%x z2TUz{v7JZQ+B_&DzEhX2`~;-BEf117+3CWOh3CK>@6iA$Mp&>b=N~E>2(E zEk8zK%)w}?D2fwRPaSA_*i}fLc~iSJm=76uW6&2TuJaUJ2XdpQXfmqPv*{&w+1PRi z1H0;*UX^18-6Os2-Y9hF(sF$2TqaIcwAaB!*ixq=>4kh+`1{v}_Y#fe z9fT8eHHGKe++7$xr?zgjZx{Of6`XE_Ptj2Hd)X3ly-Xr)kR)-^L5_;gt0{N0Mum$l zU*9jKOmOUMswld|#xsIfHET2vC4TrYN4>0c(Wdo#uN^HsXOov`x>r*JX{H*I8d<*j zWPU4sTgQfrEB`JkF1@1t$BF}Z|34p?`+J_1S93ffrmP}uEnDgvZHzu~Qo}w}UddZR z>f>|&6YkyzlXWy?URS*caC@>aw*Mh2-s)+DBV!MDneTkfXBec@j$=MP!s8T0#HykQ zKcC*y=f0hTY$SKhmY za#|TQhy6k1XF8}oA#2`m3zi%IZSJ77tlpyqL&xVz#-<$_eieZ1(hZBZR_wztS49K< zO_TEdmr0agIE;QA!F5+f@gQV3P%qnI)Z?0*=n-AEbUdg82D=}_WV-uJoV@G57M%5%S@WsySe zr&dUdt~x=Fg0O&cus_6O+7>;0GJK-blRm6a=}yn%BWicev-k_BoTs|tspC;q69Tqz z#wz#%HWj9F;ik*j`X*=Uf!$RR+%8*q zcyC?%K)V!?y&1l->r4;GB3yS+jrD?;WVt@3%J1~o6v-?Xoynu823p>XCgT%_J7?z5AT{lMDacKtC1wFKgTUT0q|EhmQq+QBMvBHS? zvhT{0Z>(5*LG3~Qn-7e-q(j8v6XuCKmQ%LsM{avk+D22Kd)+#9UTONmqnRJpt6j=h zQ%tI~VFp-7hRzSv`9%1Xen4Ec@!6V}IGd)_Rb{#_kI#9x(*t>O->}Ph{W6b&mBtc! 
z7YZMclMh^Y#=f}FP5R?+t=8{_g;xX(r!0S;*M&yCwGLXui0)rY^IK+`?vFo!h-*+Z zjD7Of>Eia)d+Zj@DeC)6HXzwOte!}XJrAeEhFr{e=#O*YyNU6LN(*!fNk@P9mbuO*TRd^jw`&&b3o@so&i-njvnJC#o7uW&csYAFqloN{ zswMg0-Yv;9ZQ8J?z3z|Nn2?%>1tl}(093bA>4UGYKa^yDFI%VmJUn8{Uc!UU24b0a zW{Hsv%3>x`Ox5beo;2vcgQ<7m^Cwbg%FS)`v!V09yL06Y&((3B2?U5Fb_YU2N1;LU zwd)i2QMT6l9aY??5{}-am4*((cBElHyMRIWcjG~+vCRG+P%|^Q=h54rF0Nj7K$`S@ zt9}yI(?sOBk^SC+GRyIdFrzljJJbW&0R`Bt{iHXZPPs@zV^Gw9UrNh!PZM+hQfn`0 zz%Q<)$q~LI_sUZ>FT0or?a>n2kG5K60tOP2v;PI}u5UBsTCD|*-*F=X!?SDJEz7>i%!RV^umICe1RH`s)BAtqVjQOT1j~Q+BH{ow(DZ#(g-gf_$B(?$7(+Qm7xb9-bj--%#^n$+| zQcfcU;pD{z#HkaUho-j@rxLNMkX`hp{n*Bs#6xH!A$i?LJR&b$R13VKklP`K{J9v z7oQA$Q>n z83XW-aa;1g8@DxKA-LzyeK6IMJmPAfg9{+8W`Z?oOv?t~aDwR)zCJ{Iah)IuzyK<> z5xAZ4*@wX7mr~SkdKpp3w`cxkANX8b%V8j0@toUcm_KON`~bJq(oOOT z8iQ?64gfD81NO%W5&&!q*48OBiS|$mlUqRMr^b=VfFC!qW>B57fiFa%JBft;hIjet zceoU=>^Hh3C{h0xFEXB~g=SxE{ozGc`xUf&Y-iYDzUdbK7@)_{0j~4;$w;$qldGjW z*@3+M6van&p3{Y7+5?XI1PP=`mss{|6F>?t$r_ad(a%K|G;Xlp=EKdkH^>SoxYEk8 zNt@#_W0cnG*ZxhbwEeq-;TAg0!JyI$_z9u#4+#N%nK>Ufy*4|(ze4+Dpr@jpI}_~D zd{31k@Z)g6$r;>lN(8wRUBiREF5p457vtEScI_P-fMyozMthC>_+!1)FKr zr~O$S90K`iRB}!>mWs140s{EqNQW(-;_g5;T4^?@kB33C_v75amZ>kyFv9RJsGWEi zjN-!pVTFL+ZPNI1x0>Ln7JNZtXkUTF1odP6Y*3B@c(>HhRqe{8_*_tGZR>Odq_2)< zZb1n@eFG9`rH;O%n;sI>Nq)uc{pX==Ge_VX{vCyrcl={s{KDB9shh#)c`rjDu%>l) zf%%vO_f->j7J<_WW0TZgdmor`)HD`&<=JbzS-Ud zB*kWner>?>{G9|-ZGRdd__!YgUpfQR)PYCI_4YkmfY<)t4|vso#o@t*Y*`d{+u#qs zx*G@)G@xIQ2U7N{`BaS;!26#xev;3pYCH}oIp1{n{+Lkpm9tg-`OJqD^`2s58DEk7aRWuErGR||EYBAl z0lbf~EcO;u@wwngQnS4y$lq-OlD9!QO>%kxe(RuFq}4$gNHH-c&%Z%qfMEG!!D`Ah;HdNbkf2z*6w$~1Up$>s#k zbWY>@H1ta!%}D{DfIGh&DibKY0PbL81hw1#3j&ts*Fs#Sfue$phpLl!2fI15R3}nH zw9|;SvH1Vx41~omczn!vc40RmNrKW1)FA?v2jVS&_x?~p_0CtNiP|kMQP9YNzT|t~ zrhR>~nzIoun(7gQ=O;>i*_WMszZZ(6>+Lar+0M?`em#gNNT! 
z@Fu38J}ha_X)d?9A9nhA+!~a#dEkd@Szga$X-=OxKJ9ERBs^Kt+3VeU>%7$P?Yjb# zgT9P{)NPisQVmcat4{hu^Cc7TbMW zbQA`(wwGPC2$$ynLAJO%?rC&fq8*wu-!kz)-;s_hxYQefEoT%a0P7V zdcIA8o3JVBQRe0+$c#UkdfW>HWiT0?&-MVr#oi~WBL*Jr%mu(4Rh5%_&4VgW@NJNy z2^%DnJ3m!)Z20?CAkTl)tj1Icuj@yM-4(}&wmmAT2=kMg(TUz6*8%CWRXK zpRjnYEaI9u7{yhZfIg!hy&Ejkf`7oeQ;iCai=u9WF|Rr}hYlUK#ev}x9vuHcHUk?6 zwC|N*z69z-7NQ?!0x{?`Q@m`K3j7K%gMdb{Y+;zepK8I}u%J|~5SD3!s&Aj&7=vNd zydlWJ(It=e`kobT{pC!_GMOjD&7|ghlw++$%QMK&ZnA>@K=2RI3F?=x;?l&Mi>dm* zi~4WdSaZr8@&hLpCmQGD#qSr_+bdF5SYW=q#qXj%eXpkKUtxUg{X}XEOdONLbq|)? zcP3Z+bM!-w*t7AwTC3G$Q6GlYE}H@h)knOY1=Tmj!^CrNBwjkdAHNfYk72! zE#u? Date: Fri, 6 Nov 2020 14:54:23 +0300 Subject: [PATCH 070/152] Fixed Inputs/OutputsDataMap conversion --- modules/gapi/src/backends/ie/giebackend/giewrapper.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp b/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp index 8f5a7eca11..ba0632d4f0 100644 --- a/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp +++ b/modules/gapi/src/backends/ie/giebackend/giewrapper.cpp @@ -25,7 +25,7 @@ using GIEParam = cv::gapi::ie::detail::ParamDesc; IE::InputsDataMap giewrap::toInputsDataMap (const IE::ConstInputsDataMap& inputs) { IE::InputsDataMap transformed; auto convert = [](const std::pair& p) { - return std::make_pair(p.first, std::make_shared(*p.second)); + return std::make_pair(p.first, std::const_pointer_cast(p.second)); }; std::transform(inputs.begin(), inputs.end(), std::inserter(transformed, transformed.end()), convert); return transformed; @@ -34,7 +34,7 @@ IE::InputsDataMap giewrap::toInputsDataMap (const IE::ConstInputsDataMap& inputs IE::OutputsDataMap giewrap::toOutputsDataMap (const IE::ConstOutputsDataMap& outputs) { IE::OutputsDataMap transformed; auto convert = [](const std::pair& p) { - return std::make_pair(p.first, std::make_shared(*p.second)); + return std::make_pair(p.first, std::const_pointer_cast(p.second)); }; 
std::transform(outputs.begin(), outputs.end(), std::inserter(transformed, transformed.end()), convert); return transformed; From bb5b628cce18c3b947c12aef6df5063244b8d1ea Mon Sep 17 00:00:00 2001 From: Roman Kazantsev Date: Fri, 6 Nov 2020 09:51:40 +0300 Subject: [PATCH 071/152] Use explicit opset of Unsqueeze from nGraph backporting commit eb24575e2ce6ae56613fe4b9709ea55b4d8a228e --- modules/dnn/src/layers/prior_box_layer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/dnn/src/layers/prior_box_layer.cpp b/modules/dnn/src/layers/prior_box_layer.cpp index 7385afd3b0..dd39ce4417 100644 --- a/modules/dnn/src/layers/prior_box_layer.cpp +++ b/modules/dnn/src/layers/prior_box_layer.cpp @@ -595,7 +595,7 @@ public: auto priorBox = std::make_shared(slice_layer, slice_image, attrs); auto axis = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, std::vector{0}); - auto unsqueeze = std::make_shared(priorBox, axis); + auto unsqueeze = std::make_shared(priorBox, axis); return Ptr(new InfEngineNgraphNode(unsqueeze)); } else @@ -616,7 +616,7 @@ public: auto priorBox = std::make_shared(slice_layer, slice_image, attrs); auto axis = std::make_shared(ngraph::element::i64, ngraph::Shape{1}, std::vector{0}); - auto unsqueeze = std::make_shared(priorBox, axis); + auto unsqueeze = std::make_shared(priorBox, axis); return Ptr(new InfEngineNgraphNode(unsqueeze)); } } From bed5debca639f35931b778cf9e7727e4f27c7659 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sat, 7 Nov 2020 17:27:33 +0000 Subject: [PATCH 072/152] dnn: use OpenVINO 2021.1 defines --- cmake/OpenCVDetectInferenceEngine.cmake | 4 ++-- modules/dnn/src/op_inf_engine.hpp | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/cmake/OpenCVDetectInferenceEngine.cmake b/cmake/OpenCVDetectInferenceEngine.cmake index 3eaf890f32..ceb7b7989c 100644 --- a/cmake/OpenCVDetectInferenceEngine.cmake +++ b/cmake/OpenCVDetectInferenceEngine.cmake @@ -135,9 +135,9 @@ endif() 
if(INF_ENGINE_TARGET) if(NOT INF_ENGINE_RELEASE) - message(WARNING "InferenceEngine version has not been set, 2020.4 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.") + message(WARNING "InferenceEngine version has not been set, 2021.1 will be used by default. Set INF_ENGINE_RELEASE variable if you experience build errors.") endif() - set(INF_ENGINE_RELEASE "2020040000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)") + set(INF_ENGINE_RELEASE "2021010000" CACHE STRING "Force IE version, should be in form YYYYAABBCC (e.g. 2020.1.0.2 -> 2020010002)") set_target_properties(${INF_ENGINE_TARGET} PROPERTIES INTERFACE_COMPILE_DEFINITIONS "HAVE_INF_ENGINE=1;INF_ENGINE_RELEASE=${INF_ENGINE_RELEASE}" ) diff --git a/modules/dnn/src/op_inf_engine.hpp b/modules/dnn/src/op_inf_engine.hpp index e8fdada99a..bb9563f4ac 100644 --- a/modules/dnn/src/op_inf_engine.hpp +++ b/modules/dnn/src/op_inf_engine.hpp @@ -27,10 +27,11 @@ #define INF_ENGINE_RELEASE_2020_2 2020020000 #define INF_ENGINE_RELEASE_2020_3 2020030000 #define INF_ENGINE_RELEASE_2020_4 2020040000 +#define INF_ENGINE_RELEASE_2021_1 2021010000 #ifndef INF_ENGINE_RELEASE -#warning("IE version have not been provided via command-line. Using 2020.4 by default") -#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2020_4 +#warning("IE version have not been provided via command-line. Using 2021.1 by default") +#define INF_ENGINE_RELEASE INF_ENGINE_RELEASE_2021_1 #endif #define INF_ENGINE_VER_MAJOR_GT(ver) (((INF_ENGINE_RELEASE) / 10000) > ((ver) / 10000)) From df7bf9a048468e424831d521721ca8e028ae0621 Mon Sep 17 00:00:00 2001 From: catree Date: Sun, 8 Nov 2020 14:42:47 +0100 Subject: [PATCH 073/152] Fix typo in OpenCVFindOpenBLAS.cmake file. 
--- cmake/OpenCVFindOpenBLAS.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/OpenCVFindOpenBLAS.cmake b/cmake/OpenCVFindOpenBLAS.cmake index 6cb486d95d..d1db034908 100644 --- a/cmake/OpenCVFindOpenBLAS.cmake +++ b/cmake/OpenCVFindOpenBLAS.cmake @@ -57,7 +57,7 @@ SET(Open_BLAS_INCLUDE_SEARCH_PATHS ) SET(Open_BLAS_LIB_SEARCH_PATHS - $ENV{OpenBLAS}cd + $ENV{OpenBLAS} $ENV{OpenBLAS}/lib $ENV{OpenBLAS_HOME} $ENV{OpenBLAS_HOME}/lib From a104e7c59368f6500c5a9083e1d577d48f9c54ee Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 9 Nov 2020 19:12:09 +0000 Subject: [PATCH 074/152] doxygen: adjust IMAGE_PATH, allow custom OPENCV_DOCS_EXTRA_IMAGE_PATH - add opencv/modules - add opencv_contrib/modules --- doc/CMakeLists.txt | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/doc/CMakeLists.txt b/doc/CMakeLists.txt index 107c01a144..83859314b3 100644 --- a/doc/CMakeLists.txt +++ b/doc/CMakeLists.txt @@ -130,9 +130,23 @@ if(DOXYGEN_FOUND) set(tutorial_js_path "${CMAKE_CURRENT_SOURCE_DIR}/js_tutorials") set(example_path "${CMAKE_SOURCE_DIR}/samples") + set(doxygen_image_path + ${CMAKE_CURRENT_SOURCE_DIR}/images + ${paths_doc} + ${tutorial_path} + ${tutorial_py_path} + ${tutorial_js_path} + ${paths_tutorial} + #${OpenCV_SOURCE_DIR}/samples/data # TODO: need to resolve ambiguous conflicts first + ${OpenCV_SOURCE_DIR} + ${OpenCV_SOURCE_DIR}/modules # /modules + ${OPENCV_EXTRA_MODULES_PATH} # /modules + ${OPENCV_DOCS_EXTRA_IMAGE_PATH} # custom variable for user modules + ) + # set export variables string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_INPUT_LIST "${rootfile} ; ${faqfile} ; ${paths_include} ; ${paths_hal_interface} ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; ${paths_tutorial} ; ${tutorial_contrib_root}") - string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/images ; ${paths_doc} ; ${tutorial_path} ; ${tutorial_py_path} ; ${tutorial_js_path} ; 
${paths_tutorial}") + string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_IMAGE_PATH "${doxygen_image_path}") string(REPLACE ";" " \\\n" CMAKE_DOXYGEN_EXCLUDE_LIST "${CMAKE_DOXYGEN_EXCLUDE_LIST}") string(REPLACE ";" " " CMAKE_DOXYGEN_ENABLED_SECTIONS "${CMAKE_DOXYGEN_ENABLED_SECTIONS}") # TODO: remove paths_doc from EXAMPLE_PATH after face module tutorials/samples moved to separate folders From 08271e5591770e0c1efaaff839db6da03401593b Mon Sep 17 00:00:00 2001 From: Igor Murzov Date: Tue, 10 Nov 2020 15:36:13 +0300 Subject: [PATCH 075/152] Fix code snippets inclusion into video tutorials Code snippets need a section marked with ### above to render properly --- .../background_subtraction.markdown | 3 +-- .../video_input_psnr_ssim.markdown | 13 ++++++------- .../videoio/video-write/video_write.markdown | 2 +- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/doc/tutorials/video/background_subtraction/background_subtraction.markdown b/doc/tutorials/video/background_subtraction/background_subtraction.markdown index 91dbd02d9b..267acc6f60 100644 --- a/doc/tutorials/video/background_subtraction/background_subtraction.markdown +++ b/doc/tutorials/video/background_subtraction/background_subtraction.markdown @@ -32,8 +32,7 @@ In this tutorial you will learn how to: -# Create and update the background model by using @ref cv::BackgroundSubtractor class; -# Get and show the foreground mask by using @ref cv::imshow ; -Code ----- +### Code In the following you can find the source code. We will let the user choose to process either a video file or a sequence of images. 
diff --git a/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown b/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown index 2cd038255a..08cc596964 100644 --- a/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown +++ b/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown @@ -126,8 +126,7 @@ captRefrnc.set(CAP_PROP_POS_FRAMES, 10); // go to the 10th frame of the video For properties you can read and change look into the documentation of the @ref cv::VideoCapture::get and @ref cv::VideoCapture::set functions. -Image similarity - PSNR and SSIM --------------------------------- +### Image similarity - PSNR and SSIM We want to check just how imperceptible our video converting operation went, therefore we need a system to check frame by frame the similarity or differences. The most common algorithm used for @@ -145,15 +144,15 @@ Here the \f$MAX_I\f$ is the maximum valid value for a pixel. In case of the simp per pixel per channel this is 255. When two images are the same the MSE will give zero, resulting in an invalid divide by zero operation in the PSNR formula. In this case the PSNR is undefined and as we'll need to handle this case separately. The transition to a logarithmic scale is made because the -pixel values have a very wide dynamic range. All this translated to OpenCV and a C++ function looks +pixel values have a very wide dynamic range. 
All this translated to OpenCV and a function looks like: @add_toggle_cpp -@include cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-psnr +@snippet cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-psnr @end_toggle @add_toggle_python -@include samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-psnr +@snippet samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-psnr @end_toggle Typically result values are anywhere between 30 and 50 for video compression, where higher is @@ -172,11 +171,11 @@ implementation below. Transactions on Image Processing, vol. 13, no. 4, pp. 600-612, Apr. 2004." article. @add_toggle_cpp -@include cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-mssim +@snippet samples/cpp/tutorial_code/videoio/video-input-psnr-ssim/video-input-psnr-ssim.cpp get-mssim @end_toggle @add_toggle_python -@include samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-mssim +@snippet samples/python/tutorial_code/videoio/video-input-psnr-ssim.py get-mssim @end_toggle This will return a similarity index for each channel of the image. This value is between zero and diff --git a/doc/tutorials/videoio/video-write/video_write.markdown b/doc/tutorials/videoio/video-write/video_write.markdown index feafc4408d..b81107559e 100644 --- a/doc/tutorials/videoio/video-write/video_write.markdown +++ b/doc/tutorials/videoio/video-write/video_write.markdown @@ -63,7 +63,7 @@ specialized video writing libraries such as *FFMpeg* or codecs as *HuffYUV*, *Co an alternative, create the video track with OpenCV and expand it with sound tracks or convert it to other formats by using video manipulation programs such as *VirtualDub* or *AviSynth*. 
-The *VideoWriter* class +The VideoWriter class ----------------------- The content written here builds on the assumption you From 5f1ca33c6f06727665692ea43988caf5f8caa02b Mon Sep 17 00:00:00 2001 From: Orest Chura Date: Tue, 10 Nov 2020 21:57:52 +0300 Subject: [PATCH 076/152] Merge pull request #18652 from OrestChura:oc/morphologyEx [G-API]: morphologyEx() Standard Kernel Implementation * cv::gapi::morphologyEx() kernel - implemented (without separate 3x3 version) - tests added: check only different operations, not kernels/borders * Address comments: add `const` where needed * Replaced fundamental tyeps -> enums where needed - added operator<< overload for cv::MorphTypes for tests output --- modules/gapi/include/opencv2/gapi/imgproc.hpp | 41 ++++++++++++++++++- modules/gapi/src/api/kernels_imgproc.cpp | 7 ++++ modules/gapi/src/backends/cpu/gcpuimgproc.cpp | 11 +++++ .../gapi/test/common/gapi_imgproc_tests.hpp | 2 + .../test/common/gapi_imgproc_tests_inl.hpp | 23 +++++++++++ .../gapi/test/common/gapi_tests_common.hpp | 19 +++++++++ .../gapi/test/cpu/gapi_imgproc_tests_cpu.cpp | 24 +++++++++++ 7 files changed, 126 insertions(+), 1 deletion(-) diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp b/modules/gapi/include/opencv2/gapi/imgproc.hpp index 294b3b7842..cc091dfa8e 100644 --- a/modules/gapi/include/opencv2/gapi/imgproc.hpp +++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp @@ -78,6 +78,14 @@ namespace imgproc { } }; + G_TYPED_KERNEL(GMorphologyEx, , + "org.opencv.imgproc.filters.morphologyEx") { + static GMatDesc outMeta(const GMatDesc &in, MorphTypes, Mat, Point, int, + BorderTypes, Scalar) { + return in; + } + }; + G_TYPED_KERNEL(GSobel, , "org.opencv.imgproc.filters.sobel") { static GMatDesc outMeta(GMatDesc in, int ddepth, int, int, int, double, double, int, Scalar) { return in.withDepth(ddepth); @@ -521,7 +529,7 @@ anchor is at the element center. @param iterations number of times erosion is applied. 
@param borderType pixel extrapolation method, see cv::BorderTypes @param borderValue border value in case of a constant border -@sa dilate +@sa dilate, morphologyEx */ GAPI_EXPORTS GMat erode(const GMat& src, const Mat& kernel, const Point& anchor = Point(-1,-1), int iterations = 1, int borderType = BORDER_CONSTANT, @@ -596,6 +604,37 @@ GAPI_EXPORTS GMat dilate3x3(const GMat& src, int iterations = 1, int borderType = BORDER_CONSTANT, const Scalar& borderValue = morphologyDefaultBorderValue()); +/** @brief Performs advanced morphological transformations. + +The function can perform advanced morphological transformations using an erosion and dilation as +basic operations. + +Any of the operations can be done in-place. In case of multi-channel images, each channel is +processed independently. + +@note Function textual ID is "org.opencv.imgproc.filters.morphologyEx" + +@param src Input image. +@param op Type of a morphological operation, see #MorphTypes +@param kernel Structuring element. It can be created using #getStructuringElement. +@param anchor Anchor position within the element. Both negative values mean that the anchor is at +the kernel center. +@param iterations Number of times erosion and dilation are applied. +@param borderType Pixel extrapolation method, see #BorderTypes. #BORDER_WRAP is not supported. +@param borderValue Border value in case of a constant border. The default value has a special +meaning. +@sa dilate, erode, getStructuringElement +@note The number of iterations is the number of times erosion or dilatation operation will be +applied. For instance, an opening operation (#MORPH_OPEN) with two iterations is equivalent to +apply successively: erode -> erode -> dilate -> dilate +(and not erode -> dilate -> erode -> dilate). 
+ */ +GAPI_EXPORTS GMat morphologyEx(const GMat &src, const MorphTypes op, const Mat &kernel, + const Point &anchor = Point(-1,-1), + const int iterations = 1, + const BorderTypes borderType = BORDER_CONSTANT, + const Scalar &borderValue = morphologyDefaultBorderValue()); + /** @brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator. In all cases except one, the \f$\texttt{ksize} \times \texttt{ksize}\f$ separable kernel is used to diff --git a/modules/gapi/src/api/kernels_imgproc.cpp b/modules/gapi/src/api/kernels_imgproc.cpp index 652f83935f..9a5b07c14a 100644 --- a/modules/gapi/src/api/kernels_imgproc.cpp +++ b/modules/gapi/src/api/kernels_imgproc.cpp @@ -73,6 +73,13 @@ GMat dilate3x3(const GMat& src, int iterations, return dilate(src, cv::Mat(), cv::Point(-1,-1), iterations, borderType, borderValue); } +GMat morphologyEx(const GMat &src, const MorphTypes op, const Mat &kernel, const Point &anchor, + const int iterations, const BorderTypes borderType, const Scalar &borderValue) +{ + return imgproc::GMorphologyEx::on(src, op, kernel, anchor, iterations, + borderType, borderValue); +} + GMat Sobel(const GMat& src, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType, const Scalar& bordVal) diff --git a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp index c07ed6785c..a3c4e1b60f 100644 --- a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp +++ b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp @@ -145,6 +145,16 @@ GAPI_OCV_KERNEL(GCPUDilate, cv::gapi::imgproc::GDilate) } }; +GAPI_OCV_KERNEL(GCPUMorphologyEx, cv::gapi::imgproc::GMorphologyEx) +{ + static void run(const cv::Mat &in, const cv::MorphTypes op, const cv::Mat &kernel, + const cv::Point &anchor, const int iterations, + const cv::BorderTypes borderType, const cv::Scalar &borderValue, cv::Mat &out) + { + cv::morphologyEx(in, out, op, kernel, anchor, iterations, borderType, borderValue); + } 
+}; + GAPI_OCV_KERNEL(GCPUSobel, cv::gapi::imgproc::GSobel) { static void run(const cv::Mat& in, int ddepth, int dx, int dy, int ksize, double scale, double delta, int borderType, @@ -478,6 +488,7 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels() , GCPUMedianBlur , GCPUErode , GCPUDilate + , GCPUMorphologyEx , GCPUSobel , GCPUSobelXY , GCPULaplacian diff --git a/modules/gapi/test/common/gapi_imgproc_tests.hpp b/modules/gapi/test/common/gapi_imgproc_tests.hpp index 38a02985e7..d562b306c2 100644 --- a/modules/gapi/test/common/gapi_imgproc_tests.hpp +++ b/modules/gapi/test/common/gapi_imgproc_tests.hpp @@ -46,6 +46,8 @@ GAPI_TEST_FIXTURE(Erode3x3Test, initMatrixRandN, FIXTURE_API(CompareMats,int), 2 GAPI_TEST_FIXTURE(DilateTest, initMatrixRandN, FIXTURE_API(CompareMats,int,int), 3, cmpF, kernSize, kernType) GAPI_TEST_FIXTURE(Dilate3x3Test, initMatrixRandN, FIXTURE_API(CompareMats,int), 2, cmpF, numIters) +GAPI_TEST_FIXTURE(MorphologyExTest, initMatrixRandN, FIXTURE_API(CompareMats,MorphTypes), + 2, cmpF, op) GAPI_TEST_FIXTURE(SobelTest, initMatrixRandN, FIXTURE_API(CompareMats,int,int,int), 4, cmpF, kernSize, dx, dy) GAPI_TEST_FIXTURE(SobelXYTest, initMatrixRandN, FIXTURE_API(CompareMats,int,int,int,int), 5, diff --git a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp index 95728e87b7..c087733fa8 100644 --- a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp +++ b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp @@ -290,6 +290,29 @@ TEST_P(Dilate3x3Test, AccuracyTest) } } +TEST_P(MorphologyExTest, AccuracyTest) +{ + MorphShapes defShape = cv::MORPH_RECT; + int defKernSize = 3; + cv::Mat kernel = cv::getStructuringElement(defShape, cv::Size(defKernSize, defKernSize)); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::morphologyEx(in, op, kernel); + + cv::GComputation c(in, out); + c.apply(in_mat1, out_mat_gapi, 
getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::morphologyEx(in_mat1, out_mat_ocv, op, kernel); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_mat_gapi, out_mat_ocv)); + EXPECT_EQ(out_mat_gapi.size(), sz); + } +} + TEST_P(SobelTest, AccuracyTest) { // G-API code ////////////////////////////////////////////////////////////// diff --git a/modules/gapi/test/common/gapi_tests_common.hpp b/modules/gapi/test/common/gapi_tests_common.hpp index 113f3c73c0..bb045b83d1 100644 --- a/modules/gapi/test/common/gapi_tests_common.hpp +++ b/modules/gapi/test/common/gapi_tests_common.hpp @@ -848,6 +848,25 @@ inline std::ostream& operator<<(std::ostream& os, NormTypes op) #undef CASE return os; } + +inline std::ostream& operator<<(std::ostream& os, MorphTypes op) +{ +#define CASE(v) case MorphTypes::v: os << #v; break + switch (op) + { + CASE(MORPH_ERODE); + CASE(MORPH_DILATE); + CASE(MORPH_OPEN); + CASE(MORPH_CLOSE); + CASE(MORPH_GRADIENT); + CASE(MORPH_TOPHAT); + CASE(MORPH_BLACKHAT); + CASE(MORPH_HITMISS); + default: GAPI_Assert(false && "unknown MorphTypes value"); + } +#undef CASE + return os; +} } // namespace cv #endif //OPENCV_GAPI_TESTS_COMMON_HPP diff --git a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp index e7f9667096..7cba6b05db 100644 --- a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp +++ b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp @@ -130,6 +130,30 @@ INSTANTIATE_TEST_CASE_P(Dilate3x3TestCPU, Dilate3x3Test, Values(AbsExact().to_compare_obj()), Values(1,2,4))); +INSTANTIATE_TEST_CASE_P(MorphologyExTestCPU, MorphologyExTest, + Combine(Values(CV_8UC1, CV_8UC3, CV_16UC1, CV_16SC1, CV_32FC1), + Values(cv::Size(1280, 720), + cv::Size(640, 480)), + Values(-1), + Values(IMGPROC_CPU), + Values(AbsExact().to_compare_obj()), + Values(cv::MorphTypes::MORPH_ERODE, + cv::MorphTypes::MORPH_DILATE, 
+ cv::MorphTypes::MORPH_OPEN, + cv::MorphTypes::MORPH_CLOSE, + cv::MorphTypes::MORPH_GRADIENT, + cv::MorphTypes::MORPH_TOPHAT, + cv::MorphTypes::MORPH_BLACKHAT))); + +INSTANTIATE_TEST_CASE_P(MorphologyExHitMissTestCPU, MorphologyExTest, + Combine(Values(CV_8UC1), + Values(cv::Size(1280, 720), + cv::Size(640, 480)), + Values(-1), + Values(IMGPROC_CPU), + Values(AbsExact().to_compare_obj()), + Values(cv::MorphTypes::MORPH_HITMISS))); + INSTANTIATE_TEST_CASE_P(SobelTestCPU, SobelTest, Combine(Values(CV_8UC1, CV_8UC3, CV_16UC1, CV_16SC1), Values(cv::Size(1280, 720), From 5dae27865244c0ff1ade47fcdd579457a394d6fc Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sat, 7 Nov 2020 18:25:48 +0000 Subject: [PATCH 077/152] bindings: "inline namespace" --- modules/python/src2/hdr_parser.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/python/src2/hdr_parser.py b/modules/python/src2/hdr_parser.py index eba7000d47..d8b04b43ce 100755 --- a/modules/python/src2/hdr_parser.py +++ b/modules/python/src2/hdr_parser.py @@ -658,6 +658,10 @@ class CppHeaderParser(object): stack_top = self.block_stack[-1] context = stack_top[self.BLOCK_TYPE] + if stmt.startswith('inline namespace'): + # emulate anonymous namespace + return "namespace", "", True, None + stmt_type = "" if end_token == "{": stmt_type = "block" From 1b0dca9c2c7c2921303451be7676961447516edf Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Wed, 11 Nov 2020 01:54:01 +0300 Subject: [PATCH 078/152] Fix issues found by static analysis --- modules/calib3d/src/usac/ransac_solvers.cpp | 4 ++-- modules/core/src/dxt.cpp | 4 ++-- modules/gapi/include/opencv2/gapi/render/render_types.hpp | 2 +- modules/objdetect/src/qrcode.cpp | 3 ++- 4 files changed, 7 insertions(+), 6 deletions(-) diff --git a/modules/calib3d/src/usac/ransac_solvers.cpp b/modules/calib3d/src/usac/ransac_solvers.cpp index 65fa2d3b9f..0c7637d582 100644 --- a/modules/calib3d/src/usac/ransac_solvers.cpp +++ 
b/modules/calib3d/src/usac/ransac_solvers.cpp @@ -286,7 +286,7 @@ public: current_score = quality->getScore(models[i]); } else { if (is_magsac && iters % repeat_magsac == 0) { - if (!local_optimization->refineModel + if (local_optimization && !local_optimization->refineModel (models[i], best_score_thread, models[i], current_score)) continue; } else if (model_verifier->isModelGood(models[i])) { @@ -1028,4 +1028,4 @@ bool run (const Ptr ¶ms, InputArray points1, InputArray points2 } return false; } -}} \ No newline at end of file +}} diff --git a/modules/core/src/dxt.cpp b/modules/core/src/dxt.cpp index b307703a32..fcdb2a202f 100644 --- a/modules/core/src/dxt.cpp +++ b/modules/core/src/dxt.cpp @@ -531,14 +531,14 @@ template struct DFT_R5 template struct DFT_VecR2 { void operator()(Complex* dst, const int c_n, const int n, const int dw0, const Complex* wave) const { - return DFT_R2()(dst, c_n, n, dw0, wave); + DFT_R2()(dst, c_n, n, dw0, wave); } }; template struct DFT_VecR3 { void operator()(Complex* dst, const int c_n, const int n, const int dw0, const Complex* wave) const { - return DFT_R3()(dst, c_n, n, dw0, wave); + DFT_R3()(dst, c_n, n, dw0, wave); } }; diff --git a/modules/gapi/include/opencv2/gapi/render/render_types.hpp b/modules/gapi/include/opencv2/gapi/render/render_types.hpp index 08b14d1ddd..ca403be361 100644 --- a/modules/gapi/include/opencv2/gapi/render/render_types.hpp +++ b/modules/gapi/include/opencv2/gapi/render/render_types.hpp @@ -252,7 +252,7 @@ struct Mosaic { } - Mosaic() = default; + Mosaic() : cellSz(0), decim(0) {} /*@{*/ cv::Rect mos; //!< Coordinates of the mosaic diff --git a/modules/objdetect/src/qrcode.cpp b/modules/objdetect/src/qrcode.cpp index c42bb8a309..d47f1d3a20 100644 --- a/modules/objdetect/src/qrcode.cpp +++ b/modules/objdetect/src/qrcode.cpp @@ -1122,7 +1122,7 @@ bool QRDecode::computeClosestPoints(const vector &result_integer_hull) { CV_TRACE_FUNCTION(); double min_norm, max_norm = 0.0; - size_t idx_min; + size_t idx_min = 
(size_t)-1; for (size_t i = 0; i < original_points.size(); i++) { min_norm = std::numeric_limits::max(); @@ -1144,6 +1144,7 @@ bool QRDecode::computeClosestPoints(const vector &result_integer_hull) max_norm = min_norm; unstable_pair = std::pair(i, closest_pnt); } + CV_Assert(idx_min != (size_t)-1); closest_points.push_back(std::pair(idx_min, closest_pnt)); } From 3fc1c73064e4112a1e24e9d2d1fd41e3eabe132e Mon Sep 17 00:00:00 2001 From: Orest Chura Date: Wed, 11 Nov 2020 15:13:10 +0300 Subject: [PATCH 079/152] Merge pull request #18510 from OrestChura:oc/boundingRect [G-API]: findContours() and boundingRect() Standard Kernels Implementation * Add findContours() standard kernel - API and documentation provided: - as OpenCV provides two overloads whether to calculate hierarchy or not, but they differ by only the output in sight of G-API, two different G-API functions and kernels implemented - G-API Imgproc documentation divided into more parts according to imgproc module parts - some typos connected with division into parts corrected - `GArray>` overload for `get_out` function provided to coonvert correctly into `vector>` - OCV backend supported - accuracy tests provided * Add boundingRect() standard kernel - API and documentation provided: - GOpaque used as an output - as OpenCV provides two possibilities whether to take a gray-scale image or a set of 2D points (`Point2i` or `Point2f` supported), three different overloads of a single G-API function and three kernels implemented - for a gray-scale image the overload via `GMat` - for a set of `Point2i` - the one via GArray<`Point2i`> - set of `Point2f` -> GArray<`Point2f`> - OCV backend supported - accuracy tests provided - comparison function for Rects provided - some typos in `gapi_tests_common` corrected * Fix precommit windows warnings * - Addressing comments: - split tests - Fix Windows warnings * Static_cast for warnings * - Remove randomness - Fix unnecessary precision losses * - Forgot reference for RNG * 
addressing comments * equalizeHist -> no group * `const` addedin new functions * Address suggestions: - Hierarchical -> H - added cv::GMatDesc::isVectorPoins() - added support of giving a set of points to boundingRect() * Addressing comments - IoU comparison function added for Rects - isPointsVector moved from a GMatDesc method to a separate function in imgproc.hpp - enums instead of int - typos corrected * Addressing comments - findContours: Point offset -> GOpaque - removed "straight" comparison for Rects, IoU available only - changed vectors initialization -> fix Debug test run - Some typos * added comment for later upgrades * Fix not to corrupt docs by FIXME * Addressing commens - overload without offset added (as a temporary workaround) - checkMetaForFindingContours -> validateFindingContoursMeta - added ostream overload for enums used in tests --- .../include/opencv2/gapi/cpu/gcpukernel.hpp | 5 + modules/gapi/include/opencv2/gapi/imgproc.hpp | 240 ++++++++++++++- modules/gapi/src/api/kernels_imgproc.cpp | 42 +++ modules/gapi/src/backends/cpu/gcpuimgproc.cpp | 71 +++++ .../gapi/test/common/gapi_imgproc_tests.hpp | 15 + .../test/common/gapi_imgproc_tests_inl.hpp | 282 ++++++++++++++++++ .../gapi/test/common/gapi_tests_common.hpp | 108 ++++++- .../gapi/test/cpu/gapi_imgproc_tests_cpu.cpp | 72 +++++ 8 files changed, 821 insertions(+), 14 deletions(-) diff --git a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp index 741fbe18f0..5dd70bd2e8 100644 --- a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp +++ b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp @@ -271,6 +271,11 @@ template<> struct get_out >: public get_out>/GArray> conversion should be done more gracefully in the system +template struct get_out> >: public get_out> > +{ +}; + template struct get_out> { static U& get(GCPUContext &ctx, int idx) diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp 
b/modules/gapi/include/opencv2/gapi/imgproc.hpp index cc091dfa8e..0e4254cb87 100644 --- a/modules/gapi/include/opencv2/gapi/imgproc.hpp +++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp @@ -21,14 +21,45 @@ @{ @defgroup gapi_filters Graph API: Image filters @defgroup gapi_colorconvert Graph API: Converting image from one color space to another + @defgroup gapi_feature Graph API: Image Feature Detection + @defgroup gapi_shape Graph API: Image Structural Analysis and Shape Descriptors @} */ +namespace { +void validateFindingContoursMeta(const int depth, const int chan, const int mode) +{ + GAPI_Assert(chan == 1); + switch (mode) + { + case cv::RETR_CCOMP: + GAPI_Assert(depth == CV_8U || depth == CV_32S); + break; + case cv::RETR_FLOODFILL: + GAPI_Assert(depth == CV_32S); + break; + default: + GAPI_Assert(depth == CV_8U); + break; + } +} + +// Checks if the passed mat is a set of n-dimentional points of the given depth +bool isPointsVector(const int chan, const cv::Size &size, const int depth, + const int n, const int ddepth) +{ + return (ddepth == depth || ddepth < 0) && + ((chan == n && (size.height == 1 || size.width == 1)) || + (chan == 1 && size.width == n)); +} +} // anonymous namespace + namespace cv { namespace gapi { namespace imgproc { using GMat2 = std::tuple; using GMat3 = std::tuple; // FIXME: how to avoid this? 
+ using GFindContoursOutput = std::tuple>,GArray>; G_TYPED_KERNEL(GFilter2D, ,"org.opencv.imgproc.filters.filter2D") { static GMatDesc outMeta(GMatDesc in, int ddepth, Mat, Point, Scalar, int, Scalar) { @@ -118,7 +149,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GCanny, , "org.opencv.imgproc.canny"){ + G_TYPED_KERNEL(GCanny, , "org.opencv.imgproc.feature.canny"){ static GMatDesc outMeta(GMatDesc in, double, double, int, bool) { return in.withType(CV_8U, 1); } @@ -126,12 +157,83 @@ namespace imgproc { G_TYPED_KERNEL(GGoodFeatures, (GMat,int,double,double,Mat,int,bool,double)>, - "org.opencv.imgproc.goodFeaturesToTrack") { + "org.opencv.imgproc.feature.goodFeaturesToTrack") { static GArrayDesc outMeta(GMatDesc, int, double, double, const Mat&, int, bool, double) { return empty_array_desc(); } }; + using RetrMode = RetrievalModes; + using ContMethod = ContourApproximationModes; + G_TYPED_KERNEL(GFindContours, >(GMat,RetrMode,ContMethod,GOpaque)>, + "org.opencv.imgproc.shape.findContours") + { + static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return empty_array_desc(); + } + }; + + // FIXME oc: make default value offset = Point() + G_TYPED_KERNEL(GFindContoursNoOffset, >(GMat,RetrMode,ContMethod)>, + "org.opencv.imgproc.shape.findContoursNoOffset") + { + static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return empty_array_desc(); + } + }; + + G_TYPED_KERNEL(GFindContoursH,)>, + "org.opencv.imgproc.shape.findContoursH") + { + static std::tuple + outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return std::make_tuple(empty_array_desc(), empty_array_desc()); + } + }; + + // FIXME oc: make default value offset = Point() + G_TYPED_KERNEL(GFindContoursHNoOffset,, + "org.opencv.imgproc.shape.findContoursHNoOffset") + { + static 
std::tuple + outMeta(GMatDesc in, RetrMode mode, ContMethod) + { + validateFindingContoursMeta(in.depth, in.chan, mode); + return std::make_tuple(empty_array_desc(), empty_array_desc()); + } + }; + + G_TYPED_KERNEL(GBoundingRectMat, (GMat)>, + "org.opencv.imgproc.shape.boundingRectMat") { + static GOpaqueDesc outMeta(GMatDesc in) { + GAPI_Assert((in.depth == CV_8U && in.chan == 1) || + (isPointsVector(in.chan, in.size, in.depth, 2, CV_32S) || + isPointsVector(in.chan, in.size, in.depth, 2, CV_32F))); + + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GBoundingRectVector32S, (GArray)>, + "org.opencv.imgproc.shape.boundingRectVector32S") { + static GOpaqueDesc outMeta(GArrayDesc) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GBoundingRectVector32F, (GArray)>, + "org.opencv.imgproc.shape.boundingRectVector32F") { + static GOpaqueDesc outMeta(GArrayDesc) { + return empty_gopaque_desc(); + } + }; + G_TYPED_KERNEL(GBGR2RGB, , "org.opencv.imgproc.colorconvert.bgr2rgb") { static GMatDesc outMeta(GMatDesc in) { return in; // type still remains CV_8UC3; @@ -280,7 +382,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GNV12toRGBp, , "org.opencv.colorconvert.imgproc.nv12torgbp") { + G_TYPED_KERNEL(GNV12toRGBp, , "org.opencv.imgproc.colorconvert.nv12torgbp") { static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) { GAPI_Assert(inY.depth == CV_8U); GAPI_Assert(inUV.depth == CV_8U); @@ -294,7 +396,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GNV12toGray, , "org.opencv.colorconvert.imgproc.nv12togray") { + G_TYPED_KERNEL(GNV12toGray, , "org.opencv.imgproc.colorconvert.nv12togray") { static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) { GAPI_Assert(inY.depth == CV_8U); GAPI_Assert(inUV.depth == CV_8U); @@ -309,7 +411,7 @@ namespace imgproc { } }; - G_TYPED_KERNEL(GNV12toBGRp, , "org.opencv.colorconvert.imgproc.nv12tobgrp") { + G_TYPED_KERNEL(GNV12toBGRp, , "org.opencv.imgproc.colorconvert.nv12tobgrp") { static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) { 
GAPI_Assert(inY.depth == CV_8U); GAPI_Assert(inUV.depth == CV_8U); @@ -800,6 +902,10 @@ proportional to sigmaSpace. GAPI_EXPORTS GMat bilateralFilter(const GMat& src, int d, double sigmaColor, double sigmaSpace, int borderType = BORDER_DEFAULT); +//! @} gapi_filters + +//! @addtogroup gapi_feature +//! @{ /** @brief Finds edges in an image using the Canny algorithm. The function finds edges in the input image and marks them in the output map edges using the @@ -807,7 +913,7 @@ Canny algorithm. The smallest value between threshold1 and threshold2 is used fo largest value is used to find initial segments of strong edges. See -@note Function textual ID is "org.opencv.imgproc.filters.canny" +@note Function textual ID is "org.opencv.imgproc.feature.canny" @param image 8-bit input image. @param threshold1 first threshold for the hysteresis procedure. @@ -842,7 +948,7 @@ The function can be used to initialize a point-based tracker of an object. A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector with qualityLevel=B . -@note Function textual ID is "org.opencv.imgproc.goodFeaturesToTrack" +@note Function textual ID is "org.opencv.imgproc.feature.goodFeaturesToTrack" @param image Input 8-bit or floating-point 32-bit, single-channel image. @param maxCorners Maximum number of corners to return. If there are more corners than are found, @@ -876,6 +982,8 @@ GAPI_EXPORTS GArray goodFeaturesToTrack(const GMat &image, /** @brief Equalizes the histogram of a grayscale image. +//! @} gapi_feature + The function equalizes the histogram of the input image using the following algorithm: - Calculate the histogram \f$H\f$ for src . @@ -893,6 +1001,120 @@ The algorithm normalizes the brightness and increases the contrast of the image. */ GAPI_EXPORTS GMat equalizeHist(const GMat& src); +//! @addtogroup gapi_shape +//! @{ +/** @brief Finds contours in a binary image. 
+ +The function retrieves contours from the binary image using the algorithm @cite Suzuki85 . +The contours are a useful tool for shape analysis and object detection and recognition. +See squares.cpp in the OpenCV sample directory. + +@note Function textual ID is "org.opencv.imgproc.shape.findContours" + +@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero +pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold , +#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one. +If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer +image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL then @ref CV_32SC1 is supported only. +@param mode Contour retrieval mode, see #RetrievalModes +@param method Contour approximation method, see #ContourApproximationModes +@param offset Optional offset by which every contour point is shifted. This is useful if the +contours are extracted from the image ROI and then they should be analyzed in the whole image +context. + +@return GArray of detected contours. Each contour is stored as a GArray of points. + */ +GAPI_EXPORTS GArray> +findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method, + const GOpaque &offset); + +// FIXME oc: make default value offset = Point() +/** @overload +@note Function textual ID is "org.opencv.imgproc.shape.findContoursNoOffset" + */ +GAPI_EXPORTS GArray> +findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method); + +/** @brief Finds contours and their hierarchy in a binary image. + +The function retrieves contours from the binary image using the algorithm @cite Suzuki85 +and calculates their hierarchy. +The contours are a useful tool for shape analysis and object detection and recognition. +See squares.cpp in the OpenCV sample directory. 
+ +@note Function textual ID is "org.opencv.imgproc.shape.findContoursH" + +@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero +pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold , +#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one. +If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer +image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL -- @ref CV_32SC1 supports only. +@param mode Contour retrieval mode, see #RetrievalModes +@param method Contour approximation method, see #ContourApproximationModes +@param offset Optional offset by which every contour point is shifted. This is useful if the +contours are extracted from the image ROI and then they should be analyzed in the whole image +context. + +@return GArray of detected contours. Each contour is stored as a GArray of points. +@return Optional output GArray of cv::Vec4i, containing information about the image topology. +It has as many elements as the number of contours. For each i-th contour contours[i], the elements +hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based +indices in contours of the next and previous contours at the same hierarchical level, the first +child contour and the parent contour, respectively. If for the contour i there are no next, +previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative. 
+ */ +GAPI_EXPORTS std::tuple>,GArray> +findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method, + const GOpaque &offset); + +// FIXME oc: make default value offset = Point() +/** @overload +@note Function textual ID is "org.opencv.imgproc.shape.findContoursHNoOffset" + */ +GAPI_EXPORTS std::tuple>,GArray> +findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method); + +/** @brief Calculates the up-right bounding rectangle of a point set or non-zero pixels +of gray-scale image. + +The function calculates and returns the minimal up-right bounding rectangle for the specified +point set or non-zero pixels of gray-scale image. + +@note Function textual ID is "org.opencv.imgproc.shape.boundingRectMat" + +@param src Input gray-scale image @ref CV_8UC1; or input set of @ref CV_32S or @ref CV_32F +2D points stored in Mat. + +@note In case of a 2D points' set given, Mat should be 2-dimensional, have a single row or column +if there are 2 channels, or have 2 columns if there is a single channel. Mat should have either +@ref CV_32S or @ref CV_32F depth + */ +GAPI_EXPORTS GOpaque boundingRect(const GMat& src); + +/** @overload + +Calculates the up-right bounding rectangle of a point set. + +@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32S" + +@param src Input 2D point set, stored in std::vector. + */ +GAPI_EXPORTS GOpaque boundingRect(const GArray& src); + +/** @overload + +Calculates the up-right bounding rectangle of a point set. + +@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32F" + +@param src Input 2D point set, stored in std::vector. + */ +GAPI_EXPORTS GOpaque boundingRect(const GArray& src); + +//! @} gapi_shape + +//! @addtogroup gapi_colorconvert +//! @{ /** @brief Converts an image from BGR color space to RGB color space. The function converts an input image from BGR color space to RGB. 
@@ -907,10 +1129,6 @@ Output image is 8-bit unsigned 3-channel image @ref CV_8UC3. */ GAPI_EXPORTS GMat BGR2RGB(const GMat& src); -//! @} gapi_filters - -//! @addtogroup gapi_colorconvert -//! @{ /** @brief Converts an image from RGB color space to gray-scaled. The conventional ranges for R, G, and B channel values are 0 to 255. Resulting gray color value computed as diff --git a/modules/gapi/src/api/kernels_imgproc.cpp b/modules/gapi/src/api/kernels_imgproc.cpp index 9a5b07c14a..faf8de54c7 100644 --- a/modules/gapi/src/api/kernels_imgproc.cpp +++ b/modules/gapi/src/api/kernels_imgproc.cpp @@ -122,6 +122,48 @@ cv::GArray goodFeaturesToTrack(const GMat& image, int maxCorners, d useHarrisDetector, k); } +GArray> +findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method, + const GOpaque &offset) +{ + return imgproc::GFindContours::on(src, mode, method, offset); +} + +GArray> +findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method) +{ + return imgproc::GFindContoursNoOffset::on(src, mode, method); +} + + +std::tuple>,GArray> +findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method, + const GOpaque &offset) +{ + return imgproc::GFindContoursH::on(src, mode, method, offset); +} + +std::tuple>,GArray> +findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method) +{ + return imgproc::GFindContoursHNoOffset::on(src, mode, method); +} + +GOpaque boundingRect(const GMat& src) +{ + return imgproc::GBoundingRectMat::on(src); +} + +GOpaque boundingRect(const GArray& src) +{ + return imgproc::GBoundingRectVector32S::on(src); +} + +GOpaque boundingRect(const GArray& src) +{ + return imgproc::GBoundingRectVector32F::on(src); +} + GMat BGR2RGB(const GMat& src) { return imgproc::GBGR2RGB::on(src); diff --git a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp index 
a3c4e1b60f..9eca0f12f0 100644 --- a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp +++ b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp @@ -221,6 +221,70 @@ GAPI_OCV_KERNEL(GCPUGoodFeatures, cv::gapi::imgproc::GGoodFeatures) } }; +GAPI_OCV_KERNEL(GCPUFindContours, cv::gapi::imgproc::GFindContours) +{ + static void run(const cv::Mat& image, const cv::RetrievalModes mode, + const cv::ContourApproximationModes method, const cv::Point& offset, + std::vector> &outConts) + { + cv::findContours(image, outConts, mode, method, offset); + } +}; + +GAPI_OCV_KERNEL(GCPUFindContoursNoOffset, cv::gapi::imgproc::GFindContoursNoOffset) +{ + static void run(const cv::Mat& image, const cv::RetrievalModes mode, + const cv::ContourApproximationModes method, + std::vector> &outConts) + { + cv::findContours(image, outConts, mode, method); + } +}; + +GAPI_OCV_KERNEL(GCPUFindContoursH, cv::gapi::imgproc::GFindContoursH) +{ + static void run(const cv::Mat& image, const cv::RetrievalModes mode, + const cv::ContourApproximationModes method, const cv::Point& offset, + std::vector> &outConts, std::vector &outHier) + { + cv::findContours(image, outConts, outHier, mode, method, offset); + } +}; + +GAPI_OCV_KERNEL(GCPUFindContoursHNoOffset, cv::gapi::imgproc::GFindContoursHNoOffset) +{ + static void run(const cv::Mat& image, const cv::RetrievalModes mode, + const cv::ContourApproximationModes method, + std::vector> &outConts, std::vector &outHier) + { + cv::findContours(image, outConts, outHier, mode, method); + } +}; + +GAPI_OCV_KERNEL(GCPUBoundingRectMat, cv::gapi::imgproc::GBoundingRectMat) +{ + static void run(const cv::Mat& in, cv::Rect& out) + { + out = cv::boundingRect(in); + } +}; + +GAPI_OCV_KERNEL(GCPUBoundingRectVector32S, cv::gapi::imgproc::GBoundingRectVector32S) +{ + static void run(const std::vector& in, cv::Rect& out) + { + out = cv::boundingRect(in); + } +}; + +GAPI_OCV_KERNEL(GCPUBoundingRectVector32F, cv::gapi::imgproc::GBoundingRectVector32F) +{ + static void run(const 
std::vector& in, cv::Rect& out) + { + out = cv::boundingRect(in); + } +}; + GAPI_OCV_KERNEL(GCPUBGR2RGB, cv::gapi::imgproc::GBGR2RGB) { static void run(const cv::Mat& in, cv::Mat &out) @@ -496,8 +560,15 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels() , GCPUCanny , GCPUGoodFeatures , GCPUEqualizeHist + , GCPUFindContours + , GCPUFindContoursNoOffset + , GCPUFindContoursH + , GCPUFindContoursHNoOffset , GCPUBGR2RGB , GCPURGB2YUV + , GCPUBoundingRectMat + , GCPUBoundingRectVector32S + , GCPUBoundingRectVector32F , GCPUYUV2RGB , GCPUBGR2I420 , GCPURGB2I420 diff --git a/modules/gapi/test/common/gapi_imgproc_tests.hpp b/modules/gapi/test/common/gapi_imgproc_tests.hpp index d562b306c2..b27da28c87 100644 --- a/modules/gapi/test/common/gapi_imgproc_tests.hpp +++ b/modules/gapi/test/common/gapi_imgproc_tests.hpp @@ -66,6 +66,21 @@ GAPI_TEST_FIXTURE_SPEC_PARAMS(GoodFeaturesTest, double,int,bool), 8, cmpF, fileName, type, maxCorners, qualityLevel, minDistance, blockSize, useHarrisDetector) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursNoOffsetTest, + FIXTURE_API(cv::Size,MatType2,cv::RetrievalModes, + cv::ContourApproximationModes), + 4, sz, type, mode, method) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursOffsetTest, <>, 0) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursHNoOffsetTest, + FIXTURE_API(cv::Size,MatType2,cv::RetrievalModes, + cv::ContourApproximationModes), + 4, sz, type, mode, method) +GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursHOffsetTest, <>, 0) +GAPI_TEST_FIXTURE(BoundingRectMatTest, initMatrixRandU, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectMatVector32STest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectMatVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectVector32STest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) +GAPI_TEST_FIXTURE(BoundingRectVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) GAPI_TEST_FIXTURE(BGR2RGBTest, 
initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(RGB2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(BGR2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) diff --git a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp index c087733fa8..91e676c5e7 100644 --- a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp +++ b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp @@ -50,6 +50,27 @@ namespace rgb2yuyv(in_line_p, out_line_p, in.cols); } } + + // Draw random ellipses on given mat of given size and type + void initMatForFindingContours(cv::Mat& mat, const cv::Size& sz, const int type) + { + cv::RNG& rng = theRNG(); + mat = cv::Mat(sz, type, cv::Scalar::all(0)); + size_t numEllipses = rng.uniform(1, 10); + + for( size_t i = 0; i < numEllipses; i++ ) + { + cv::Point center; + cv::Size axes; + center.x = rng.uniform(0, sz.width); + center.y = rng.uniform(0, sz.height); + axes.width = rng.uniform(2, sz.width); + axes.height = rng.uniform(2, sz.height); + int color = rng.uniform(1, 256); + double angle = rng.uniform(0., 180.); + cv::ellipse(mat, center, axes, angle, 0., 360., color, 1, FILLED); + } + } } TEST_P(Filter2DTest, AccuracyTest) @@ -470,6 +491,267 @@ TEST_P(GoodFeaturesTest, AccuracyTest) } } +TEST_P(FindContoursNoOffsetTest, AccuracyTest) +{ + std::vector> outCtsOCV, outCtsGAPI; + + initMatForFindingContours(in_mat1, sz, type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::findContours(in_mat1, outCtsOCV, mode, method); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + cv::GArray> outCts; + outCts = cv::gapi::findContours(in, mode, method); + cv::GComputation c(GIn(in), GOut(outCts)); + c.apply(gin(in_mat1), gout(outCtsGAPI), 
getCompileArgs()); + + // Comparison ////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); +} + +TEST_P(FindContoursOffsetTest, AccuracyTest) +{ + const cv::Size sz(1280, 720); + const MatType2 type = CV_8UC1; + const cv::RetrievalModes mode = cv::RETR_EXTERNAL; + const cv::ContourApproximationModes method = cv::CHAIN_APPROX_NONE; + const cv::Point offset(15, 15); + std::vector> outCtsOCV, outCtsGAPI; + + initMatForFindingContours(in_mat1, sz, type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::findContours(in_mat1, outCtsOCV, mode, method, offset); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + GOpaque gOffset; + cv::GArray> outCts; + outCts = cv::gapi::findContours(in, mode, method, gOffset); + cv::GComputation c(GIn(in, gOffset), GOut(outCts)); + c.apply(gin(in_mat1, offset), gout(outCtsGAPI), getCompileArgs()); + + // Comparison ////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); +} + +TEST_P(FindContoursHNoOffsetTest, AccuracyTest) +{ + std::vector> outCtsOCV, outCtsGAPI; + std::vector outHierOCV, outHierGAPI; + + initMatForFindingContours(in_mat1, sz, type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { 
+ cv::findContours(in_mat1, outCtsOCV, outHierOCV, mode, method); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + cv::GArray> outCts; + cv::GArray outHier; + std::tie(outCts, outHier) = cv::gapi::findContoursH(in, mode, method); + cv::GComputation c(GIn(in), GOut(outCts, outHier)); + c.apply(gin(in_mat1), gout(outCtsGAPI, outHierGAPI), getCompileArgs()); + + // Comparison ////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); + + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + EXPECT_TRUE(AbsExactVector().to_compare_f()(outHierOCV, outHierGAPI)); +} + +TEST_P(FindContoursHOffsetTest, AccuracyTest) +{ + const cv::Size sz(1280, 720); + const MatType2 type = CV_8UC1; + const cv::RetrievalModes mode = cv::RETR_EXTERNAL; + const cv::ContourApproximationModes method = cv::CHAIN_APPROX_NONE; + const cv::Point offset(15, 15); + std::vector> outCtsOCV, outCtsGAPI; + std::vector outHierOCV, outHierGAPI; + + initMatForFindingContours(in_mat1, sz, type); + out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0)); + out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0)); + + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::findContours(in_mat1, outCtsOCV, outHierOCV, mode, method, offset); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + GOpaque gOffset; + cv::GArray> outCts; + cv::GArray outHier; + std::tie(outCts, outHier) = cv::gapi::findContoursH(in, mode, method, gOffset); + cv::GComputation c(GIn(in, gOffset), GOut(outCts, outHier)); + c.apply(gin(in_mat1, offset), gout(outCtsGAPI, outHierGAPI), getCompileArgs()); + + // Comparison 
////////////////////////////////////////////////////////////// + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1)); + cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1)); + EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi)); + + EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size()); + EXPECT_TRUE(AbsExactVector().to_compare_f()(outHierOCV, outHierGAPI)); +} + +TEST_P(BoundingRectMatTest, AccuracyTest) +{ + cv::Rect out_rect_gapi, out_rect_ocv; + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_mat1); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + +TEST_P(BoundingRectMatVector32STest, AccuracyTest) +{ + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorS(sz.width); + cv::randu(in_vectorS, cv::Scalar::all(0), cv::Scalar::all(255)); + in_mat1 = cv::Mat(in_vectorS); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_mat1); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + +TEST_P(BoundingRectMatVector32FTest, AccuracyTest) +{ + cv::RNG& rng = theRNG(); + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorF(sz.width); + const 
int fscale = 256; // avoid bits near ULP, generate stable test input + for (int i = 0; i < sz.width; i++) + { + cv::Point2f pt(rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale)); + in_vectorF.push_back(pt); + } + in_mat1 = cv::Mat(in_vectorF); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_mat1); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + + +TEST_P(BoundingRectVector32STest, AccuracyTest) +{ + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorS(sz.width); + cv::randu(in_vectorS, cv::Scalar::all(0), cv::Scalar::all(255)); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vectorS), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_vectorS); + } + + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + +TEST_P(BoundingRectVector32FTest, AccuracyTest) +{ + cv::RNG& rng = theRNG(); + cv::Rect out_rect_gapi, out_rect_ocv; + + std::vector in_vectorF(sz.width); + const int fscale = 256; // avoid bits near ULP, generate stable test input + for (int i = 0; i < sz.width; i++) + { + cv::Point2f pt(rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale)); + 
in_vectorF.push_back(pt); + } + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::boundingRect(in); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vectorF), cv::gout(out_rect_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + out_rect_ocv = cv::boundingRect(in_vectorF); + } + + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv)); + } +} + TEST_P(BGR2RGBTest, AccuracyTest) { // G-API code ////////////////////////////////////////////////////////////// diff --git a/modules/gapi/test/common/gapi_tests_common.hpp b/modules/gapi/test/common/gapi_tests_common.hpp index bb045b83d1..948476fa10 100644 --- a/modules/gapi/test/common/gapi_tests_common.hpp +++ b/modules/gapi/test/common/gapi_tests_common.hpp @@ -463,6 +463,7 @@ struct TestWithParamsSpecific : public TestWithParamsBase; using compare_scalar_f = std::function; +using compare_rect_f = std::function; template using compare_vector_f = std::function &a, @@ -489,6 +490,7 @@ private: using CompareMats = CompareF; using CompareScalars = CompareF; +using CompareRects = CompareF; template using CompareVectors = CompareF, std::vector>; @@ -535,6 +537,27 @@ struct WrappableScalar } }; +template +struct WrappableRect +{ + compare_rect_f to_compare_f() + { + T t = *static_cast(this); + return [t](const cv::Rect &a, const cv::Rect &b) + { + return t(a, b); + }; + } + + CompareRects to_compare_obj() + { + T t = *static_cast(this); + std::stringstream ss; + ss << t; + return CompareRects(to_compare_f(), ss.str()); + } +}; + template struct WrappableVector { @@ -719,13 +742,15 @@ public: double err_Inf = cv::norm(in1, in2, NORM_INF); if (err_Inf > _inf_tol) { - std::cout << "ToleranceColor error: err_Inf=" << err_Inf << " tolerance=" << _inf_tol << std::endl;; + std::cout << "ToleranceColor error: 
err_Inf=" << err_Inf + << " tolerance=" << _inf_tol << std::endl; return false; } double err = cv::norm(in1, in2, NORM_L1 | NORM_RELATIVE); if (err > _tol) { - std::cout << "ToleranceColor error: err=" << err << " tolerance=" << _tol << std::endl;; + std::cout << "ToleranceColor error: err=" << err + << " tolerance=" << _tol << std::endl; return false; } } @@ -749,7 +774,8 @@ public: double abs_err = std::abs(in1[0] - in2[0]) / std::max(1.0, std::abs(in2[0])); if (abs_err > _tol) { - std::cout << "AbsToleranceScalar error: abs_err=" << abs_err << " tolerance=" << _tol << " in1[0]" << in1[0] << " in2[0]" << in2[0] << std::endl;; + std::cout << "AbsToleranceScalar error: abs_err=" << abs_err << " tolerance=" << _tol + << " in1[0]" << in1[0] << " in2[0]" << in2[0] << std::endl; return false; } else @@ -765,6 +791,46 @@ private: double _tol; }; +class IoUToleranceRect : public WrappableRect +{ +public: + IoUToleranceRect(double tol) : _tol(tol) {} + bool operator() (const cv::Rect& in1, const cv::Rect& in2) const + { + // determine the (x, y)-coordinates of the intersection rectangle + int xA = max(in1.x, in2.x); + int yA = max(in1.y, in2.y); + int xB = min(in1.br().x, in2.br().x); + int yB = min(in1.br().y, in2.br().y); + // compute the area of intersection rectangle + int interArea = max(0, xB - xA) * max(0, yB - yA); + // compute the area of union rectangle + int unionArea = in1.area() + in2.area() - interArea; + + double iou = interArea / unionArea; + double err = 1 - iou; + if (err > _tol) + { + std::cout << "IoUToleranceRect error: err=" << err << " tolerance=" << _tol + << " in1.x=" << in1.x << " in2.x=" << in2.x + << " in1.y=" << in1.y << " in2.y=" << in2.y + << " in1.width=" << in1.width << " in2.width=" << in2.width + << " in1.height=" << in1.height << " in2.height=" << in2.height << std::endl; + return false; + } + else + { + return true; + } + } + friend std::ostream& operator<<(std::ostream& os, const IoUToleranceRect& obj) + { + return os << 
"IoUToleranceRect(" << std::to_string(obj._tol) << ")"; + } +private: + double _tol; +}; + template class AbsExactVector : public WrappableVector, Elem> { @@ -803,6 +869,11 @@ inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_sca return os << "compare_scalar_f"; } +inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_rect_f&) +{ + return os << "compare_rect_f"; +} + template inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_vector_f&) { @@ -849,6 +920,37 @@ inline std::ostream& operator<<(std::ostream& os, NormTypes op) return os; } +inline std::ostream& operator<<(std::ostream& os, RetrievalModes op) +{ +#define CASE(v) case RetrievalModes::v: os << #v; break + switch (op) + { + CASE(RETR_EXTERNAL); + CASE(RETR_LIST); + CASE(RETR_CCOMP); + CASE(RETR_TREE); + CASE(RETR_FLOODFILL); + default: GAPI_Assert(false && "unknown RetrievalModes value"); + } +#undef CASE + return os; +} + +inline std::ostream& operator<<(std::ostream& os, ContourApproximationModes op) +{ +#define CASE(v) case ContourApproximationModes::v: os << #v; break + switch (op) + { + CASE(CHAIN_APPROX_NONE); + CASE(CHAIN_APPROX_SIMPLE); + CASE(CHAIN_APPROX_TC89_L1); + CASE(CHAIN_APPROX_TC89_KCOS); + default: GAPI_Assert(false && "unknown ContourApproximationModes value"); + } +#undef CASE + return os; +} + inline std::ostream& operator<<(std::ostream& os, MorphTypes op) { #define CASE(v) case MorphTypes::v: os << #v; break diff --git a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp index 7cba6b05db..cea0e0da32 100644 --- a/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp +++ b/modules/gapi/test/cpu/gapi_imgproc_tests_cpu.cpp @@ -265,6 +265,78 @@ INSTANTIATE_TEST_CASE_P(GoodFeaturesInternalTestCPU, GoodFeaturesTest, Values(3), Values(true))); +INSTANTIATE_TEST_CASE_P(FindContoursNoOffsetTestCPU, FindContoursNoOffsetTest, + Combine(Values(IMGPROC_CPU), + 
Values(cv::Size(1280, 720)), + Values(CV_8UC1), + Values(RETR_EXTERNAL), + Values(CHAIN_APPROX_NONE))); + +INSTANTIATE_TEST_CASE_P(FindContoursOffsetTestCPU, FindContoursOffsetTest, + Values(IMGPROC_CPU)); + +INSTANTIATE_TEST_CASE_P(FindContoursHNoOffsetTestCPU, FindContoursHNoOffsetTest, + Combine(Values(IMGPROC_CPU), + Values(cv::Size(1280, 720), + cv::Size(640, 480)), + Values(CV_8UC1), + Values(RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE), + Values(CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE, + CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS))); + +INSTANTIATE_TEST_CASE_P(FindContoursHNoOffset32STestCPU, FindContoursHNoOffsetTest, + Combine(Values(IMGPROC_CPU), + Values(cv::Size(1280, 720), + cv::Size(640, 480)), + Values(CV_32SC1), + Values(RETR_CCOMP, RETR_FLOODFILL), + Values(CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE, + CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS))); + +INSTANTIATE_TEST_CASE_P(FindContoursHOffsetTestCPU, FindContoursHOffsetTest, + Values(IMGPROC_CPU)); + +INSTANTIATE_TEST_CASE_P(BoundingRectMatTestCPU, BoundingRectMatTest, + Combine(Values( CV_8UC1 ), + Values(cv::Size(1280, 720), + cv::Size(640, 480), + cv::Size(128, 128)), + Values(-1), + Values(IMGPROC_CPU), + Values(IoUToleranceRect(0).to_compare_obj()))); + +INSTANTIATE_TEST_CASE_P(BoundingRectMatVector32STestCPU, BoundingRectMatVector32STest, + Combine(Values(-1), + Values(cv::Size(1280, 1), + cv::Size(128, 1)), + Values(-1), + Values(IMGPROC_CPU), + Values(IoUToleranceRect(0).to_compare_obj()))); + + INSTANTIATE_TEST_CASE_P(BoundingRectMatVector32FTestCPU, BoundingRectMatVector32FTest, + Combine(Values(-1), + Values(cv::Size(1280, 1), + cv::Size(128, 1)), + Values(-1), + Values(IMGPROC_CPU), + Values(IoUToleranceRect(1e-5).to_compare_obj()))); + +INSTANTIATE_TEST_CASE_P(BoundingRectVector32STestCPU, BoundingRectVector32STest, + Combine(Values(-1), + Values(cv::Size(1280, 1), + cv::Size(128, 1)), + Values(-1), + Values(IMGPROC_CPU), + Values(IoUToleranceRect(0).to_compare_obj()))); + + 
INSTANTIATE_TEST_CASE_P(BoundingRectVector32FTestCPU, BoundingRectVector32FTest, + Combine(Values(-1), + Values(cv::Size(1280, 1), + cv::Size(128, 1)), + Values(-1), + Values(IMGPROC_CPU), + Values(IoUToleranceRect(1e-5).to_compare_obj()))); + INSTANTIATE_TEST_CASE_P(BGR2RGBTestCPU, BGR2RGBTest, Combine(Values(CV_8UC3), Values(cv::Size(1280, 720), From 724001aa0f646aa58913c5e46917d104334275ed Mon Sep 17 00:00:00 2001 From: Ruslan Garnov Date: Tue, 3 Nov 2020 18:50:49 +0300 Subject: [PATCH 080/152] Added multidimensional RMat::View steps --- modules/gapi/include/opencv2/gapi/rmat.hpp | 27 ++-- modules/gapi/src/api/rmat.cpp | 70 ++++++++-- modules/gapi/src/backends/common/gbackend.hpp | 16 ++- modules/gapi/test/rmat/rmat_test_common.hpp | 16 ++- modules/gapi/test/rmat/rmat_view_tests.cpp | 130 ++++++++++++++++-- modules/gapi/test/s11n/gapi_s11n_tests.cpp | 9 +- 6 files changed, 230 insertions(+), 38 deletions(-) diff --git a/modules/gapi/include/opencv2/gapi/rmat.hpp b/modules/gapi/include/opencv2/gapi/rmat.hpp index ff834b46b1..f50bd08b65 100644 --- a/modules/gapi/include/opencv2/gapi/rmat.hpp +++ b/modules/gapi/include/opencv2/gapi/rmat.hpp @@ -54,11 +54,11 @@ public: { public: using DestroyCallback = std::function; + using stepsT = std::vector; View() = default; - View(const GMatDesc& desc, uchar* data, size_t step = 0u, DestroyCallback&& cb = nullptr) - : m_desc(desc), m_data(data), m_step(step == 0u ? 
elemSize()*cols() : step), m_cb(std::move(cb)) - {} + View(const GMatDesc& desc, uchar* data, const stepsT& steps = {}, DestroyCallback&& cb = nullptr); + View(const GMatDesc& desc, uchar* data, size_t step, DestroyCallback&& cb = nullptr); View(const View&) = delete; View& operator=(const View&) = delete; @@ -70,23 +70,30 @@ public: const std::vector& dims() const { return m_desc.dims; } int cols() const { return m_desc.size.width; } int rows() const { return m_desc.size.height; } - int type() const { return CV_MAKE_TYPE(depth(), chan()); } + int type() const; int depth() const { return m_desc.depth; } int chan() const { return m_desc.chan; } size_t elemSize() const { return CV_ELEM_SIZE(type()); } - template T* ptr(int y = 0, int x = 0) { - return reinterpret_cast(m_data + m_step*y + x*CV_ELEM_SIZE(type())); + template T* ptr(int y = 0) { + return reinterpret_cast(m_data + step()*y); } - template const T* ptr(int y = 0, int x = 0) const { - return reinterpret_cast(m_data + m_step*y + x*CV_ELEM_SIZE(type())); + template const T* ptr(int y = 0) const { + return reinterpret_cast(m_data + step()*y); } - size_t step() const { return m_step; } + template T* ptr(int y, int x) { + return reinterpret_cast(m_data + step()*y + step(1)*x); + } + template const T* ptr(int y, int x) const { + return reinterpret_cast(m_data + step()*y + step(1)*x); + } + size_t step(size_t i = 0) const { GAPI_DbgAssert(i{desc.size.height, desc.size.width} + : desc.dims; + View::stepsT steps(dims.size(), 0u); + auto type = typeFromDesc(desc); + steps.back() = CV_ELEM_SIZE(type); + for (int i = static_cast(dims.size())-2; i >= 0; i--) { + steps[i] = steps[i+1]*dims[i]; + } + return steps; +} +} // anonymous namespace + +View::View(const cv::GMatDesc& desc, uchar* data, size_t step, DestroyCallback&& cb) + : m_desc(checkDesc(desc)) + , m_data(data) + , m_steps([this, step](){ + GAPI_Assert(m_desc.dims.empty()); + auto steps = defaultSteps(m_desc); + if (step != 0u) { + steps[0] = step; + } + 
return steps; + }()) + , m_cb(std::move(cb)) { +} + +View::View(const cv::GMatDesc& desc, uchar* data, const stepsT &steps, DestroyCallback&& cb) + : m_desc(checkDesc(desc)) + , m_data(data) + , m_steps(steps == stepsT{} ? defaultSteps(m_desc): steps) + , m_cb(std::move(cb)) { +} + +int View::type() const { return typeFromDesc(m_desc); } + // There is an issue with default generated operator=(View&&) on Mac: -// it doesn't nullify m_cb of a moved object +// it doesn't nullify m_cb of the moved object View& View::operator=(View&& v) { - m_desc = v.m_desc; - m_data = v.m_data; - m_step = v.m_step; - m_cb = v.m_cb; - v.m_desc = {}; - v.m_data = nullptr; - v.m_step = 0u; - v.m_cb = nullptr; + m_desc = v.m_desc; + m_data = v.m_data; + m_steps = v.m_steps; + m_cb = v.m_cb; + v.m_desc = {}; + v.m_data = nullptr; + v.m_steps = {0u}; + v.m_cb = nullptr; return *this; } diff --git a/modules/gapi/src/backends/common/gbackend.hpp b/modules/gapi/src/backends/common/gbackend.hpp index 8c1749377e..4914715fa7 100644 --- a/modules/gapi/src/backends/common/gbackend.hpp +++ b/modules/gapi/src/backends/common/gbackend.hpp @@ -23,12 +23,26 @@ namespace cv { namespace gimpl { inline cv::Mat asMat(RMat::View& v) { +#if !defined(GAPI_STANDALONE) + return v.dims().empty() ? cv::Mat(v.rows(), v.cols(), v.type(), v.ptr(), v.step()) + : cv::Mat(v.dims(), v.type(), v.ptr(), v.steps().data()); +#else + // FIXME: add a check that steps are default return v.dims().empty() ? 
cv::Mat(v.rows(), v.cols(), v.type(), v.ptr(), v.step()) : cv::Mat(v.dims(), v.type(), v.ptr()); + +#endif } inline RMat::View asView(const Mat& m, RMat::View::DestroyCallback&& cb = nullptr) { - // FIXME: View doesn't support multidimensional cv::Mat's +#if !defined(GAPI_STANDALONE) + RMat::View::stepsT steps(m.dims); + for (int i = 0; i < m.dims; i++) { + steps[i] = m.step[i]; + } + return RMat::View(cv::descr_of(m), m.data, steps, std::move(cb)); +#else return RMat::View(cv::descr_of(m), m.data, m.step, std::move(cb)); +#endif } class RMatAdapter : public RMat::Adapter { diff --git a/modules/gapi/test/rmat/rmat_test_common.hpp b/modules/gapi/test/rmat/rmat_test_common.hpp index 47a744499e..5685d06253 100644 --- a/modules/gapi/test/rmat/rmat_test_common.hpp +++ b/modules/gapi/test/rmat/rmat_test_common.hpp @@ -19,14 +19,18 @@ public: : m_mat(m), m_callbackCalled(callbackCalled) {} virtual RMat::View access(RMat::Access access) override { + RMat::View::stepsT steps(m_mat.dims); + for (int i = 0; i < m_mat.dims; i++) { + steps[i] = m_mat.step[i]; + } if (access == RMat::Access::W) { - return RMat::View(cv::descr_of(m_mat), m_mat.data, m_mat.step, + return RMat::View(cv::descr_of(m_mat), m_mat.data, steps, [this](){ EXPECT_FALSE(m_callbackCalled); m_callbackCalled = true; }); } else { - return RMat::View(cv::descr_of(m_mat), m_mat.data, m_mat.step); + return RMat::View(cv::descr_of(m_mat), m_mat.data, steps); } } virtual cv::GMatDesc desc() const override { return cv::descr_of(m_mat); } @@ -42,8 +46,12 @@ public: : m_deviceMat(m), m_hostMat(m.clone()), m_callbackCalled(callbackCalled) {} virtual RMat::View access(RMat::Access access) override { + RMat::View::stepsT steps(m_hostMat.dims); + for (int i = 0; i < m_hostMat.dims; i++) { + steps[i] = m_hostMat.step[i]; + } if (access == RMat::Access::W) { - return RMat::View(cv::descr_of(m_hostMat), m_hostMat.data, m_hostMat.step, + return RMat::View(cv::descr_of(m_hostMat), m_hostMat.data, steps, [this](){ 
EXPECT_FALSE(m_callbackCalled); m_callbackCalled = true; @@ -51,7 +59,7 @@ public: }); } else { m_deviceMat.copyTo(m_hostMat); - return RMat::View(cv::descr_of(m_hostMat), m_hostMat.data, m_hostMat.step); + return RMat::View(cv::descr_of(m_hostMat), m_hostMat.data, steps); } } virtual cv::GMatDesc desc() const override { return cv::descr_of(m_hostMat); } diff --git a/modules/gapi/test/rmat/rmat_view_tests.cpp b/modules/gapi/test/rmat/rmat_view_tests.cpp index abc251660b..14025231a7 100644 --- a/modules/gapi/test/rmat/rmat_view_tests.cpp +++ b/modules/gapi/test/rmat/rmat_view_tests.cpp @@ -15,6 +15,8 @@ namespace opencv_test using cv::GMatDesc; using View = cv::RMat::View; using cv::Mat; +using cv::gimpl::asMat; +using cv::gimpl::asView; using namespace ::testing; static void expect_eq_desc(const GMatDesc& desc, const View& view) { @@ -22,7 +24,8 @@ static void expect_eq_desc(const GMatDesc& desc, const View& view) { EXPECT_EQ(desc.dims, view.dims()); EXPECT_EQ(desc.size.width, view.cols()); EXPECT_EQ(desc.size.height, view.rows()); - EXPECT_EQ(CV_MAKE_TYPE(desc.depth,desc.chan), view.type()); + EXPECT_EQ(desc.depth, view.depth()); + EXPECT_EQ(desc.chan, view.chan()); EXPECT_EQ(desc.depth, view.depth()); EXPECT_EQ(desc.chan, view.chan()); } @@ -40,10 +43,10 @@ TEST_P(RMatViewTest, ConstructionFromMat) { auto type = GetParam(); Mat mat(8,8,type); const auto desc = cv::descr_of(mat); - View view(cv::descr_of(mat), mat.ptr(), mat.step1()); + View view = asView(mat); expect_eq_desc(desc, view); EXPECT_EQ(mat.ptr(), view.ptr()); - EXPECT_EQ(mat.step1(), view.step()); + EXPECT_EQ(mat.step, view.step()); } TEST(RMatView, TestConstructionFromMatND) { @@ -66,16 +69,98 @@ TEST_P(RMatViewTest, DefaultStep) { EXPECT_EQ(static_cast(desc.size.width)*CV_ELEM_SIZE(type), view.step()); } -static Mat asMat(View& view) { - return Mat(view.size(), view.type(), view.ptr(), view.step()); +struct RMatViewNDTest : public TestWithParam< + std::tuple>{}; +TEST_P(RMatViewNDTest, DefaultStep) 
{ + int depth = 0, ndims = 0; + std::tie(depth, ndims) = GetParam(); + std::vector dims(ndims, 12); + GMatDesc desc; + desc.dims = dims; + desc.depth = depth; + GAPI_Assert(desc.chan == -1); + auto elemSize = CV_ELEM_SIZE(depth); + auto total = std::accumulate(dims.begin(), dims.end(), elemSize, std::multiplies()); + std::vector data(total); + View view(desc, data.data()); + auto step = static_cast(total/dims[0]); + EXPECT_EQ(step, view.step(0)); + for (int i = 1; i < ndims; i++) { + step /= dims[i]; + EXPECT_EQ(step, view.step(i)); + } } +TEST_P(RMatViewNDTest, StepFromMat) { + int depth = 0, ndims = 0; + std::tie(depth, ndims) = GetParam(); + std::vector dims(ndims, 12); + cv::Mat mat(dims, depth); + auto view = asView(mat); + EXPECT_EQ(mat.ptr(), view.ptr()); + for (int i = 0; i < ndims; i++) { + EXPECT_EQ(mat.step[i], view.step(i)); + } +} + +TEST_P(RMatViewNDTest, StepFromView) { + int depth = 0, ndims = 0; + std::tie(depth, ndims) = GetParam(); + std::vector dims(ndims, 12); + std::vector aligned(ndims, 16); + GMatDesc desc; + desc.dims = dims; + desc.depth = depth; + GAPI_Assert(desc.chan == -1); + auto elemSize = CV_ELEM_SIZE(depth); + auto total = std::accumulate(aligned.begin(), aligned.end(), elemSize, std::multiplies()); + std::vector data(total); + View::stepsT steps(ndims); + auto step = static_cast(total/aligned[0]); + steps[0] = step; + for (int i = 1; i < ndims; i++) { + step /= aligned[i]; + steps[i] = step; + } + View view(desc, data.data(), steps); + auto mat = asMat(view); + EXPECT_EQ(mat.ptr(), view.ptr()); + for (int i = 0; i < ndims; i++) { + EXPECT_EQ(mat.step[i], view.step(i)); + } +} + +INSTANTIATE_TEST_CASE_P(Test, RMatViewNDTest, + Combine(Values(CV_8U, CV_32F), // depth + Values(1,2,3,4,7))); // ndims + +struct RMatViewNDTestNegative : public TestWithParam< + std::tuple>{}; +TEST_P(RMatViewNDTestNegative, DefaultStep) { + int depth = 0, chan = 0, ndims = 0; + std::tie(depth, chan, ndims) = GetParam(); + std::vector dims(ndims, 12); + 
GMatDesc desc; + desc.dims = dims; + desc.depth = depth; + desc.chan = chan; + auto elemSize = CV_ELEM_SIZE(depth); + auto total = std::accumulate(dims.begin(), dims.end(), elemSize, std::multiplies()); + std::vector data(total); + EXPECT_ANY_THROW(View view(desc, data.data())); +} + +INSTANTIATE_TEST_CASE_P(Test, RMatViewNDTestNegative, + Combine(Values(CV_8U, CV_32F), // depth + Values(1,2,3,4), // chan + Values(2,4,7))); // ndims + TEST_P(RMatViewTest, NonDefaultStepInput) { auto type = GetParam(); Mat bigMat(16,16,type); cv::randn(bigMat, cv::Scalar::all(127), cv::Scalar::all(40)); Mat mat = bigMat(cv::Rect{4,4,8,8}); - View view(cv::descr_of(mat), mat.data, mat.step); + View view = asView(mat); const auto viewMat = asMat(view); Mat ref, out; cv::Size ksize{1,1}; @@ -90,7 +175,36 @@ TEST_P(RMatViewTest, NonDefaultStepOutput) { cv::randn(mat, cv::Scalar::all(127), cv::Scalar::all(40)); Mat bigMat = Mat::zeros(16,16,type); Mat out = bigMat(cv::Rect{4,4,8,8}); - View view(cv::descr_of(out), out.ptr(), out.step); + View view = asView(out); + auto viewMat = asMat(view); + Mat ref; + cv::Size ksize{1,1}; + cv::blur(mat, viewMat, ksize); + cv::blur(mat, ref, ksize); + EXPECT_EQ(0, cvtest::norm(ref, out, NORM_INF)); +} + +TEST_P(RMatViewTest, NonDefaultStep2DInput) { + auto type = GetParam(); + Mat bigMat(16,16,type); + cv::randn(bigMat, cv::Scalar::all(127), cv::Scalar::all(40)); + Mat mat = bigMat(cv::Rect{4,4,8,8}); + View view(cv::descr_of(mat), mat.data, mat.step); + const auto viewMat = asMat(view); + Mat ref, out; + cv::Size ksize{1,1}; + cv::blur(viewMat, out, ksize); + cv::blur( mat, ref, ksize); + EXPECT_EQ(0, cvtest::norm(ref, out, NORM_INF)); +} + +TEST_P(RMatViewTest, NonDefaultStep2DOutput) { + auto type = GetParam(); + Mat mat(8,8,type); + cv::randn(mat, cv::Scalar::all(127), cv::Scalar::all(40)); + Mat bigMat = Mat::zeros(16,16,type); + Mat out = bigMat(cv::Rect{4,4,8,8}); + View view(cv::descr_of(out), out.data, out.step); auto viewMat = asMat(view); 
Mat ref; cv::Size ksize{1,1}; @@ -107,7 +221,7 @@ struct RMatViewCallbackTest : public ::testing::Test { : mat(8,8,CV_8UC1) { cv::randn(mat, cv::Scalar::all(127), cv::Scalar::all(40)); } - View getView() { return {cv::descr_of(mat), mat.ptr(), mat.step1(), [this](){ callbackCalls++; }}; } + View getView() { return asView(mat, [this](){ callbackCalls++; }); } int callbackCalls = 0; Mat mat; }; diff --git a/modules/gapi/test/s11n/gapi_s11n_tests.cpp b/modules/gapi/test/s11n/gapi_s11n_tests.cpp index 2fc1e46253..74aac19306 100644 --- a/modules/gapi/test/s11n/gapi_s11n_tests.cpp +++ b/modules/gapi/test/s11n/gapi_s11n_tests.cpp @@ -2,6 +2,7 @@ #include "backends/common/serialization.hpp" #include +#include <../src/backends/common/gbackend.hpp> // asView namespace { struct EmptyCustomType { }; @@ -134,12 +135,8 @@ public: MyRMatAdapter(cv::Mat m, int value, const std::string& str) : m_mat(m), m_value(value), m_str(str) {} - virtual cv::RMat::View access(cv::RMat::Access access) override { - if (access == cv::RMat::Access::W) { - return cv::RMat::View(cv::descr_of(m_mat), m_mat.data, m_mat.step); - } else { - return cv::RMat::View(cv::descr_of(m_mat), m_mat.data, m_mat.step); - } + virtual cv::RMat::View access(cv::RMat::Access) override { + return cv::gimpl::asView(m_mat); } virtual cv::GMatDesc desc() const override { return cv::descr_of(m_mat); } virtual void serialize(cv::gapi::s11n::IOStream& os) override { From d986cc4861b978415fc20c3a0dc6f16ff9d0bcdf Mon Sep 17 00:00:00 2001 From: Maksim Shabunin Date: Thu, 12 Nov 2020 13:38:26 +0300 Subject: [PATCH 081/152] calib3d: uninitialzed fields in usac --- modules/calib3d/src/usac.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/calib3d/src/usac.hpp b/modules/calib3d/src/usac.hpp index c18de92479..06a0ff2056 100644 --- a/modules/calib3d/src/usac.hpp +++ b/modules/calib3d/src/usac.hpp @@ -421,7 +421,7 @@ struct SPRT_history { double epsilon, delta, A; // number of samples processed by test int 
tested_samples; // k - SPRT_history () { + SPRT_history () : epsilon(0), delta(0), A(0) { tested_samples = 0; } }; From d9c5b85671471a3b3ecdde09fb33170803f7767d Mon Sep 17 00:00:00 2001 From: Aitik Gupta Date: Fri, 13 Nov 2020 09:00:54 +0530 Subject: [PATCH 082/152] Use in-place sort --- samples/python/stitching_detailed.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/python/stitching_detailed.py b/samples/python/stitching_detailed.py index b0cf78a759..cd3f063e35 100644 --- a/samples/python/stitching_detailed.py +++ b/samples/python/stitching_detailed.py @@ -387,7 +387,7 @@ def main(): focals = [] for cam in cameras: focals.append(cam.focal) - sorted(focals) + focals.sort() if len(focals) % 2 == 1: warped_image_scale = focals[len(focals) // 2] else: From 011d8e80d8a69d736a59619b7dcae768023bd045 Mon Sep 17 00:00:00 2001 From: Igor Murzov Date: Mon, 9 Nov 2020 19:03:46 +0300 Subject: [PATCH 083/152] videoio: Support Orbbec Astra 3D cameras using OpenNI2 API Only depth sensor is supported. Color sensor is accessible as a regular UVC camera. 
--- modules/videoio/include/opencv2/videoio.hpp | 3 ++- modules/videoio/src/cap_openni2.cpp | 11 +++++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/modules/videoio/include/opencv2/videoio.hpp b/modules/videoio/include/opencv2/videoio.hpp index eb5645ab77..c429699d3f 100644 --- a/modules/videoio/include/opencv2/videoio.hpp +++ b/modules/videoio/include/opencv2/videoio.hpp @@ -112,6 +112,7 @@ enum VideoCaptureAPIs { CAP_REALSENSE = 1500, //!< Synonym for CAP_INTELPERC CAP_OPENNI2 = 1600, //!< OpenNI2 (for Kinect) CAP_OPENNI2_ASUS = 1610, //!< OpenNI2 (for Asus Xtion and Occipital Structure sensors) + CAP_OPENNI2_ASTRA= 1620, //!< OpenNI2 (for Orbbec Astra) CAP_GPHOTO2 = 1700, //!< gPhoto2 connection CAP_GSTREAMER = 1800, //!< GStreamer CAP_FFMPEG = 1900, //!< Open and record video file or stream using the FFMPEG library @@ -825,7 +826,7 @@ public: @throws Exception %Exception on stream errors (check .isOpened() to filter out malformed streams) or VideoCapture type is not supported The primary use of the function is in multi-camera environments. - The method fills the ready state vector, grabbs video frame, if camera is ready. + The method fills the ready state vector, grabs video frame, if camera is ready. After this call use VideoCapture::retrieve() to decode and fetch frame data. 
*/ diff --git a/modules/videoio/src/cap_openni2.cpp b/modules/videoio/src/cap_openni2.cpp index adec7359fb..1d455442fa 100644 --- a/modules/videoio/src/cap_openni2.cpp +++ b/modules/videoio/src/cap_openni2.cpp @@ -103,7 +103,7 @@ private: class CvCapture_OpenNI2 : public CvCapture { public: - enum { DEVICE_DEFAULT=0, DEVICE_MS_KINECT=0, DEVICE_ASUS_XTION=1, DEVICE_MAX=1 }; + enum { DEVICE_DEFAULT=0, DEVICE_MS_KINECT=0, DEVICE_ASUS_XTION=1, DEVICE_ORBBEC_ASTRA=2, DEVICE_MAX=2 }; static const int INVALID_PIXEL_VAL = 0; static const int INVALID_COORDINATE_VAL = 0; @@ -116,6 +116,7 @@ public: CvCapture_OpenNI2(const char * filename); virtual ~CvCapture_OpenNI2(); + virtual int getCaptureDomain() CV_OVERRIDE { return cv::CAP_OPENNI2; } virtual double getProperty(int propIdx) const CV_OVERRIDE; virtual bool setProperty(int probIdx, double propVal) CV_OVERRIDE; virtual bool grabFrame() CV_OVERRIDE; @@ -261,7 +262,8 @@ CvCapture_OpenNI2::CvCapture_OpenNI2(int index, const char * filename) : index %= 10; } // Asus XTION and Occipital Structure Sensor do not have an image generator - needColor = (deviceType != DEVICE_ASUS_XTION); + // Orbbec Astra cameras don't provide OpenNI interface for color stream reading + needColor = (deviceType != DEVICE_ASUS_XTION) && (deviceType != DEVICE_ORBBEC_ASTRA); // find appropriate device URI openni::Array ldevs; @@ -300,6 +302,11 @@ CvCapture_OpenNI2::CvCapture_OpenNI2(int index, const char * filename) : setProperty(CV_CAP_PROP_OPENNI2_MIRROR, 0.0); isContextOpened = true; + + CV_LOG_INFO(NULL, cv::format("Opened OpenNI camera: %s %s (%04x:%04x)", + device.getDeviceInfo().getVendor(), device.getDeviceInfo().getName(), + device.getDeviceInfo().getUsbVendorId(), device.getDeviceInfo().getUsbProductId()) + ); } CvCapture_OpenNI2::~CvCapture_OpenNI2() From 0e4b5b88dcc379259c5e6e530c25181916abbda9 Mon Sep 17 00:00:00 2001 From: Ruslan Garnov Date: Thu, 5 Nov 2020 02:27:32 +0300 Subject: [PATCH 084/152] Added support of 1x1x1xN input for 
parseYolo --- modules/gapi/src/backends/cpu/gnnparsers.cpp | 34 +++++++++++++++---- modules/gapi/test/common/gapi_core_tests.hpp | 2 +- .../gapi/test/common/gapi_core_tests_inl.hpp | 2 +- .../test/common/gapi_parsers_tests_common.hpp | 19 +++++++++-- modules/gapi/test/cpu/gapi_core_tests_cpu.cpp | 7 +++- 5 files changed, 52 insertions(+), 12 deletions(-) diff --git a/modules/gapi/src/backends/cpu/gnnparsers.cpp b/modules/gapi/src/backends/cpu/gnnparsers.cpp index 234382d530..a5e4bf5f85 100644 --- a/modules/gapi/src/backends/cpu/gnnparsers.cpp +++ b/modules/gapi/src/backends/cpu/gnnparsers.cpp @@ -246,6 +246,28 @@ void parseSSD(const cv::Mat& in_ssd_result, } } +static void checkYoloDims(const MatSize& dims) { + const auto d = dims.dims(); + // Accept 1x13x13xN and 13x13xN + GAPI_Assert(d >= 2); + if (d >= 3) { + if (dims[d-2] == 13) { + GAPI_Assert(dims[d-1]%5 == 0); + GAPI_Assert(dims[d-2] == 13); + GAPI_Assert(dims[d-3] == 13); + for (int i = 0; i < d-3; i++) { + GAPI_Assert(dims[i] == 1); + } + return; + } + } + // Accept 1x1x1xN, 1x1xN, 1xN + GAPI_Assert(dims[d-1]%(5*13*13) == 0); + for (int i = 0; i < d-1; i++) { + GAPI_Assert(dims[i] == 1); + } +} + void parseYolo(const cv::Mat& in_yolo_result, const cv::Size& in_size, const float confidence_threshold, @@ -255,12 +277,12 @@ void parseYolo(const cv::Mat& in_yolo_result, std::vector& out_labels) { const auto& dims = in_yolo_result.size; - GAPI_Assert(dims.dims() == 4); - GAPI_Assert(dims[0] == 1); - GAPI_Assert(dims[1] == 13); - GAPI_Assert(dims[2] == 13); - GAPI_Assert(dims[3] % 5 == 0); // 5 boxes - const auto num_classes = dims[3] / 5 - 5; + checkYoloDims(dims); + int acc = 1; + for (int i = 0; i < dims.dims(); i++) { + acc *= dims[i]; + } + const auto num_classes = acc/(5*13*13)-5; GAPI_Assert(num_classes > 0); GAPI_Assert(0 < nms_threshold && nms_threshold <= 1); out_boxes.clear(); diff --git a/modules/gapi/test/common/gapi_core_tests.hpp b/modules/gapi/test/common/gapi_core_tests.hpp index 
4a0a7641f9..889e32f1c1 100644 --- a/modules/gapi/test/common/gapi_core_tests.hpp +++ b/modules/gapi/test/common/gapi_core_tests.hpp @@ -157,7 +157,7 @@ GAPI_TEST_EXT_BASE_FIXTURE(ParseSSDBLTest, ParserSSDTest, initNothing, GAPI_TEST_EXT_BASE_FIXTURE(ParseSSDTest, ParserSSDTest, initNothing, FIXTURE_API(float, bool, bool), 3, confidence_threshold, alignment_to_square, filter_out_of_bounds) GAPI_TEST_EXT_BASE_FIXTURE(ParseYoloTest, ParserYoloTest, initNothing, - FIXTURE_API(float, float, int), 3, confidence_threshold, nms_threshold, num_classes) + FIXTURE_API(float, float, int, std::pair), 4, confidence_threshold, nms_threshold, num_classes, dims_config) GAPI_TEST_FIXTURE(SizeTest, initMatrixRandU, <>, 0) GAPI_TEST_FIXTURE(SizeRTest, initNothing, <>, 0) } // opencv_test diff --git a/modules/gapi/test/common/gapi_core_tests_inl.hpp b/modules/gapi/test/common/gapi_core_tests_inl.hpp index 1a167ad5ea..045b556369 100644 --- a/modules/gapi/test/common/gapi_core_tests_inl.hpp +++ b/modules/gapi/test/common/gapi_core_tests_inl.hpp @@ -1666,7 +1666,7 @@ TEST_P(ParseSSDTest, ParseTest) TEST_P(ParseYoloTest, ParseTest) { - cv::Mat in_mat = generateYoloOutput(num_classes); + cv::Mat in_mat = generateYoloOutput(num_classes, dims_config); auto anchors = cv::gapi::nn::parsers::GParseYolo::defaultAnchors(); std::vector boxes_gapi, boxes_ref; std::vector labels_gapi, labels_ref; diff --git a/modules/gapi/test/common/gapi_parsers_tests_common.hpp b/modules/gapi/test/common/gapi_parsers_tests_common.hpp index 127a1c5a5e..91dcca7b3e 100644 --- a/modules/gapi/test/common/gapi_parsers_tests_common.hpp +++ b/modules/gapi/test/common/gapi_parsers_tests_common.hpp @@ -225,13 +225,26 @@ private: class ParserYoloTest { public: - cv::Mat generateYoloOutput(const int num_classes) + cv::Mat generateYoloOutput(const int num_classes, std::pair dims_config = {false, 4}) { - std::vector dims = { 1, 13, 13, (num_classes + 5) * 5 }; + bool one_dim = false; + int num_dims = 0; + std::tie(one_dim, 
num_dims) = dims_config; + GAPI_Assert(num_dims <= 4); + GAPI_Assert((!one_dim && num_dims >= 3) || + ( one_dim && num_dims >= 1)); + std::vector dims(num_dims, 1); + if (one_dim) { + dims.back() = (num_classes+5)*5*13*13; + } else { + dims.back() = (num_classes+5)*5; + dims[num_dims-2] = 13; + dims[num_dims-3] = 13; + } cv::Mat mat(dims, CV_32FC1); auto data = mat.ptr(); - const size_t range = dims[0] * dims[1] * dims[2] * dims[3]; + const size_t range = std::accumulate(dims.begin(), dims.end(), 1, std::multiplies()); for (size_t i = 0; i < range; ++i) { data[i] = static_cast(std::rand()) / RAND_MAX; diff --git a/modules/gapi/test/cpu/gapi_core_tests_cpu.cpp b/modules/gapi/test/cpu/gapi_core_tests_cpu.cpp index 53faa28178..595b63dd1f 100644 --- a/modules/gapi/test/cpu/gapi_core_tests_cpu.cpp +++ b/modules/gapi/test/cpu/gapi_core_tests_cpu.cpp @@ -531,7 +531,12 @@ INSTANTIATE_TEST_CASE_P(ParseTestCPU, ParseYoloTest, Values(CORE_CPU), Values(0.3f, 0.5f, 0.7f), Values(0.5f, 1.0f), - Values(80, 7))); + Values(80, 7), + Values(std::make_pair(false, 3), + std::make_pair(false, 4), + std::make_pair(true, 2), + std::make_pair(true, 3), + std::make_pair(true, 4)))); INSTANTIATE_TEST_CASE_P(SizeTestCPU, SizeTest, Combine(Values(CV_8UC1, CV_8UC3, CV_32FC1), From 41c2669476ba81d684bfd81f83d83c6cb96db027 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Thu, 12 Nov 2020 19:47:54 +0000 Subject: [PATCH 085/152] java: robust code generation - the same generated code from Python2/3 - avoid randomized output due to unpredictable dict/set order --- modules/java/generator/gen_java.py | 45 ++++++++++++++++-------------- 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index 03075c5ae7..8e5c69e788 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -105,7 +105,7 @@ T_CPP_MODULE = Template(read_contents(os.path.join(SCRIPT_DIR, 'templates/cpp_mo class 
GeneralInfo(): def __init__(self, type, decl, namespaces): - self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces) + self.symbol_id, self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces) # parse doxygen comments self.params={} @@ -141,13 +141,13 @@ class GeneralInfo(): break pieces = localName.split(".") if len(pieces) > 2: # ... - return spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1] + return name, spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1] elif len(pieces) == 2: # . - return spaceName, pieces[0], pieces[0], pieces[1] + return name, spaceName, pieces[0], pieces[0], pieces[1] elif len(pieces) == 1: # - return spaceName, "", "", pieces[0] + return name, spaceName, "", "", pieces[0] else: - return spaceName, "", "" # error?! + return name, spaceName, "", "" # error?! def fullName(self, isCPP=False): result = ".".join([self.fullClass(), self.name]) @@ -249,8 +249,8 @@ class ClassInfo(GeneralInfo): def getAllMethods(self): result = [] - result.extend([fi for fi in sorted(self.methods) if fi.isconstructor]) - result.extend([fi for fi in sorted(self.methods) if not fi.isconstructor]) + result += [fi for fi in self.methods if fi.isconstructor] + result += [fi for fi in self.methods if not fi.isconstructor] return result def addMethod(self, fi): @@ -369,7 +369,7 @@ class JavaWrapperGenerator(object): self.clear() def clear(self): - self.namespaces = set(["cv"]) + self.namespaces = ["cv"] self.classes = { "Mat" : ClassInfo([ 'class Mat', '', [], [] ], self.namespaces) } self.module = "" self.Module = "" @@ -512,9 +512,9 @@ class JavaWrapperGenerator(object): includes.append('#include "' + hdr + '"') for hdr in srcfiles: decls = parser.parse(hdr) - self.namespaces = parser.namespaces + self.namespaces = sorted(parser.namespaces) logging.info("\n\n===== Header: %s =====", hdr) - logging.info("Namespaces: %s", parser.namespaces) + logging.info("Namespaces: %s", 
sorted(parser.namespaces)) if decls: includes.append('#include "' + hdr + '"') else: @@ -536,7 +536,7 @@ class JavaWrapperGenerator(object): moduleCppCode = StringIO() package_path = os.path.join(output_java_path, module) mkdir_p(package_path) - for ci in self.classes.values(): + for ci in sorted(self.classes.values(), key=lambda x: x.symbol_id): if ci.name == "Mat": continue ci.initCodeStreams(self.Module) @@ -560,7 +560,7 @@ class JavaWrapperGenerator(object): report.write("\n".join(self.ported_func_list)) report.write("\n\nSKIPPED FUNCs LIST (%i of %i):\n\n" % (len(self.skipped_func_list), total_count)) report.write("".join(self.skipped_func_list)) - for i in self.def_args_hist.keys(): + for i in sorted(self.def_args_hist.keys()): report.write("\n%i def args - %i funcs" % (i, self.def_args_hist[i])) return report.getvalue() @@ -1028,10 +1028,11 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname if ci.consts: enumTypes = set(map(lambda c: c.enumType, ci.consts)) grouped_consts = {enumType: [c for c in ci.consts if c.enumType == enumType] for enumType in enumTypes} - for typeName, consts in grouped_consts.items(): + for typeName in sorted(grouped_consts.keys(), key=lambda x: str(x) if x is not None else ""): + consts = grouped_consts[typeName] logging.info("%s", consts) if typeName: - typeName = typeName.rsplit(".", 1)[-1] + typeNameShort = typeName.rsplit(".", 1)[-1] ###################### Utilize Java enums ###################### # ci.j_code.write(""" # public enum {1} {{ @@ -1045,9 +1046,9 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname # ) ################################################################ ci.j_code.write(""" - // C++: enum {1} + // C++: enum {1} ({2}) public static final int - {0};\n\n""".format((",\n"+" "*12).join(["%s = %s" % (c.name, const_value(c.value)) for c in consts]), typeName) + {0};\n\n""".format((",\n"+" "*12).join(["%s = %s" % (c.name, const_value(c.value)) for c in consts]), typeNameShort, 
typeName) ) else: ci.j_code.write(""" @@ -1072,10 +1073,12 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname # manual ports if ci.name in ManualFuncs: - for func in ManualFuncs[ci.name].keys(): - ci.j_code.write ( "\n".join(ManualFuncs[ci.name][func]["j_code"]) ) - ci.jn_code.write( "\n".join(ManualFuncs[ci.name][func]["jn_code"]) ) - ci.cpp_code.write( "\n".join(ManualFuncs[ci.name][func]["cpp_code"]) ) + for func in sorted(ManualFuncs[ci.name].keys()): + logging.info("manual function: %s", func) + fn = ManualFuncs[ci.name][func] + ci.j_code.write("\n".join(fn["j_code"])) + ci.jn_code.write("\n".join(fn["jn_code"])) + ci.cpp_code.write("\n".join(fn["cpp_code"])) if ci.name != self.Module or ci.base: # finalize() @@ -1303,7 +1306,7 @@ if __name__ == "__main__": # initialize logger logging.basicConfig(filename='gen_java.log', format=None, filemode='w', level=logging.INFO) handler = logging.StreamHandler() - handler.setLevel(logging.WARNING) + handler.setLevel(os.environ.get('LOG_LEVEL', logging.WARNING)) logging.getLogger().addHandler(handler) # parse command line parameters From 4d00ed8df730f3829a9a8bc7b223246c249777c7 Mon Sep 17 00:00:00 2001 From: Chris Ballinger Date: Fri, 13 Nov 2020 07:30:53 -0800 Subject: [PATCH 086/152] Merge pull request #18771 from chrisballinger:xcode-12-fixes Xcode 12 and Python 2/3 fixes * Fix compilation issues using Xcode 12 on macOS Catalina * Fix macOS scripts to work on Python 2 or 3 * Fix additional issues with Python 3 * Fix additional Python 2/3 issue * Fix another Python 2/3 issue * Remove dependency on builtins module --- modules/objc/generator/gen_objc.py | 83 ++++++++++++++---------------- platforms/ios/build_framework.py | 6 +-- 2 files changed, 42 insertions(+), 47 deletions(-) diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py index 87e42e821d..e6637a7c4c 100755 --- a/modules/objc/generator/gen_objc.py +++ b/modules/objc/generator/gen_objc.py @@ -1,23 +1,20 @@ 
#!/usr/bin/env python +from __future__ import print_function, unicode_literals import sys, re, os.path, errno, fnmatch import json import logging import codecs +import io from shutil import copyfile from pprint import pformat from string import Template from distutils.dir_util import copy_tree -if sys.version_info[0] >= 3: - from io import StringIO -else: - import io - class StringIO(io.StringIO): - def write(self, s): - if isinstance(s, str): - s = unicode(s) # noqa: F821 - return super(StringIO, self).write(s) +try: + from io import StringIO # Python 3 +except: + from io import BytesIO as StringIO SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -267,15 +264,15 @@ class ClassInfo(GeneralInfo): return Template("CLASS $namespace::$classpath.$name : $base").substitute(**self.__dict__) def getImports(self, module): - return ["#import \"%s.h\"" % c for c in sorted(filter(lambda m: m != self.name, map(lambda m: type_dict[m]["import_module"] if m in type_dict and "import_module" in type_dict[m] else m, self.imports)))] + return ["#import \"%s.h\"" % c for c in sorted([m for m in [type_dict[m]["import_module"] if m in type_dict and "import_module" in type_dict[m] else m for m in self.imports] if m != self.name])] def isEnum(self, c): return c in type_dict and type_dict[c].get("is_enum", False) def getForwardDeclarations(self, module): - enum_decl = filter(lambda x:self.isEnum(x) and type_dict[x]["import_module"] != module, self.imports) - enum_imports = list(set(map(lambda m: type_dict[m]["import_module"], enum_decl))) - class_decl = filter(lambda x: not self.isEnum(x), self.imports) + enum_decl = [x for x in self.imports if self.isEnum(x) and type_dict[x]["import_module"] != module] + enum_imports = list(set([type_dict[m]["import_module"] for m in enum_decl])) + class_decl = [x for x in self.imports if not self.isEnum(x)] return ["#import \"%s.h\"" % c for c in enum_imports] + [""] + ["@class %s;" % c for c in sorted(class_decl)] def addImports(self, ctype, 
is_out_type): @@ -350,7 +347,7 @@ class ClassInfo(GeneralInfo): module = M, additionalImports = self.additionalImports.getvalue(), importBaseClass = '#import "' + self.base + '.h"' if not self.is_base_class else "", - forwardDeclarations = "\n".join(filter(None, self.getForwardDeclarations(objcM))), + forwardDeclarations = "\n".join([_f for _f in self.getForwardDeclarations(objcM) if _f]), enumDeclarations = self.enum_declarations.getvalue(), nativePointerHandling = Template( """ @@ -656,7 +653,7 @@ def build_swift_logues(args): def add_method_to_dict(class_name, fi): static = fi.static if fi.classname else True - if not method_dict.has_key((class_name, fi.objc_name)): + if (class_name, fi.objc_name) not in method_dict: objc_method_name = ("+" if static else "-") + fi.objc_name + ":" + build_objc_method_name(fi.args) method_dict[(class_name, fi.objc_name)] = objc_method_name @@ -664,7 +661,7 @@ def see_lookup(objc_class, see): semi_colon = see.find("::") see_class = see[:semi_colon] if semi_colon > 0 else objc_class see_method = see[(semi_colon + 2):] if semi_colon != -1 else see - if method_dict.has_key((see_class, see_method)): + if (see_class, see_method) in method_dict: method = method_dict[(see_class, see_method)] if see_class == objc_class: return method @@ -741,7 +738,7 @@ class ObjectiveCWrapperGenerator(object): logging.info('ignored: %s', constinfo) else: objc_type = enumType.rsplit(".", 1)[-1] if enumType else "" - if const_fix.has_key(constinfo.classname) and const_fix[constinfo.classname].has_key(objc_type) and const_fix[constinfo.classname][objc_type].has_key(constinfo.name): + if constinfo.classname in const_fix and objc_type in const_fix[constinfo.classname] and constinfo.name in const_fix[constinfo.classname][objc_type]: fixed_const = const_fix[constinfo.classname][objc_type][constinfo.name] constinfo.name = fixed_const constinfo.cname = fixed_const @@ -772,7 +769,7 @@ class ObjectiveCWrapperGenerator(object): objc_type = enumType.rsplit(".", 
1)[-1] if objc_type in enum_ignore_list: return - if enum_fix.has_key(constinfo.classname): + if constinfo.classname in enum_fix: objc_type = enum_fix[constinfo.classname].get(objc_type, objc_type) import_module = constinfo.classname if constinfo.classname and constinfo.classname != objc_type else self.Module type_dict[ctype] = { "cast_from" : "int", @@ -800,7 +797,7 @@ class ObjectiveCWrapperGenerator(object): logging.info('ignored: %s', fi) elif classname in ManualFuncs and fi.objc_name in ManualFuncs[classname]: logging.info('manual: %s', fi) - if ManualFuncs[classname][fi.objc_name].has_key("objc_method_name"): + if "objc_method_name" in ManualFuncs[classname][fi.objc_name]: method_dict[(classname, fi.objc_name)] = ManualFuncs[classname][fi.objc_name]["objc_method_name"] elif not self.isWrapped(classname): logging.warning('not found: %s', fi) @@ -827,7 +824,7 @@ class ObjectiveCWrapperGenerator(object): updated_files += 1 def get_namespace_prefix(self, cname): - namespace = self.classes[cname].namespace if self.classes.has_key(cname) else "cv" + namespace = self.classes[cname].namespace if cname in self.classes else "cv" return namespace.replace(".", "::") + "::" def gen(self, srcfiles, module, output_path, output_objc_path, common_headers, manual_classes): @@ -875,7 +872,7 @@ class ObjectiveCWrapperGenerator(object): mkdir_p(package_path) extension_file = "%s/%s/%sExt.swift" % (output_objc_path, module, self.Module) - for ci in self.classes.values(): + for ci in list(self.classes.values()): if ci.name == "Mat": continue ci.initCodeStreams(self.Module) @@ -901,13 +898,13 @@ class ObjectiveCWrapperGenerator(object): report.write("\n".join(self.ported_func_list)) report.write("\n\nSKIPPED FUNCs LIST (%i of %i):\n\n" % (len(self.skipped_func_list), total_count)) report.write("".join(self.skipped_func_list)) - for i in self.def_args_hist.keys(): + for i in list(self.def_args_hist.keys()): report.write("\n%i def args - %i funcs" % (i, self.def_args_hist[i])) return 
report.getvalue() def fullTypeName(self, t): - if not type_dict[t].get("is_primitive", False) or type_dict[t].has_key("cast_to"): - if type_dict[t].has_key("cast_to"): + if not type_dict[t].get("is_primitive", False) or "cast_to" in type_dict[t]: + if "cast_to" in type_dict[t]: return type_dict[t]["cast_to"] else: namespace_prefix = self.get_namespace_prefix(t) @@ -916,7 +913,7 @@ class ObjectiveCWrapperGenerator(object): return t def build_objc2cv_prologue(self, prologue, vector_type, vector_full_type, objc_type, vector_name, array_name): - if not (type_dict.has_key(vector_type) and type_dict[vector_type].has_key("to_cpp") and type_dict[vector_type]["to_cpp"] != "%(n)s.nativeRef"): + if not (vector_type in type_dict and "to_cpp" in type_dict[vector_type] and type_dict[vector_type]["to_cpp"] != "%(n)s.nativeRef"): prologue.append("OBJC2CV(" + vector_full_type + ", " + objc_type[:-1] + ", " + vector_name + ", " + array_name + ");") else: conv_macro = "CONV_" + array_name @@ -925,7 +922,7 @@ class ObjectiveCWrapperGenerator(object): prologue.append("#undef " + conv_macro) def build_cv2objc_epilogue(self, epilogue, vector_type, vector_full_type, objc_type, vector_name, array_name): - if not (type_dict.has_key(vector_type) and type_dict[vector_type].has_key("from_cpp") and type_dict[vector_type]["from_cpp"] != ("[" + objc_type[:-1] + " fromNative:%(n)s]")): + if not (vector_type in type_dict and "from_cpp" in type_dict[vector_type] and type_dict[vector_type]["from_cpp"] != ("[" + objc_type[:-1] + " fromNative:%(n)s]")): epilogue.append("CV2OBJC(" + vector_full_type + ", " + objc_type[:-1] + ", " + vector_name + ", " + array_name + ");") else: unconv_macro = "UNCONV_" + array_name @@ -1106,7 +1103,7 @@ class ObjectiveCWrapperGenerator(object): ret_val = "cv::Ptr<" + namespace_prefix + ret_type + "> retVal = new " + namespace_prefix + ret_type + "(" tail = ")" ret_type_dict = type_dict[ret_type] - from_cpp = ret_type_dict["from_cpp_ptr"] if 
ret_type_dict.has_key("from_cpp_ptr") else ret_type_dict["from_cpp"] + from_cpp = ret_type_dict["from_cpp_ptr"] if "from_cpp_ptr" in ret_type_dict else ret_type_dict["from_cpp"] ret = "return " + (from_cpp % { "n" : "retVal" }) + ";" elif "from_cpp" in type_dict[ret_type]: ret = "return " + (type_dict[ret_type]["from_cpp"] % { "n" : "retVal" }) + ";" @@ -1212,13 +1209,13 @@ $unrefined_call$epilogue$ret return const_value(target.value) return v if ci.consts: - enumTypes = set(map(lambda c: c.enumType, ci.consts)) + enumTypes = set([c.enumType for c in ci.consts]) grouped_consts = {enumType: [c for c in ci.consts if c.enumType == enumType] for enumType in enumTypes} - for typeName, consts in grouped_consts.items(): + for typeName, consts in list(grouped_consts.items()): logging.info("%s", consts) if typeName: typeName = typeName.rsplit(".", 1)[-1] - if enum_fix.has_key(ci.cname): + if ci.cname in enum_fix: typeName = enum_fix[ci.cname].get(typeName, typeName) ci.enum_declarations.write(""" @@ -1257,7 +1254,7 @@ typedef NS_ENUM(int, {2}) {{ ci.addImports(pi.ctype, False) ci.method_declarations.write("@property " + ("(readonly) " if not pi.rw else "") + objc_type + " " + pi.name + ";\n") ptr_ref = "self." 
+ ci.native_ptr_name + "->" if not ci.is_base_class else "self.nativePtr->" - if type_data.has_key("v_type"): + if "v_type" in type_data: vector_cpp_type = type_data["v_type"] has_namespace = vector_cpp_type.find("::") != -1 vector_full_cpp_type = self.fullTypeName(vector_cpp_type) if not has_namespace else vector_cpp_type @@ -1269,7 +1266,7 @@ typedef NS_ENUM(int, {2}) {{ self.build_cv2objc_epilogue(epilogue, vector_cpp_type, vector_full_cpp_type, objc_type, "retValVector", "retVal") ci.method_implementations.write("\t" + ("\n\t".join(epilogue)) + "\n") ci.method_implementations.write("\treturn retVal;\n}\n\n") - elif type_data.has_key("v_v_type"): + elif "v_v_type" in type_data: vector_cpp_type = type_data["v_v_type"] has_namespace = vector_cpp_type.find("::") != -1 vector_full_cpp_type = self.fullTypeName(vector_cpp_type) if not has_namespace else vector_cpp_type @@ -1283,14 +1280,14 @@ typedef NS_ENUM(int, {2}) {{ namespace_prefix = self.get_namespace_prefix(pi.ctype) ci.method_implementations.write("-(" + objc_type + ")" + pi.name + " {\n") ci.method_implementations.write("\tcv::Ptr<" + namespace_prefix + pi.ctype + "> retVal = new " + namespace_prefix + pi.ctype + "(" + ptr_ref + pi.name + ");\n") - from_cpp = type_data["from_cpp_ptr"] if type_data.has_key("from_cpp_ptr") else type_data["from_cpp"] + from_cpp = type_data["from_cpp_ptr"] if "from_cpp_ptr" in type_data else type_data["from_cpp"] ci.method_implementations.write("\treturn " + (from_cpp % {"n": "retVal"}) + ";\n}\n\n") else: from_cpp = type_data.get("from_cpp", "%(n)s") retVal = from_cpp % {"n": (ptr_ref + pi.name)} ci.method_implementations.write("-(" + objc_type + ")" + pi.name + " {\n\treturn " + retVal + ";\n}\n\n") if pi.rw: - if type_data.has_key("v_type"): + if "v_type" in type_data: vector_cpp_type = type_data["v_type"] has_namespace = vector_cpp_type.find("::") != -1 vector_full_cpp_type = self.fullTypeName(vector_cpp_type) if not has_namespace else vector_cpp_type @@ -1300,13 +1297,13 @@ 
typedef NS_ENUM(int, {2}) {{ ci.method_implementations.write("\t" + ("\n\t".join(prologue)) + "\n") ci.method_implementations.write("\t" + ptr_ref + pi.name + " = valVector;\n}\n\n") else: - to_cpp = type_data.get("to_cpp", ("(" + type_data.get("cast_to") + ")%(n)s") if type_data.has_key("cast_to") else "%(n)s") + to_cpp = type_data.get("to_cpp", ("(" + type_data.get("cast_to") + ")%(n)s") if "cast_to" in type_data else "%(n)s") val = to_cpp % {"n": pi.name} ci.method_implementations.write("-(void)set" + pi.name[0].upper() + pi.name[1:] + ":(" + objc_type + ")" + pi.name + " {\n\t" + ptr_ref + pi.name + " = " + val + ";\n}\n\n") # manual ports if ci.name in ManualFuncs: - for func in ManualFuncs[ci.name].keys(): + for func in list(ManualFuncs[ci.name].keys()): ci.method_declarations.write( "\n".join(ManualFuncs[ci.name][func]["declaration"]) ) ci.method_implementations.write( "\n".join(ManualFuncs[ci.name][func]["implementation"]) ) @@ -1373,11 +1370,11 @@ typedef NS_ENUM(int, {2}) {{ for dirname, dirs, files in os.walk(os.path.join(testdir, "test")): for filename in files: filepath = os.path.join(dirname, filename) - with open(filepath) as file: + with io.open(filepath, encoding="utf-8", errors="ignore") as file: body = file.read() body = body.replace("import OpenCV", "import " + framework_name) body = body.replace("#import ", "#import <" + framework_name + "/" + framework_name + ".h>") - with open(filepath, "w") as file: + with codecs.open(filepath, "w", "utf-8") as file: file.write(body) @@ -1477,9 +1474,9 @@ def sanitize_documentation_string(doc, type): in_code = True lines[i] = line.replace("", "") - lines = list(map(lambda x: x[x.find('*'):].strip() if x.lstrip().startswith("*") else x, lines)) - lines = list(map(lambda x: "* " + x[1:].strip() if x.startswith("*") and x != "*" else x, lines)) - lines = list(map(lambda x: x if x.startswith("*") else "* " + x if x and x != "*" else "*", lines)) + lines = list([x[x.find('*'):].strip() if 
x.lstrip().startswith("*") else x for x in lines]) + lines = list(["* " + x[1:].strip() if x.startswith("*") and x != "*" else x for x in lines]) + lines = list([x if x.startswith("*") else "* " + x if x and x != "*" else "*" for x in lines]) hasValues = False for line in lines: @@ -1605,9 +1602,7 @@ if __name__ == "__main__": if os.path.exists(objc_test_resources_dir): copy_tree(objc_test_resources_dir, os.path.join(objc_test_base_path, 'test', 'resources')) - manual_classes = filter(lambda x:type_dict.has_key(x), - map(lambda x: x[x.rfind('/')+1:-2], - filter(lambda x: x.endswith('.h'), copied_files))) + manual_classes = [x for x in [x[x.rfind('/')+1:-2] for x in [x for x in copied_files if x.endswith('.h')]] if x in type_dict] if len(srcfiles) > 0: generator.gen(srcfiles, module, dstdir, objc_base_path, common_headers, manual_classes) diff --git a/platforms/ios/build_framework.py b/platforms/ios/build_framework.py index e89cf3c666..e759072825 100755 --- a/platforms/ios/build_framework.py +++ b/platforms/ios/build_framework.py @@ -31,12 +31,12 @@ However, {framework_name}.framework directory is erased and recreated on each ru Adding --dynamic parameter will build {framework_name}.framework as App Store dynamic framework. Only iOS 8+ versions are supported. 
""" -from __future__ import print_function +from __future__ import print_function, unicode_literals import glob, re, os, os.path, shutil, string, sys, argparse, traceback, multiprocessing from subprocess import check_call, check_output, CalledProcessError from distutils.dir_util import copy_tree -IPHONEOS_DEPLOYMENT_TARGET='8.0' # default, can be changed via command line options or environment variable +IPHONEOS_DEPLOYMENT_TARGET='9.0' # default, can be changed via command line options or environment variable def execute(cmd, cwd = None): print("Executing: %s in %s" % (cmd, cwd), file=sys.stderr) @@ -46,7 +46,7 @@ def execute(cmd, cwd = None): raise Exception("Child returned:", retcode) def getXCodeMajor(): - ret = check_output(["xcodebuild", "-version"]) + ret = check_output(["xcodebuild", "-version"]).decode('utf-8') m = re.match(r'Xcode\s+(\d+)\..*', ret, flags=re.IGNORECASE) if m: return int(m.group(1)) From a3f3fbe05de648e125a9aa4d64f92cbcf39007fd Mon Sep 17 00:00:00 2001 From: Andreas Franek Date: Sun, 18 Oct 2020 11:48:41 +0200 Subject: [PATCH 087/152] add rudimentary support for uEye cameras uEye are cameras from IDS, c.f. https://en.ids-imaging.com/ Supports driver version 4.94 and up currently, since the event system was overhauled there. 
Supports setting/getting the properties: fps,width,height --- CMakeLists.txt | 7 + modules/videoio/CMakeLists.txt | 9 + modules/videoio/cmake/detect_ueye.cmake | 25 + modules/videoio/cmake/init.cmake | 1 + modules/videoio/include/opencv2/videoio.hpp | 1 + modules/videoio/src/cap_interface.hpp | 2 + modules/videoio/src/cap_ueye.cpp | 499 ++++++++++++++++++++ modules/videoio/src/videoio_registry.cpp | 8 +- 8 files changed, 551 insertions(+), 1 deletion(-) create mode 100644 modules/videoio/cmake/detect_ueye.cmake create mode 100644 modules/videoio/src/cap_ueye.cpp diff --git a/CMakeLists.txt b/CMakeLists.txt index 4350b2fe2a..02c0009e9c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -367,6 +367,9 @@ OCV_OPTION(WITH_MSMF_DXVA "Enable hardware acceleration in Media Foundation back OCV_OPTION(WITH_XIMEA "Include XIMEA cameras support" OFF VISIBLE_IF NOT ANDROID AND NOT WINRT VERIFY HAVE_XIMEA) +OCV_OPTION(WITH_UEYE "Include UEYE camera support" OFF + VISIBLE_IF NOT ANDROID AND NOT APPLE AND NOT WINRT + VERIFY HAVE_UEYE) OCV_OPTION(WITH_XINE "Include Xine support (GPL)" OFF VISIBLE_IF UNIX AND NOT APPLE AND NOT ANDROID VERIFY HAVE_XINE) @@ -1372,6 +1375,10 @@ if(WITH_XIMEA OR HAVE_XIMEA) status(" XIMEA:" HAVE_XIMEA THEN YES ELSE NO) endif() +if(WITH_UEYE OR HAVE_UEYE) + status(" uEye:" HAVE_UEYE THEN YES ELSE NO) +endif() + if(WITH_XINE OR HAVE_XINE) status(" Xine:" HAVE_XINE THEN "YES (ver ${XINE_VERSION})" ELSE NO) endif() diff --git a/modules/videoio/CMakeLists.txt b/modules/videoio/CMakeLists.txt index 12ff992294..a31d969ab8 100644 --- a/modules/videoio/CMakeLists.txt +++ b/modules/videoio/CMakeLists.txt @@ -138,6 +138,15 @@ if(TARGET ocv.3rdparty.ximea) list(APPEND tgts ocv.3rdparty.ximea) endif() +if(TARGET ocv.3rdparty.ueye) + if("ueye" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all") + ocv_create_builtin_videoio_plugin("opencv_videoio_ueye" ocv.3rdparty.ueye "cap_ueye.cpp") + else() + list(APPEND videoio_srcs 
${CMAKE_CURRENT_LIST_DIR}/src/cap_ueye.cpp) + list(APPEND tgts ocv.3rdparty.ueye) + endif() +endif() + if(TARGET ocv.3rdparty.ffmpeg) if(HAVE_FFMPEG_WRAPPER) list(APPEND tgts ocv.3rdparty.ffmpeg) diff --git a/modules/videoio/cmake/detect_ueye.cmake b/modules/videoio/cmake/detect_ueye.cmake new file mode 100644 index 0000000000..495e9c2450 --- /dev/null +++ b/modules/videoio/cmake/detect_ueye.cmake @@ -0,0 +1,25 @@ +if(NOT HAVE_UEYE) + if(WIN32) + if(X86_64) + set(_WIN_LIB_SUFFIX "_64") + endif() + endif() + find_path(UEYE_INCLUDE "ueye.h" + PATHS "${UEYE_ROOT}" ENV UEYE_ROOT "/usr" "C:/Program Files/IDS/uEye/Develop" + HINTS "${regpath}" + PATH_SUFFIXES "include") + find_library(UEYE_LIBRARY ueye_api${_WIN_LIB_SUFFIX} + PATHS "${UEYE_ROOT}" ENV UEYE_ROOT "/usr" "C:/Program Files/IDS/uEye/Develop" + HINTS "${regpath}" + PATH_SUFFIXES "lib") + if(UEYE_INCLUDE AND UEYE_LIBRARY) + set(HAVE_UEYE TRUE) + endif() +endif() +unset(_WIN_LIB_SUFFIX) + +if(HAVE_UEYE) + ocv_add_external_target(ueye "${UEYE_INCLUDE}" "${UEYE_LIBRARY}" "HAVE_UEYE") +endif() + +set(HAVE_UEYE ${HAVE_UEYE} PARENT_SCOPE) diff --git a/modules/videoio/cmake/init.cmake b/modules/videoio/cmake/init.cmake index 1efef12c5e..42c3f9c27f 100644 --- a/modules/videoio/cmake/init.cmake +++ b/modules/videoio/cmake/init.cmake @@ -30,6 +30,7 @@ add_backend("msdk" WITH_MFX) add_backend("openni2" WITH_OPENNI2) add_backend("pvapi" WITH_PVAPI) add_backend("realsense" WITH_LIBREALSENSE) +add_backend("ueye" WITH_UEYE) add_backend("ximea" WITH_XIMEA) add_backend("xine" WITH_XINE) diff --git a/modules/videoio/include/opencv2/videoio.hpp b/modules/videoio/include/opencv2/videoio.hpp index eb5645ab77..40586a35fb 100644 --- a/modules/videoio/include/opencv2/videoio.hpp +++ b/modules/videoio/include/opencv2/videoio.hpp @@ -120,6 +120,7 @@ enum VideoCaptureAPIs { CAP_OPENCV_MJPEG = 2200, //!< Built-in OpenCV MotionJPEG codec CAP_INTEL_MFX = 2300, //!< Intel MediaSDK CAP_XINE = 2400, //!< XINE engine (Linux) + CAP_UEYE = 2500, 
//!< uEye Camera API }; /** @brief %VideoCapture generic properties identifier. diff --git a/modules/videoio/src/cap_interface.hpp b/modules/videoio/src/cap_interface.hpp index 34bef9b9e1..5112fffe6f 100644 --- a/modules/videoio/src/cap_interface.hpp +++ b/modules/videoio/src/cap_interface.hpp @@ -301,6 +301,8 @@ Ptr create_PvAPI_capture( int index ); Ptr create_XIMEA_capture_cam( int index ); Ptr create_XIMEA_capture_file( const std::string &serialNumber ); +Ptr create_ueye_camera(int camera); + Ptr create_Aravis_capture( int index ); Ptr createMotionJpegCapture(const std::string& filename); diff --git a/modules/videoio/src/cap_ueye.cpp b/modules/videoio/src/cap_ueye.cpp new file mode 100644 index 0000000000..3912da52bc --- /dev/null +++ b/modules/videoio/src/cap_ueye.cpp @@ -0,0 +1,499 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +/* +This file adds support for uEye cameras in OpenCV. + +Cameras can be opened by ID. If 0 is passed as ID the first available camera +will be used. For any other number, the camera associated with that ID will be +opened (c.f. IDS documentation for is_InitCamera). + +Images are double buffered in a ring buffer of size 2 (called 'image memory +sequence' in the uEye SDK c.f. is_AddToSequence). The memory is locked on a +'grab' call and copied and unlocked during 'retrieve'. The image queue provided +in the uEye SDK is not used since it automatically locks the buffers when a new +image arrives, which means the buffer can fill up when frames are retrieved too +slowly. 
+*/ + +#include "precomp.hpp" + +#include + +#include +#include +#include +#include +#include + +namespace cv +{ +namespace +{ +struct image_buffer +{ + char* data; + INT id; +}; +} +#define ASSERT_UEYE(expr) { UINT expr_result = expr; if(IS_SUCCESS != expr_result) CV_Error_(Error::StsAssert, ("%s %s %d: failed with code %u", #expr, __FILE__, __LINE__, expr_result)); } +#define PRINT_ON_UEYE_ERROR( expr ) { UINT expr_result = expr; if(IS_SUCCESS != expr_result) CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << #expr << " " << __FILE__ << " " << __LINE__ << ": failed with code " << expr_result); } + +struct VideoCapture_uEye CV_FINAL: public IVideoCapture +{ + int getCaptureDomain() CV_OVERRIDE + { + return cv::CAP_UEYE; + } + + VideoCapture_uEye(int camera); + + bool isOpened() const CV_OVERRIDE + { + return 255 != cam_id; + } + + ~VideoCapture_uEye() CV_OVERRIDE + { + close(); + } + + double getProperty(int property_id) const CV_OVERRIDE; + bool setProperty(int property_id, double value) CV_OVERRIDE; + bool grabFrame() CV_OVERRIDE; + bool retrieveFrame(int outputType, OutputArray frame) CV_OVERRIDE; + + void close(); + void start_camera(); + void stop_camera(); + + void unlock_image_buffer(); + + HIDS cam_id = 255; + SENSORINFO sensor_info; + double fps; + int width; + int height; + int pitch; + std::array ring_buffer = {{{nullptr, 0}, {nullptr, 0}}}; + char* locked_image = nullptr; +}; + +Ptr create_ueye_camera(int camera) +{ + return cv::makePtr(camera); +} + +namespace +{ +std::vector get_freerun_formats(HIDS cam_id) +{ + UINT count; + ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_GET_NUM_ENTRIES, &count, sizeof(count))); + UINT sizeof_list = sizeof(IMAGE_FORMAT_LIST) + (count - 1) * sizeof(IMAGE_FORMAT_INFO); + std::unique_ptr list(new (std::malloc(sizeof_list)) IMAGE_FORMAT_LIST); + + list->nSizeOfListEntry = sizeof(IMAGE_FORMAT_INFO); + list->nNumListElements = count; + ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_GET_LIST, list.get(), 
sizeof_list)); + + // copy to vector and filter out non-live modes + std::vector formats; + formats.reserve(count + 1); + std::copy_if(list->FormatInfo, list->FormatInfo+count, std::back_inserter(formats), [](const IMAGE_FORMAT_INFO& format) + { + return (format.nSupportedCaptureModes & CAPTMODE_FREERUN); + }); + + return formats; +} + +void set_matching_format(HIDS cam_id, const SENSORINFO& sensor_info, int width, int height) +{ + // uEye camera formats sometimes do not include the native resolution (without binning, subsampling or AOI) + if(width == int(sensor_info.nMaxWidth) && height == int(sensor_info.nMaxHeight)) + { + ASSERT_UEYE(is_SetBinning(cam_id, IS_BINNING_DISABLE)); + ASSERT_UEYE(is_SetSubSampling(cam_id, IS_SUBSAMPLING_DISABLE)); + IS_RECT rectAOI = {0, 0, width, height}; + ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_SET_AOI, &rectAOI, sizeof(rectAOI))); + return; + } + auto formats = get_freerun_formats(cam_id); + CV_Assert(formats.size() > 0); + auto calc_err = [=](const IMAGE_FORMAT_INFO& format) + { + return format.nWidth - width + format.nHeight - height + (sensor_info.nMaxWidth - width)/2 - format.nX0 + (sensor_info.nMaxHeight - height)/2 - format.nY0; + }; + + std::sort(formats.begin(), formats.end(), [=](const IMAGE_FORMAT_INFO& f0, const IMAGE_FORMAT_INFO& f1) + { + return calc_err(f0) < calc_err(f1); + }); + + ASSERT_UEYE(is_ImageFormat(cam_id, IMGFRMT_CMD_SET_FORMAT, &formats.front().nFormatID, sizeof(UINT))); +} +} + + +VideoCapture_uEye::VideoCapture_uEye(int camera) +{ + CV_Assert(camera >= 0); + CV_Assert(camera < 255); // max camera id is 254 + cam_id = static_cast(camera); + CV_LOG_DEBUG(NULL, "VIDEOIO(UEYE:" << cam_id << "): opening..."); + ASSERT_UEYE(is_InitCamera(&cam_id, nullptr)); + + IS_INIT_EVENT init_event = {IS_SET_EVENT_FRAME, FALSE, FALSE}; + ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_INIT, &init_event, sizeof(init_event))); + UINT frame_event = IS_SET_EVENT_FRAME; + ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_ENABLE, 
&frame_event, sizeof(frame_event))); + + ASSERT_UEYE(is_ResetToDefault(cam_id)); + + ASSERT_UEYE(is_SetFrameRate(cam_id, IS_GET_FRAMERATE, &fps)); + + start_camera(); +} + +double VideoCapture_uEye::getProperty(int property_id) const +{ + auto value = 0.; + switch (property_id) + { + case CAP_PROP_FRAME_WIDTH: + value = width; + break; + case CAP_PROP_FRAME_HEIGHT: + value = height; + break; + case CAP_PROP_FPS: + value = fps; + break; + } + return value; +} + +bool VideoCapture_uEye::setProperty(int property_id, double value) +{ + if(!isOpened()) + return false; + try + { + bool set_format = false; + switch (property_id) + { + case CAP_PROP_FRAME_WIDTH: + if(width == value) + break; + width = static_cast(value); + set_format = true; + break; + case CAP_PROP_FRAME_HEIGHT: + if(height == value) + break; + height = static_cast(value); + set_format = true; + break; + case CAP_PROP_FPS: + if(fps == value) + break; + ASSERT_UEYE(is_SetFrameRate(cam_id, value, &fps)); + break; + } + if(set_format) + { + set_matching_format(cam_id, sensor_info, width, height); + start_camera(); + } + } + catch(const cv::Exception& e) + { + CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what()); + return false; + } + + return true; +} + +bool VideoCapture_uEye::grabFrame() +{ + if (!isOpened()) + return false; + + try + { + IS_WAIT_EVENT wait_event{IS_SET_EVENT_FRAME, static_cast(3*1000/fps), 0, 0}; // wait for the time it should take to get 3 frames + ASSERT_UEYE(is_Event(cam_id, IS_EVENT_CMD_WAIT, &wait_event, sizeof(wait_event))); + INT current_buffer_id; + char* current_buffer; + char* last; + ASSERT_UEYE(is_GetActSeqBuf(cam_id, ¤t_buffer_id, ¤t_buffer, &last)); + + const int lock_tries = 4; + std::chrono::milliseconds lock_time_out(static_cast(1000/(fps*4))); // wait for a quarter of a frame if not lockable, should not occur in event mode + UINT ret; + for(int i = 0; i < lock_tries; i++) // try locking the buffer + { + ret = is_LockSeqBuf(cam_id, IS_IGNORE_PARAMETER, 
last); + if(IS_SEQ_BUFFER_IS_LOCKED == ret) + std::this_thread::sleep_for(lock_time_out); + else + break; + } + ASSERT_UEYE(ret); + locked_image = last; + } + catch(const cv::Exception& e) + { + CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what()); + close(); + return false; + } + return true; +} + +bool VideoCapture_uEye::retrieveFrame(int /*outputType*/, OutputArray frame) +{ + if(!locked_image) + return false; + Mat(height, width, CV_8UC3, locked_image, pitch).copyTo(frame); + try + { + unlock_image_buffer(); + } + catch(const cv::Exception& e) + { + CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what()); + return false; + } + + return true; +} + +void VideoCapture_uEye::start_camera() +{ + stop_camera(); + + IS_RECT aoi; + ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_AOI, &aoi, sizeof(aoi))); + + UINT x_is_abs_pos; + UINT y_is_abs_pos; + + ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_POS_X_ABS, &x_is_abs_pos , sizeof(x_is_abs_pos))); + ASSERT_UEYE(is_AOI(cam_id, IS_AOI_IMAGE_GET_POS_Y_ABS, &y_is_abs_pos , sizeof(y_is_abs_pos))); + + ASSERT_UEYE(is_GetSensorInfo(cam_id, &sensor_info)); + width = x_is_abs_pos? sensor_info.nMaxWidth: aoi.s32Width; + height = y_is_abs_pos? 
sensor_info.nMaxHeight: aoi.s32Height; + + // allocate ring_buffer + int bpp = 24; + for(auto& image_memory: ring_buffer) + { + ASSERT_UEYE(is_AllocImageMem(cam_id, width, height, bpp, &image_memory.data, &image_memory.id)); + ASSERT_UEYE(is_AddToSequence(cam_id, image_memory.data, image_memory.id)); + } + + // TODO: this could be set according to sensor_info.nColorMode and CAP_PROP_FOURCC + ASSERT_UEYE(is_SetColorMode(cam_id, IS_CM_BGR8_PACKED)); + ASSERT_UEYE(is_GetImageMemPitch (cam_id, &pitch)); + + ASSERT_UEYE(is_CaptureVideo(cam_id, IS_DONT_WAIT)); +} + +void VideoCapture_uEye::stop_camera() +{ + if(is_CaptureVideo(cam_id, IS_GET_LIVE)) + ASSERT_UEYE(is_StopLiveVideo(cam_id, IS_FORCE_VIDEO_STOP)); + + if(locked_image) + unlock_image_buffer(); + ASSERT_UEYE(is_ClearSequence(cam_id)); + for(auto buffer: ring_buffer) + { + if(buffer.data) + { + ASSERT_UEYE(is_FreeImageMem(cam_id, buffer.data, buffer.id)); + buffer.data = nullptr; + } + } +} + +void VideoCapture_uEye::close() +{ + if(!isOpened()) + return; + CV_LOG_DEBUG(NULL, "VIDEOIO(UEYE:" << cam_id << "): closing..."); + // During closing we do not care about correct error handling as much. + // Either something has gone wrong already or it has been called from the + // destructor. Just make sure that all calls are done. 
+ try + { + stop_camera(); + } + catch(const cv::Exception& e) + { + CV_LOG_ERROR(NULL, "VIDEOIO(UEYE:" << cam_id << "): " << e.what()); + } + UINT frame_event = IS_SET_EVENT_FRAME; + PRINT_ON_UEYE_ERROR(is_Event(cam_id, IS_EVENT_CMD_DISABLE, &frame_event, sizeof(frame_event))); + PRINT_ON_UEYE_ERROR(is_Event(cam_id, IS_EVENT_CMD_EXIT, &frame_event, sizeof(frame_event))); + PRINT_ON_UEYE_ERROR(is_ExitCamera(cam_id)); + cam_id = 255; +} + +void VideoCapture_uEye::unlock_image_buffer() +{ + char* tmp_buffer = nullptr; + std::swap(locked_image, tmp_buffer); + ASSERT_UEYE(is_UnlockSeqBuf(cam_id, IS_IGNORE_PARAMETER, tmp_buffer)); +} +} // namespace cv + +// plugin glue +#if defined(BUILD_PLUGIN) + +#include "plugin_api.hpp" + +namespace cv +{ + +namespace +{ +#define CV_PLUGIN_NULL_FAIL(ptr) if(!ptr) return CV_ERROR_FAIL; +#define CV_PLUGIN_CALL_BEGIN CV_PLUGIN_NULL_FAIL(handle) try { +#define CV_PLUGIN_CALL_END } catch (...) { return CV_ERROR_FAIL; } + +CvResult CV_API_CALL cv_capture_open(const char*, int cam_id, CV_OUT CvPluginCapture* handle) +{ + CV_PLUGIN_CALL_BEGIN + + *handle = NULL; + std::unique_ptr cap(new VideoCapture_uEye(cam_id)); + if (cap->isOpened()) + { + *handle = (CvPluginCapture)cap.release(); + return CV_ERROR_OK; + } + return CV_ERROR_FAIL; + + CV_PLUGIN_CALL_END +} + +CvResult CV_API_CALL cv_capture_release(CvPluginCapture handle) +{ + CV_PLUGIN_NULL_FAIL(handle) + + VideoCapture_uEye* instance = (VideoCapture_uEye*)handle; + delete instance; + return CV_ERROR_OK; +} + + +CvResult CV_API_CALL cv_capture_get_prop(CvPluginCapture handle, int prop, CV_OUT double* val) +{ + CV_PLUGIN_NULL_FAIL(val) + CV_PLUGIN_CALL_BEGIN + + VideoCapture_uEye* instance = (VideoCapture_uEye*)handle; + *val = instance->getProperty(prop); + return CV_ERROR_OK; + + CV_PLUGIN_CALL_END +} + +CvResult CV_API_CALL cv_capture_set_prop(CvPluginCapture handle, int prop, double val) +{ + CV_PLUGIN_CALL_BEGIN + + VideoCapture_uEye* instance = (VideoCapture_uEye*)handle; + return 
instance->setProperty(prop, val) ? CV_ERROR_OK : CV_ERROR_FAIL; + + CV_PLUGIN_CALL_END +} + +CvResult CV_API_CALL cv_capture_grab(CvPluginCapture handle) +{ + CV_PLUGIN_CALL_BEGIN + + VideoCapture_uEye* instance = (VideoCapture_uEye*)handle; + return instance->grabFrame() ? CV_ERROR_OK : CV_ERROR_FAIL; + + CV_PLUGIN_CALL_END +} + +CvResult CV_API_CALL cv_capture_retrieve(CvPluginCapture handle, int stream_idx, cv_videoio_retrieve_cb_t callback, void* userdata) +{ + CV_PLUGIN_CALL_BEGIN + + VideoCapture_uEye* instance = (VideoCapture_uEye*)handle; + Mat img; + if (instance->retrieveFrame(stream_idx, img)) + return callback(stream_idx, img.data, (int)img.step, img.cols, img.rows, img.channels(), userdata); + return CV_ERROR_FAIL; + + CV_PLUGIN_CALL_END +} + +CvResult CV_API_CALL cv_writer_open(const char* /*filename*/, int /*fourcc*/, double /*fps*/, int /*width*/, int /*height*/, int /*isColor*/, + CV_OUT CvPluginWriter* /*handle*/) +{ + return CV_ERROR_FAIL; +} + +CvResult CV_API_CALL cv_writer_release(CvPluginWriter /*handle*/) +{ + return CV_ERROR_FAIL; +} + +CvResult CV_API_CALL cv_writer_get_prop(CvPluginWriter /*handle*/, int /*prop*/, CV_OUT double* /*val*/) +{ + return CV_ERROR_FAIL; +} + +CvResult CV_API_CALL cv_writer_set_prop(CvPluginWriter /*handle*/, int /*prop*/, double /*val*/) +{ + return CV_ERROR_FAIL; +} + +CvResult CV_API_CALL cv_writer_write(CvPluginWriter /*handle*/, const unsigned char* /*data*/, int /*step*/, int /*width*/, int /*height*/, int /*cn*/) +{ + return CV_ERROR_FAIL; +} + +const OpenCV_VideoIO_Plugin_API_preview plugin_api_v0 = +{ + { + sizeof(OpenCV_VideoIO_Plugin_API_preview), ABI_VERSION, API_VERSION, + CV_VERSION_MAJOR, CV_VERSION_MINOR, CV_VERSION_REVISION, CV_VERSION_STATUS, + "uEye OpenCV Video I/O plugin" + }, + /* 1*/CAP_UEYE, + /* 2*/cv_capture_open, + /* 3*/cv_capture_release, + /* 4*/cv_capture_get_prop, + /* 5*/cv_capture_set_prop, + /* 6*/cv_capture_grab, + /* 7*/cv_capture_retrieve, + /* 8*/cv_writer_open, + /* 
9*/cv_writer_release, + /* 10*/cv_writer_get_prop, + /* 11*/cv_writer_set_prop, + /* 12*/cv_writer_write +}; +} // namespace +} // namespace cv + +const OpenCV_VideoIO_Plugin_API_preview* opencv_videoio_plugin_init_v0(int requested_abi_version, int requested_api_version, void* /*reserved=NULL*/) CV_NOEXCEPT +{ + if (requested_abi_version != 0) + return NULL; + if (requested_api_version != 0) + return NULL; + return &cv::plugin_api_v0; +} + +#endif // BUILD_PLUGIN diff --git a/modules/videoio/src/videoio_registry.cpp b/modules/videoio/src/videoio_registry.cpp index b5798db80e..3ee1bab822 100644 --- a/modules/videoio/src/videoio_registry.cpp +++ b/modules/videoio/src/videoio_registry.cpp @@ -51,7 +51,7 @@ namespace { - platform specific universal SDK: WINRT, AVFOUNDATION, MSMF/DSHOW, V4L/V4L2 - RGB-D: OpenNI/OpenNI2, REALSENSE - special OpenCV (file-based): "images", "mjpeg" -- special camera SDKs, including stereo: other special SDKs: FIREWIRE/1394, XIMEA/ARAVIS/GIGANETIX/PVAPI(GigE) +- special camera SDKs, including stereo: other special SDKs: FIREWIRE/1394, XIMEA/ARAVIS/GIGANETIX/PVAPI(GigE)/uEye - other: XINE, gphoto2, etc */ static const struct VideoBackendInfo builtin_backends[] = @@ -130,6 +130,12 @@ static const struct VideoBackendInfo builtin_backends[] = DECLARE_STATIC_BACKEND(CAP_ARAVIS, "ARAVIS", MODE_CAPTURE_BY_INDEX, 0, create_Aravis_capture, 0), #endif +#ifdef HAVE_UEYE // uEye + DECLARE_STATIC_BACKEND(CAP_UEYE, "UEYE", MODE_CAPTURE_BY_INDEX, 0, create_ueye_camera, 0), +#elif defined(ENABLE_PLUGINS) + DECLARE_DYNAMIC_BACKEND(CAP_UEYE, "UEYE", MODE_CAPTURE_BY_INDEX), +#endif + #ifdef HAVE_GPHOTO2 DECLARE_STATIC_BACKEND(CAP_GPHOTO2, "GPHOTO2", MODE_CAPTURE_ALL, createGPhoto2Capture, createGPhoto2Capture, 0), #endif From 61144f935efaae03d506ab2b54ee02b3bc1a4452 Mon Sep 17 00:00:00 2001 From: Sergei Slashchinin <62052793+sl-sergei@users.noreply.github.com> Date: Sat, 14 Nov 2020 01:22:10 +0300 Subject: [PATCH 088/152] Merge pull request #18783 from 
sl-sergei:fix_conv1d Add support for Conv1D on OpenCV backend * Add support for Conv1D on OpenCV backend * disable tests on other targets/backends * Fix formatting * Restore comment * Remove unnecessary flag and fix test logic * Fix perf test * fix braces * Fix indentation, assert check and remove unnecessary condition * Remove unnecessary changes * Add test cases for variable weights and bias * dnn(conv): fallback on OpenCV+CPU instead of failures * coding style --- modules/dnn/perf/perf_convolution.cpp | 4 +- modules/dnn/perf/perf_convolution1d.cpp | 163 +++++++++++++ modules/dnn/perf/perf_convolution3d.cpp | 4 +- modules/dnn/src/layers/convolution_layer.cpp | 228 ++++++++++++++----- modules/dnn/src/onnx/onnx_importer.cpp | 8 +- modules/dnn/test/test_onnx_importer.cpp | 61 ++++- modules/dnn/test/test_tf_importer.cpp | 2 - 7 files changed, 402 insertions(+), 68 deletions(-) create mode 100644 modules/dnn/perf/perf_convolution1d.cpp diff --git a/modules/dnn/perf/perf_convolution.cpp b/modules/dnn/perf/perf_convolution.cpp index 7d51cd300f..c2a3a66ab9 100644 --- a/modules/dnn/perf/perf_convolution.cpp +++ b/modules/dnn/perf/perf_convolution.cpp @@ -533,7 +533,7 @@ struct ConvParamID CONV_100 = 100, CONV_LAST = sizeof(testConvolutionConfigs) / sizeof(testConvolutionConfigs[0]) }; - int val_; \ + int val_; ConvParamID(int val = 0) : val_(val) {} operator int() const { return val_; } static ::testing::internal::ParamGenerator all() @@ -546,7 +546,7 @@ struct ConvParamID ConvParamID v_[NUM]; for (int i = 0; i < NUM; ++i) { v_[i] = ConvParamID(i); } // reduce generated code size return ::testing::ValuesIn(v_, v_ + NUM); } -}; \ +}; static inline void PrintTo(const ConvParamID& v, std::ostream* os) { CV_Assert((int)v >= 0); CV_Assert((int)v < ConvParamID::CONV_LAST); diff --git a/modules/dnn/perf/perf_convolution1d.cpp b/modules/dnn/perf/perf_convolution1d.cpp new file mode 100644 index 0000000000..c35cbd503f --- /dev/null +++ b/modules/dnn/perf/perf_convolution1d.cpp @@ 
-0,0 +1,163 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "perf_precomp.hpp" +#include + +namespace opencv_test { + +struct Conv1DParam_t { + int kernel; + struct BlobShape { int dims[3]; } shapeIn; + int outCN; + int groups; + int stride; + int dilation; + int pad[2]; + const char* padMode; + bool hasBias; + double declared_flops; +}; +// Details: #12142 +static const Conv1DParam_t testConvolution1DConfigs[] = { + {3, {{1, 6, 10}}, 6, 1, 1, 1, {0, 0}, "VALID", true, 1776.}, + {3, {{1, 2, 19}}, 2, 2, 2, 1, {1, 1}, "", true, 260.}, + {3, {{1, 2, 25}}, 2, 2, 1, 1, {2, 2}, "SAME", false, 650.}, +}; + +struct Conv1DParamID +{ + enum { + CONV_0 = 0, + CONV_LAST = sizeof(testConvolution1DConfigs) / sizeof(testConvolution1DConfigs[0]) + }; + int val_; + Conv1DParamID(int val = 0) : val_(val) {} + operator int() const { return val_; } + static ::testing::internal::ParamGenerator all() + { + enum { NUM = (int)CONV_LAST }; + Conv1DParamID v_[NUM]; for (int i = 0; i < NUM; ++i) { v_[i] = Conv1DParamID(i); } // reduce generated code size + return ::testing::ValuesIn(v_, v_ + NUM); + } +}; +static inline void PrintTo(const Conv1DParamID& v, std::ostream* os) +{ + CV_Assert((int)v >= 0); CV_Assert((int)v < Conv1DParamID::CONV_LAST); + const Conv1DParam_t& p = testConvolution1DConfigs[(int)v]; + + *os << "GFLOPS=" << cv::format("%.3f", p.declared_flops * 1e-9) + << ", K=[" << p.kernel << "]" + << ", IN={" << p.shapeIn.dims[0] << ", " << p.shapeIn.dims[1] << ", " << p.shapeIn.dims[2] << "}" + << ", OCN=" << p.outCN; + if (p.groups > 1) + *os << ", G=" << p.groups; + if (p.stride != 1) + *os << ", S=" << p.stride; + if (p.dilation != 1) + *os << ", D=" << p.dilation; + if (p.pad[0] != 0 && p.pad[1] != 0 ) + *os << ", P=(" << p.pad[0] << ", " << p.pad[1] << ")"; + if (!((std::string)p.padMode).empty()) + *os << ", PM=" 
<< ((std::string)p.padMode); + if (p.hasBias) + *os << ", BIAS"; +} + + +typedef tuple > Conv1DTestParam_t; +typedef TestBaseWithParam Conv1D; + +PERF_TEST_P_(Conv1D, conv1d) +{ + int test_id = (int)get<0>(GetParam()); + ASSERT_GE(test_id, 0); ASSERT_LT(test_id, Conv1DParamID::CONV_LAST); + const Conv1DParam_t& params = testConvolution1DConfigs[test_id]; + double declared_flops = params.declared_flops; + + DictValue kernel = DictValue::arrayInt(¶ms.kernel, 1); + DictValue stride = DictValue::arrayInt(¶ms.stride, 1); + DictValue pad = DictValue::arrayInt(¶ms.pad[0], 2); + DictValue dilation = DictValue::arrayInt(¶ms.dilation, 1); + + MatShape inputShape = MatShape(params.shapeIn.dims, params.shapeIn.dims + 3); + int outChannels = params.outCN; + int groups = params.groups; + std::string padMode(params.padMode); + + bool hasBias = params.hasBias; + Backend backendId = get<0>(get<1>(GetParam())); + Target targetId = get<1>(get<1>(GetParam())); + + if (targetId != DNN_TARGET_CPU) + throw SkipTestException("Only CPU is supported"); + + int inChannels = inputShape[1]; + + int sz[] = {outChannels, inChannels / groups, params.kernel}; + Mat weights(3, &sz[0], CV_32F); + randu(weights, -1.0f, 1.0f); + + LayerParams lp; + lp.set("kernel_size", kernel); + lp.set("pad", pad); + if (!padMode.empty()) + lp.set("pad_mode", padMode); + + lp.set("stride", stride); + lp.set("dilation", dilation); + lp.set("num_output", outChannels); + lp.set("group", groups); + lp.set("bias_term", hasBias); + lp.type = "Convolution"; + lp.name = "testLayer"; + lp.blobs.push_back(weights); + + if (hasBias) + { + Mat bias(1, outChannels, CV_32F); + randu(bias, -1.0f, 1.0f); + lp.blobs.push_back(bias); + } + + int inpSz[] = {1, inChannels, inputShape[2]}; + Mat input(3, &inpSz[0], CV_32F); + randu(input, -1.0f, 1.0f); + + Net net; + net.addLayerToPrev(lp.name, lp.type, lp); + + net.setInput(input); + net.setPreferableBackend(backendId); + net.setPreferableTarget(targetId); + + // warmup + Mat output = 
net.forward(); + + MatShape netInputShape = shape(input); + size_t weightsMemory = 0, blobsMemory = 0; + net.getMemoryConsumption(netInputShape, weightsMemory, blobsMemory); + int64 flops = net.getFLOPS(netInputShape); + CV_Assert(flops > 0); + + std::cout + << "IN=" << divUp(input.total() * input.elemSize(), 1u<<10) << " Kb " << netInputShape + << " OUT=" << divUp(output.total() * output.elemSize(), 1u<<10) << " Kb " << shape(output) + << " Weights(parameters): " << divUp(weightsMemory, 1u<<10) << " Kb" + << " MFLOPS=" << flops * 1e-6 << std::endl; + + TEST_CYCLE() + { + Mat res = net.forward(); + } + EXPECT_NEAR(flops, declared_flops, declared_flops * 1e-6); + SANITY_CHECK_NOTHING(); +} + +INSTANTIATE_TEST_CASE_P(/**/, Conv1D, Combine( + Conv1DParamID::all(), + dnnBackendsAndTargets(false, false) // defined in ../test/test_common.hpp +)); + +} // namespace diff --git a/modules/dnn/perf/perf_convolution3d.cpp b/modules/dnn/perf/perf_convolution3d.cpp index 1f512b2a15..0cf4ce26a3 100644 --- a/modules/dnn/perf/perf_convolution3d.cpp +++ b/modules/dnn/perf/perf_convolution3d.cpp @@ -46,7 +46,7 @@ struct Conv3DParamID CONV_100 = 16, CONV_LAST = sizeof(testConvolution3DConfigs) / sizeof(testConvolution3DConfigs[0]) }; - int val_; \ + int val_; Conv3DParamID(int val = 0) : val_(val) {} operator int() const { return val_; } static ::testing::internal::ParamGenerator all() @@ -59,7 +59,7 @@ struct Conv3DParamID Conv3DParamID v_[NUM]; for (int i = 0; i < NUM; ++i) { v_[i] = Conv3DParamID(i); } // reduce generated code size return ::testing::ValuesIn(v_, v_ + NUM); } -}; \ +}; static inline void PrintTo(const Conv3DParamID& v, std::ostream* os) { CV_Assert((int)v >= 0); CV_Assert((int)v < Conv3DParamID::CONV_LAST); diff --git a/modules/dnn/src/layers/convolution_layer.cpp b/modules/dnn/src/layers/convolution_layer.cpp index 473c07b755..c8245c487d 100644 --- a/modules/dnn/src/layers/convolution_layer.cpp +++ b/modules/dnn/src/layers/convolution_layer.cpp @@ -113,17 +113,22 
@@ public: MatSize weightShape = blobs.empty() ? inputs[1].size : blobs[0].size; CV_Assert(inputs[0].dims == outputs[0].dims); + if (weightShape.dims() == 3) + { + kernel_size.assign(1, kernel_size[0]); + strides.assign(1, strides[0]); + } CV_Assert(weightShape.dims() == kernel_size.size() + 2); for (int i = 0; i < kernel_size.size(); i++) { CV_Assert(weightShape[i + 2] == kernel_size[i]); } const Mat &input = inputs[0]; - CV_Assert((input.dims == 4 || input.dims == 5) && (input.type() == CV_32F || input.type() == CV_16S)); + CV_Assert(((input.dims == 3 && kernel_size.size() == 1) || input.dims == 4 || input.dims == 5) && (input.type() == CV_32F || input.type() == CV_16S)); for (size_t i = 0; i < outputs.size(); i++) { CV_Assert(inputs[i].type() == input.type()); - CV_Assert((inputs[i].dims == 4 || inputs[i].dims == 5) && inputs[i].size[1] == input.size[1]); + CV_Assert(((input.dims == 3 && kernel_size.size() == 1) || inputs[i].dims == 4 || inputs[i].dims == 5) && inputs[i].size[1] == input.size[1]); for (int j = 0; j < inputs[i].dims; j++) { CV_Assert(inputs[i].size[j] == input.size[j]); } @@ -261,19 +266,26 @@ public: virtual bool supportBackend(int backendId) CV_OVERRIDE { + size_t ksize = kernel_size.size(); #ifdef HAVE_INF_ENGINE if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) { - if (kernel_size.size() == 3) + if (ksize == 1) + return false; + if (ksize == 3) return preferableTarget == DNN_TARGET_CPU; if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || preferableTarget != DNN_TARGET_MYRIAD) && blobs.empty()) return false; return (preferableTarget != DNN_TARGET_MYRIAD || dilation.width == dilation.height); } - else #endif - return (kernel_size.size() == 3 && preferableTarget == DNN_TARGET_CPU && backendId == DNN_BACKEND_OPENCV) || - (kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || (backendId == DNN_BACKEND_HALIDE && !blobs.empty()))); + if (backendId == 
DNN_BACKEND_OPENCV) + return ksize >= 1 && ksize <= 3; +#ifdef HAVE_HALIDE + if (backendId == DNN_BACKEND_HALIDE) + return ksize == 2 && !blobs.empty(); +#endif + return false; } bool getMemoryShapes(const std::vector &inputs, @@ -325,18 +337,27 @@ public: inputs_arr.getMatVector(inputs); // prepare weightsMat where each row is aligned and has enough zero padding on the right to // use vectorized (i.e. with intrinsics) loops without tail processing - Mat wm = blobs.empty() ? inputs[1].reshape(1, numOutput) : blobs[0].reshape(1, numOutput); - if( wm.step1() % VEC_ALIGN != 0 ) + if (!blobs.empty()) { - int newcols = (int)alignSize(wm.step1(), VEC_ALIGN); - Mat wm_buffer = Mat(numOutput, newcols, wm.type()); - Mat wm_padding = wm_buffer.colRange(wm.cols, newcols); - wm_padding.setTo(Scalar::all(0.)); - Mat wm_aligned = wm_buffer.colRange(0, wm.cols); - wm.copyTo(wm_aligned); - wm = wm_aligned; + Mat wm = blobs[0].reshape(1, numOutput); + if( wm.step1() % VEC_ALIGN != 0 ) + { + int newcols = (int)alignSize(wm.step1(), VEC_ALIGN); + Mat wm_buffer = Mat(numOutput, newcols, wm.type()); + Mat wm_padding = wm_buffer.colRange(wm.cols, newcols); + wm_padding.setTo(Scalar::all(0.)); + Mat wm_aligned = wm_buffer.colRange(0, wm.cols); + wm.copyTo(wm_aligned); + wm = wm_aligned; + } + weightsMat = wm; } - weightsMat = wm; + else + { + // initialized in .forward() + weightsMat.release(); + } + weightsMultipliers.assign(numOutput, 1.0); Mat biasMat = hasBias() ? 
blobs[1].reshape(1, numOutput) : Mat(); @@ -678,8 +699,11 @@ public: { size_t karea = std::accumulate(kernel_size.begin(), kernel_size.end(), 1, std::multiplies()); - CV_Assert_N( - (input.dims == 4 || input.dims == 5) && (input.dims == output.dims), + bool isConv1D = input.dims == 3; + bool isConv2D = input.dims == 4; + bool isConv3D = input.dims == 5; + CV_CheckEQ(static_cast(kernel_size.size()), input.dims - 2, ""); + CV_Assert_N(input.dims == output.dims, input.size[0] == output.size[0], weights.rows == output.size[1], weights.cols == (input.size[1]/ngroups)*karea, @@ -689,12 +713,15 @@ public: input.isContinuous(), output.isContinuous(), biasvec.size() == (size_t)output.size[1]+2); + CV_Check(weights.step1(), weights.step1() % VEC_ALIGN == 0, ""); + CV_CheckType(weights.type(), CV_32FC1, ""); ParallelConv p; p.input_ = &input; p.weights_ = &weights; p.output_ = &output; - for( int i = 0; i < 4; i++ ) p.outShape[i] = output.size[i]; + int max_ind = isConv1D? 3: 4; + for( int i = 0; i < max_ind; i++ ) p.outShape[i] = output.size[i]; p.outShape[1] /= ngroups; p.kernel_size = kernel_size; p.strides = strides; p.dilations = dilations; @@ -706,20 +733,19 @@ public: int inpCnAll = input.size[1]; int depth = (input.dims == 5) ? input.size[2] : 1; int width = input.size[input.dims - 1]; - int height = input.size[input.dims - 2]; + int height = isConv1D? 1 : input.size[input.dims - 2]; int inpCn = inpCnAll / ngroups; - bool isConv2D = kernel_size.size() == 2; - - p.is1x1_ = isConv2D && kernel_size[0] == 1 && kernel_size[1] == 1 && - pads_begin[0] == 0 && pads_begin[1] == 0; + p.is1x1_ = (isConv2D && kernel_size[0] == 1 && kernel_size[1] == 1 && + pads_begin[0] == 0 && pads_begin[1] == 0) || + (isConv1D && pads_begin[0] == 0 && kernel_size[0] == 1); p.useAVX = checkHardwareSupport(CPU_AVX) && isConv2D; p.useAVX2 = checkHardwareSupport(CPU_AVX2) && isConv2D; p.useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX && isConv2D; - int kernel_d = !isConv2D? 
kernel_size[0] : 1; - int kernel_h = kernel_size[kernel_size.size() - 2]; + int kernel_d = isConv3D? kernel_size[0] : 1; + int kernel_h = isConv1D? 1 : kernel_size[kernel_size.size() - 2]; int kernel_w = kernel_size.back(); int blk_size_cn0 = cvCeil(800./(kernel_w*kernel_h)); @@ -729,14 +755,20 @@ public: ncn = std::min(ncn, inpCn); p.blk_size_cn = ncn; - int dil_d = !isConv2D? dilations[0] : 1; - int dil_h = dilations[dilations.size() - 2]; + int dil_d = isConv3D? dilations[0] : 1; + int dil_h = isConv1D? 1 : dilations[dilations.size() - 2]; int dil_w = dilations.back(); p.ofstab_.resize(karea * ncn); int* ofstab = &p.ofstab_[0]; - if (isConv2D) + if (isConv1D) + { + for( int k = 0; k < ncn; k++ ) + for( int k_c = 0; k_c < kernel_w; k_c++ ) + ofstab[k*kernel_w + k_c] = k*width + k_c*dil_w; + } + else if (isConv2D) { for( int k = 0; k < ncn; k++ ) for( int k_r = 0; k_r < kernel_h; k_r++ ) @@ -765,34 +797,36 @@ public: { const int valign = ConvolutionLayerImpl::VEC_ALIGN; int ngroups = ngroups_, batchSize = input_->size[0]*ngroups; + bool isConv1D = input_->dims == 3; bool isConv2D = input_->dims == 4; + bool isConv3D = input_->dims == 5; int outW = output_->size[output_->dims - 1]; - int outH = output_->size[output_->dims - 2]; + int outH = isConv1D? 1 : output_->size[output_->dims - 2]; int outCn = output_->size[1]/ngroups; - int depth = !isConv2D? input_->size[2] : 1; - int height = input_->size[input_->dims - 2]; + int depth = isConv3D? input_->size[2] : 1; + int height = isConv1D? 1 : input_->size[input_->dims - 2]; int width = input_->size[input_->dims - 1]; int inpCn = input_->size[1]/ngroups; const int nstripes = nstripes_; - int kernel_d = !isConv2D? kernel_size[0] : 1; - int kernel_h = kernel_size[kernel_size.size() - 2]; + int kernel_d = isConv3D? kernel_size[0] : 1; + int kernel_h = isConv1D? 1 : kernel_size[kernel_size.size() - 2]; int kernel_w = kernel_size.back(); int karea = kernel_w*kernel_h*kernel_d; - int pad_d = !isConv2D? 
pads_begin[0] : 0; - int pad_t = pads_begin[pads_begin.size() - 2]; + int pad_d = isConv3D? pads_begin[0] : 0; + int pad_t = isConv1D? 0 : pads_begin[pads_begin.size() - 2]; int pad_l = pads_begin.back(); - int stride_d = !isConv2D? strides[0] : 0; - int stride_h = strides[strides.size() - 2]; + int stride_d = isConv3D? strides[0] : 0; + int stride_h = isConv1D? 0 : strides[strides.size() - 2]; int stride_w = strides.back(); - int dilation_d = !isConv2D? dilations[0] : 1; - int dilation_h = dilations[dilations.size() - 2]; + int dilation_d = isConv3D? dilations[0] : 1; + int dilation_h = isConv1D? 1 : dilations[dilations.size() - 2]; int dilation_w = dilations.back(); int i, j, k, d; @@ -1032,7 +1066,71 @@ public: // do im2row for a part of input tensor float* rowbuf = rowbuf0; - if (isConv2D) + if (isConv1D) + { + for( ofs = ofs0; ofs < ofs1; out_j = 0, ++out_i ) + { + int delta = std::min(ofs1 - ofs, outW - out_j); + int out_j1 = out_j + delta; + + int in_j = out_j * stride_w - pad_l; + const float* imgptr = data_inp0 + cn0*width + in_j; + ofs += delta; + + // do im2row for a part of input tensor + if( is1x1 ) + { + for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w ) + { + for( k = 0; k < vsz; k++ ) + rowbuf[k] = imgptr[k*inpPlaneSize]; + } + } + else + { + for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w ) + { + // this condition should be true for most of the tensor elements, i.e. + // most of the time the kernel aperture is inside the tensor X-Y plane. 
+ if( out_j + 2 <= out_j1 && 0 <= in_j && in_j + stride_w*2 <= width - (kernel_w-1)*dilation_w ) + { + for( k = 0; k < vsz; k++ ) + { + int k1 = ofstab[k]; + float v0 = imgptr[k1]; + float v1 = imgptr[k1 + stride_w]; + rowbuf[k] = v0; + rowbuf[k+vsz_a] = v1; + } + out_j++; + rowbuf += vsz_a; + imgptr += stride_w; + in_j += stride_w; + } + else + { + int i0 = std::max(0, (-in_j + dilation_w-1)/dilation_w); + int i1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w); + + // here some non-continuous sub-row of the row will not be + // filled from the tensor; we need to make sure that the uncovered + // elements are explicitly set to 0's. the easiest way is to + // set all the elements to 0's before the loop. + memset(rowbuf, 0, vsz*sizeof(rowbuf[0])); + for( k = 0; k < ncn; k++ ) + { + for( i = i0; i < i1; i++ ) + { + int imgofs = k*width + i*dilation_w; + rowbuf[k*kernel_w + i] = imgptr[imgofs]; + } + } + } + } + } + } + } + else if (isConv2D) { if( is1x1 && stride_w == 1 && stride_h == 1 ) { @@ -1265,9 +1363,12 @@ public: vs12 = v_setzero_f32(), vs13 = v_setzero_f32(); for( k = 0; k < vsz; k += 4, rptr += 4 ) { - v_float32x4 w0 = v_load_aligned(wptr0 + k), w1 = v_load_aligned(wptr1 + k); - v_float32x4 r0 = v_load_aligned(rptr), r1 = v_load_aligned(rptr + vsz_a), - r2 = v_load_aligned(rptr + vsz_a*2), r3 = v_load_aligned(rptr + vsz_a*3); + v_float32x4 w0 = v_load_aligned(wptr0 + k); + v_float32x4 w1 = v_load_aligned(wptr1 + k); + v_float32x4 r0 = v_load_aligned(rptr); + v_float32x4 r1 = v_load_aligned(rptr + vsz_a); + v_float32x4 r2 = v_load_aligned(rptr + vsz_a*2); + v_float32x4 r3 = v_load_aligned(rptr + vsz_a*3); vs00 += w0*r0; vs01 += w0*r1; @@ -1337,6 +1438,12 @@ public: #ifdef HAVE_OPENCL bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals) { + if (kernel_size.size() != 2) + { + // no OpenCL optimizations, see .supportedBacked() + return false; + } + std::vector inputs; std::vector outputs; @@ -1520,26 
+1627,35 @@ public: if (blobs.empty()) { Mat wm = inputs[1].reshape(1, outCn); - if( wm.step1() % VEC_ALIGN != 0 ) + if (wm.data != weightsMat.data) { - wm.copyTo(weightsMat); + int newcols = (int)alignSize(wm.step1(), VEC_ALIGN); + Mat wm_buffer = Mat(numOutput, newcols, wm.type()); + Mat wm_padding = wm_buffer.colRange(wm.cols, newcols); + wm_padding.setTo(Scalar::all(0.)); + weightsMat = wm_buffer.colRange(0, wm.cols); + + wm.copyTo((const Mat&)weightsMat); if (inputs.size() > 2) { Mat biasMat = inputs[2].reshape(1, outCn); biasMat.col(0).copyTo(biasvec); - biasvec.resize(outCn + 2); - } - else - { - biasvec.resize(outCn + 2, 0); } + biasvec.resize(outCn + 2, 0); } } - - /*printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n", - name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3], - kernel.width, kernel.height, pad.width, pad.height, - stride.width, stride.height, dilation.width, dilation.height);*/ + /*if (inputs[0].dims > 3) { + printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n", + name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3], + kernel.width, kernel.height, pad.width, pad.height, + stride.width, stride.height, dilation.width, dilation.height); + } + else { + printf("conv %s: input (%d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n", + name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], + kernel.width, kernel.height, pad.width, pad.height, + stride.width, stride.height, dilation.width, dilation.height); + }*/ int inpGroupCn = blobs.empty() ? 
inputs[1].size[1] : blobs[0].size[1]; CV_Assert_N(inputs.size() >= (size_t)1, inputs[0].size[1] % inpGroupCn == 0, outputs.size() == 1, inputs[0].data != outputs[0].data); diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp index 56683f4c14..9443336305 100644 --- a/modules/dnn/src/onnx/onnx_importer.cpp +++ b/modules/dnn/src/onnx/onnx_importer.cpp @@ -200,12 +200,12 @@ LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_prot if(attribute_name == "kernel_shape") { - CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3); + CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3); lp.set("kernel_size", parse(attribute_proto.ints())); } else if(attribute_name == "strides") { - CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3); + CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3); lp.set("stride", parse(attribute_proto.ints())); } else if(attribute_name == "pads") @@ -229,7 +229,7 @@ LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_prot else { // Convolution or pooling. 
- CV_Assert(attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6); + CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 4 || attribute_proto.ints_size() == 6); lp.set("pad", parse(attribute_proto.ints())); } } @@ -244,7 +244,7 @@ LayerParams ONNXImporter::getLayerParams(const opencv_onnx::NodeProto& node_prot } else if(attribute_name == "dilations") { - CV_Assert(attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3); + CV_Assert(attribute_proto.ints_size() == 1 || attribute_proto.ints_size() == 2 || attribute_proto.ints_size() == 3); lp.set("dilation", parse(attribute_proto.ints())); } else if (attribute_proto.has_i()) diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 993ba56be4..5c6de55da5 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -183,9 +183,14 @@ TEST_P(Test_ONNX_layers, Convolution3D) #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION); #endif - if (target != DNN_TARGET_CPU) - throw SkipTestException("Only CPU is supported"); testONNXModels("conv3d"); +} + +TEST_P(Test_ONNX_layers, Convolution3D_bias) +{ +#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_LT(2019010000) + applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_VERSION); +#endif testONNXModels("conv3d_bias"); } @@ -648,6 +653,58 @@ TEST_P(Test_ONNX_layers, ResizeOpset11_Torch1_6) testONNXModels("resize_opset11_torch1.6"); } +TEST_P(Test_ONNX_layers, Conv1d) +{ + testONNXModels("conv1d"); +} + +TEST_P(Test_ONNX_layers, Conv1d_bias) +{ + testONNXModels("conv1d_bias"); +} + +TEST_P(Test_ONNX_layers, Conv1d_variable_weight) +{ + String basename = "conv1d_variable_w"; + Net net = readNetFromONNX(_tf("models/" + basename + ".onnx")); + ASSERT_FALSE(net.empty()); + + net.setPreferableBackend(backend); + net.setPreferableTarget(target); + + Mat input = blobFromNPY(_tf("data/input_" + 
basename + "_0.npy")); + Mat weights = blobFromNPY(_tf("data/input_" + basename + "_1.npy")); + Mat ref = blobFromNPY(_tf("data/output_" + basename + ".npy")); + + net.setInput(input, "0"); + net.setInput(weights, "1"); + + Mat out = net.forward(); + normAssert(ref, out, "", default_l1, default_lInf); +} + +TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias) +{ + String basename = "conv1d_variable_wb"; + Net net = readNetFromONNX(_tf("models/" + basename + ".onnx")); + ASSERT_FALSE(net.empty()); + + net.setPreferableBackend(backend); + net.setPreferableTarget(target); + + Mat input = blobFromNPY(_tf("data/input_" + basename + "_0.npy")); + Mat weights = blobFromNPY(_tf("data/input_" + basename + "_1.npy")); + Mat bias = blobFromNPY(_tf("data/input_" + basename + "_2.npy")); + Mat ref = blobFromNPY(_tf("data/output_" + basename + ".npy")); + + net.setInput(input, "0"); + net.setInput(weights, "1"); + net.setInput(bias, "bias"); + + Mat out = net.forward(); + normAssert(ref, out, "", default_l1, default_lInf); +} + INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets()); class Test_ONNX_nets : public Test_ONNX_layers diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp index 68b720a375..e9c1562b4c 100644 --- a/modules/dnn/test/test_tf_importer.cpp +++ b/modules/dnn/test/test_tf_importer.cpp @@ -173,8 +173,6 @@ TEST_P(Test_TensorFlow_layers, Convolution3D) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); // Only CPU on DLIE backend is supported if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH && target != DNN_TARGET_CPU) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); // Only CPU on DLIE backend is supported - if (target != DNN_TARGET_CPU) - throw SkipTestException("Only CPU is supported"); runTensorFlowNet("conv3d"); } From 3826158547d4af8250100164377ec2271a49e4c9 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Fri, 13 Nov 2020 06:27:18 +0000 Subject: [PATCH 089/152] java: 'namespace_' class 
prefix, support inner classes - support multi-level namespaces - support inner classes (Params) - reduce scope of 'using namespace' in JNI C++ code --- modules/java/generator/gen_java.py | 137 ++++++++++++++++++++--------- 1 file changed, 94 insertions(+), 43 deletions(-) diff --git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index 279a2e140b..6c604ed04b 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -123,7 +123,8 @@ T_CPP_MODULE = Template(read_contents(os.path.join(SCRIPT_DIR, 'templates/cpp_mo class GeneralInfo(): def __init__(self, type, decl, namespaces): - self.symbol_id, self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces) + self.symbol_id, self.parent_id, self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces) + self.cname = get_cname(self.symbol_id) # parse doxygen comments self.params={} @@ -150,6 +151,9 @@ class GeneralInfo(): returns: (namespace, classpath, classname, name) ''' name = name[name.find(" ")+1:].strip() # remove struct/class/const prefix + parent = name[:name.rfind('.')].strip() + if len(parent) == 0: + parent = None spaceName = "" localName = name # . for namespace in sorted(namespaces, key=len, reverse=True): @@ -159,31 +163,44 @@ class GeneralInfo(): break pieces = localName.split(".") if len(pieces) > 2: # ... - return name, spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1] + return name, parent, spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1] elif len(pieces) == 2: # . - return name, spaceName, pieces[0], pieces[0], pieces[1] + return name, parent, spaceName, pieces[0], pieces[0], pieces[1] elif len(pieces) == 1: # - return name, spaceName, "", "", pieces[0] + return name, parent, spaceName, "", "", pieces[0] else: - return name, spaceName, "", "" # error?! + return name, parent, spaceName, "", "" # error?! 
- def fullName(self, isCPP=False): - result = ".".join([self.fullClass(), self.name]) - return result if not isCPP else get_cname(result) + def fullNameOrigin(self): + result = self.symbol_id + return result - def fullClass(self, isCPP=False): + def fullNameJAVA(self): + result = '.'.join([self.fullParentNameJAVA(), self.jname]) + return result + + def fullNameCPP(self): + result = self.cname + return result + + def fullParentNameJAVA(self): result = ".".join([f for f in [self.namespace] + self.classpath.split(".") if len(f)>0]) - return result if not isCPP else get_cname(result) + return result + + def fullParentNameCPP(self): + result = get_cname(self.parent_id) + return result class ConstInfo(GeneralInfo): def __init__(self, decl, addedManually=False, namespaces=[], enumType=None): GeneralInfo.__init__(self, "const", decl, namespaces) - self.cname = get_cname(self.name) self.value = decl[1] self.enumType = enumType self.addedManually = addedManually if self.namespace in namespaces_dict: - self.name = '%s_%s' % (namespaces_dict[self.namespace], self.name) + prefix = namespaces_dict[self.namespace] + if prefix: + self.name = '%s_%s' % (prefix, self.name) def __repr__(self): return Template("CONST $name=$value$manual").substitute(name=self.name, @@ -227,7 +244,6 @@ class ClassPropInfo(): class ClassInfo(GeneralInfo): def __init__(self, decl, namespaces=[]): # [ 'class/struct cname', ': base', [modlist] ] GeneralInfo.__init__(self, "class", decl, namespaces) - self.cname = get_cname(self.name) self.methods = [] self.methods_suffixes = {} self.consts = [] # using a list to save the occurrence order @@ -242,6 +258,18 @@ class ClassInfo(GeneralInfo): for m in decl[2]: if m.startswith("="): self.jname = m[1:] + + if self.classpath: + prefix = self.classpath.replace('.', '_') + self.name = '%s_%s' % (prefix, self.name) + self.jname = '%s_%s' % (prefix, self.jname) + + if self.namespace in namespaces_dict: + prefix = namespaces_dict[self.namespace] + if prefix: + 
self.name = '%s_%s' % (prefix, self.name) + self.jname = '%s_%s' % (prefix, self.jname) + self.base = '' if decl[1]: #self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip() @@ -358,11 +386,26 @@ class FuncInfo(GeneralInfo): self.isconstructor = self.name == self.classname if "[" in self.name: self.jname = "getelem" - if self.namespace in namespaces_dict: - self.jname = '%s_%s' % (namespaces_dict[self.namespace], self.jname) for m in decl[2]: - if m.startswith("="): + if m.startswith("="): # alias from WRAP_AS self.jname = m[1:] + if self.classpath and self.classname != self.classpath: + prefix = self.classpath.replace('.', '_') + self.classname = prefix #'%s_%s' % (prefix, self.classname) + if self.isconstructor: + self.name = prefix #'%s_%s' % (prefix, self.name) + self.jname = prefix #'%s_%s' % (prefix, self.jname) + + if self.namespace in namespaces_dict: + prefix = namespaces_dict[self.namespace] + if prefix: + if self.classname: + self.classname = '%s_%s' % (prefix, self.classname) + if self.isconstructor: + self.jname = '%s_%s' % (prefix, self.jname) + else: + self.jname = '%s_%s' % (prefix, self.jname) + self.static = ["","static"][ "/S" in decl[2] ] self.ctype = re.sub(r"^CvTermCriteria", "TermCriteria", decl[1] or "") self.args = [] @@ -374,6 +417,12 @@ class FuncInfo(GeneralInfo): arg[3] = arg_fix_map.get('attrib', arg[3]) #fixing arg attrib self.args.append(ArgInfo(arg)) + def fullClassJAVA(self): + return self.fullParentNameJAVA() + + def fullClassCPP(self): + return self.fullParentNameCPP() + def __repr__(self): return Template("FUNC <$ctype $namespace.$classpath.$name $args>").substitute(**self.__dict__) @@ -388,7 +437,8 @@ class JavaWrapperGenerator(object): def clear(self): self.namespaces = ["cv"] - self.classes = { "Mat" : ClassInfo([ 'class Mat', '', [], [] ], self.namespaces) } + classinfo_Mat = ClassInfo([ 'class cv.Mat', '', [], [] ], self.namespaces) + self.classes = { "Mat" : classinfo_Mat } self.module = "" 
self.Module = "" self.ported_func_list = [] @@ -411,7 +461,7 @@ class JavaWrapperGenerator(object): type_dict.setdefault(name, {}).update( { "j_type" : classinfo.jname, "jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),), - "jni_name" : "(*("+classinfo.fullName(isCPP=True)+"*)%(n)s_nativeObj)", "jni_type" : "jlong", + "jni_name" : "(*("+classinfo.fullNameCPP()+"*)%(n)s_nativeObj)", "jni_type" : "jlong", "suffix" : "J", "j_import" : "org.opencv.%s.%s" % (self.module, classinfo.jname) } @@ -419,7 +469,7 @@ class JavaWrapperGenerator(object): type_dict.setdefault(name+'*', {}).update( { "j_type" : classinfo.jname, "jn_type" : "long", "jn_args" : (("__int64", ".nativeObj"),), - "jni_name" : "("+classinfo.fullName(isCPP=True)+"*)%(n)s_nativeObj", "jni_type" : "jlong", + "jni_name" : "("+classinfo.fullNameCPP()+"*)%(n)s_nativeObj", "jni_type" : "jlong", "suffix" : "J", "j_import" : "org.opencv.%s.%s" % (self.module, classinfo.jname) } @@ -446,7 +496,7 @@ class JavaWrapperGenerator(object): type_dict.setdefault("Ptr_"+name, {}).update( { "j_type" : classinfo.jname, "jn_type" : "long", "jn_args" : (("__int64", ".getNativeObjAddr()"),), - "jni_name" : "*((Ptr<"+classinfo.fullName(isCPP=True)+">*)%(n)s_nativeObj)", "jni_type" : "jlong", + "jni_name" : "*((Ptr<"+classinfo.fullNameCPP()+">*)%(n)s_nativeObj)", "jni_type" : "jlong", "suffix" : "J", "j_import" : "org.opencv.%s.%s" % (self.module, classinfo.jname) } @@ -489,14 +539,15 @@ class JavaWrapperGenerator(object): def add_func(self, decl): fi = FuncInfo(decl, namespaces=self.namespaces) classname = fi.classname or self.Module + class_symbol_id = classname if self.isWrapped(classname) else fi.classpath.replace('.', '_') #('.'.join([fi.namespace, fi.classpath])[3:]) if classname in class_ignore_list: logging.info('ignored: %s', fi) elif classname in ManualFuncs and fi.jname in ManualFuncs[classname]: logging.info('manual: %s', fi) - elif not self.isWrapped(classname): + elif not self.isWrapped(class_symbol_id): 
logging.warning('not found: %s', fi) else: - self.getClass(classname).addMethod(fi) + self.getClass(class_symbol_id).addMethod(fi) logging.info('ok: %s', fi) # calc args with def val cnt = len([a for a in fi.args if a.defval]) @@ -521,7 +572,7 @@ class JavaWrapperGenerator(object): # TODO: support UMat versions of declarations (implement UMat-wrapper for Java) parser = hdr_parser.CppHeaderParser(generate_umat_decls=False) - self.add_class( ['class ' + self.Module, '', [], []] ) # [ 'class/struct cname', ':bases', [modlist] [props] ] + self.add_class( ['class cv.' + self.Module, '', [], []] ) # [ 'class/struct cname', ':bases', [modlist] [props] ] # scan the headers and build more descriptive maps of classes, consts, functions includes = [] @@ -582,9 +633,9 @@ class JavaWrapperGenerator(object): report.write("\n%i def args - %i funcs" % (i, self.def_args_hist[i])) return report.getvalue() - def fullTypeName(self, t): + def fullTypeNameCPP(self, t): if self.isWrapped(t): - return self.getClass(t).fullName(isCPP=True) + return self.getClass(t).fullNameCPP() else: return cast_from(t) @@ -897,7 +948,7 @@ class JavaWrapperGenerator(object): default = "" elif not fi.ctype: # c-tor if self.isSmartClass(ci): - ret = "return (jlong)(new Ptr<%(ctype)s>(_retval_));" % { 'ctype': fi.fullClass(isCPP=True) } + ret = "return (jlong)(new Ptr<%(ctype)s>(_retval_));" % { 'ctype': fi.fullClassCPP() } else: ret = "return (jlong) _retval_;" elif "v_type" in type_dict[fi.ctype]: # c-tor @@ -907,9 +958,9 @@ class JavaWrapperGenerator(object): ret = "return env->NewStringUTF(_retval_.c_str());" default = 'return env->NewStringUTF("");' elif self.isWrapped(fi.ctype): # wrapped class: - ret = "return (jlong) new %s(_retval_);" % self.fullTypeName(fi.ctype) + ret = "return (jlong) new %s(_retval_);" % self.fullTypeNameCPP(fi.ctype) elif fi.ctype.startswith('Ptr_'): - c_prologue.append("typedef Ptr<%s> %s;" % (self.fullTypeName(fi.ctype[4:]), fi.ctype)) + c_prologue.append("typedef Ptr<%s> 
%s;" % (self.fullTypeNameCPP(fi.ctype[4:]), fi.ctype)) ret = "return (jlong)(new %(ctype)s(_retval_));" % { 'ctype':fi.ctype } elif self.isWrapped(ret_type): # pointer to wrapped class: ret = "return (jlong) _retval_;" @@ -924,12 +975,12 @@ class JavaWrapperGenerator(object): else: name = prop_name + ";//" - cvname = fi.fullName(isCPP=True) - retval = self.fullTypeName(fi.ctype) + " _retval_ = " if ret else "return " + cvname = fi.fullNameCPP() + retval = self.fullTypeNameCPP(fi.ctype) + " _retval_ = " if ret else "return " if fi.ctype == "void": retval = "" elif fi.ctype == "String": - retval = "cv::" + self.fullTypeName(fi.ctype) + " _retval_ = " + retval = "cv::" + self.fullTypeNameCPP(fi.ctype) + " _retval_ = " elif fi.ctype == "string": retval = "std::string _retval_ = " elif "v_type" in type_dict[fi.ctype]: # vector is returned @@ -945,18 +996,18 @@ class JavaWrapperGenerator(object): if fi.classname: if not fi.ctype: # c-tor if self.isSmartClass(ci): - retval = self.smartWrap(ci, fi.fullClass(isCPP=True)) + " _retval_ = " - cvname = "makePtr<" + fi.fullClass(isCPP=True) +">" + retval = self.smartWrap(ci, fi.fullClassCPP()) + " _retval_ = " + cvname = "makePtr<" + fi.fullClassCPP() +">" else: - retval = fi.fullClass(isCPP=True) + "* _retval_ = " - cvname = "new " + fi.fullClass(isCPP=True) + retval = fi.fullClassCPP() + "* _retval_ = " + cvname = "new " + fi.fullClassCPP() elif fi.static: - cvname = fi.fullName(isCPP=True) + cvname = fi.fullNameCPP() else: cvname = ("me->" if not self.isSmartClass(ci) else "(*me)->") + name c_prologue.append( "%(cls)s* me = (%(cls)s*) self; //TODO: check for NULL" - % { "cls" : self.smartWrap(ci, fi.fullClass(isCPP=True))} + % { "cls" : self.smartWrap(ci, fi.fullClassCPP())} ) cvargs = [] for a in args: @@ -981,13 +1032,12 @@ class JavaWrapperGenerator(object): clazz = ci.jname cpp_code.write ( Template( """ -${namespace} - JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname ($argst); JNIEXPORT $rtype JNICALL 
Java_org_opencv_${module}_${clazz}_$fname ($args) { + ${namespace} static const char method_name[] = "$module::$fname()"; try { LOGD("%s", method_name);$prologue @@ -1014,7 +1064,7 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname cvargs = " " + ", ".join(cvargs) + " " if cvargs else "", default = "\n " + default if default else "", retval = retval, - namespace = ('using namespace ' + ci.namespace.replace('.', '::') + ';') if ci.namespace else '' + namespace = ('using namespace ' + ci.namespace.replace('.', '::') + ';') if ci.namespace and ci.namespace != 'cv' else '' ) ) # adding method signature to dictionary @@ -1081,13 +1131,14 @@ JNIEXPORT $rtype JNICALL Java_org_opencv_${module}_${clazz}_$fname self.gen_func(ci, fi) # props for pi in ci.props: + basename = ci.fullNameOrigin() # getter - getter_name = ci.fullName() + ".get_" + pi.name + getter_name = basename + ".get_" + pi.name fi = FuncInfo( [getter_name, pi.ctype, [], []], self.namespaces ) # [ funcname, return_ctype, [modifiers], [args] ] self.gen_func(ci, fi, pi.name) if pi.rw: #setter - setter_name = ci.fullName() + ".set_" + pi.name + setter_name = basename + ".set_" + pi.name fi = FuncInfo( [ setter_name, "void", [], [ [pi.ctype, pi.name, "", [], ""] ] ], self.namespaces) self.gen_func(ci, fi, pi.name) @@ -1131,7 +1182,7 @@ JNIEXPORT void JNICALL Java_org_opencv_%(module)s_%(j_cls)s_delete delete (%(cls)s*) self; } -""" % {"module" : module.replace('_', '_1'), "cls" : self.smartWrap(ci, ci.fullName(isCPP=True)), "j_cls" : ci.jname.replace('_', '_1')} +""" % {"module" : module.replace('_', '_1'), "cls" : self.smartWrap(ci, ci.fullNameCPP()), "j_cls" : ci.jname.replace('_', '_1')} ) def getClass(self, classname): From 05c011e842195272f6147e17e5eb734d7d034ba4 Mon Sep 17 00:00:00 2001 From: Aitik Gupta Date: Sat, 14 Nov 2020 08:17:44 +0530 Subject: [PATCH 090/152] Small typo-fix --- .../video-input-psnr-ssim/video_input_psnr_ssim.markdown | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown b/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown index 08cc596964..ffd4d0213e 100644 --- a/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown +++ b/doc/tutorials/videoio/video-input-psnr-ssim/video_input_psnr_ssim.markdown @@ -131,7 +131,7 @@ For properties you can read and change look into the documentation of the @ref c We want to check just how imperceptible our video converting operation went, therefore we need a system to check frame by frame the similarity or differences. The most common algorithm used for this is the PSNR (aka **Peak signal-to-noise ratio**). The simplest definition of this starts out -from the *mean squad error*. Let there be two images: I1 and I2; with a two dimensional size i and +from the *mean squared error*. Let there be two images: I1 and I2; with a two dimensional size i and j, composed of c number of channels. \f[MSE = \frac{1}{c*i*j} \sum{(I_1-I_2)^2}\f] From 79a82013ad2af04269891ce15281c9fdc31d3f66 Mon Sep 17 00:00:00 2001 From: Jose Quaresma Date: Sun, 1 Nov 2020 12:29:56 +0000 Subject: [PATCH 091/152] samples: cmake: digits needs opencv_dnn module to build Introduced in commit 397ba2d9aafb5312e777ce2f886d7b568109e931: add OpenCV sample for digit and text recognition, and provide multiple OCR models.
https://github.com/opencv/opencv/commit/397ba2d9aafb5312e777ce2f886d7b568109e931 Signed-off-by: Jose Quaresma --- samples/cpp/CMakeLists.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/samples/cpp/CMakeLists.txt b/samples/cpp/CMakeLists.txt index 617629df2e..14ab6141df 100644 --- a/samples/cpp/CMakeLists.txt +++ b/samples/cpp/CMakeLists.txt @@ -14,6 +14,7 @@ set(OPENCV_CPP_SAMPLES_REQUIRED_DEPS opencv_features2d opencv_calib3d opencv_stitching + opencv_dnn ${OPENCV_MODULES_PUBLIC} ${OpenCV_LIB_COMPONENTS}) ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS}) From 2e7ef6f4e80f11975086cdf57240ae00b1581a3d Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sat, 14 Nov 2020 19:10:43 +0000 Subject: [PATCH 092/152] objc: fix std::string handling - arg types may be passed as string instead of std::string --- modules/core/misc/objc/gen_dict.json | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/modules/core/misc/objc/gen_dict.json b/modules/core/misc/objc/gen_dict.json index 4cb8133dc4..05082ce1ca 100644 --- a/modules/core/misc/objc/gen_dict.json +++ b/modules/core/misc/objc/gen_dict.json @@ -215,6 +215,13 @@ "from_cpp": "[NSString stringWithUTF8String:%(n)s.c_str()]", "swift_type": "String" }, + "string": { + "cast_to": "std::string", + "objc_type": "NSString*", + "to_cpp": "std::string(%(n)s.UTF8String)", + "from_cpp": "[NSString stringWithUTF8String:%(n)s.c_str()]", + "swift_type": "String" + }, "TermCriteria": { "objc_type": "TermCriteria*", "to_cpp": "%(n)s.nativeRef", From 9e84b860f233f3acad6de325f214b6bd62fc5a3f Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sat, 14 Nov 2020 07:16:13 +0000 Subject: [PATCH 093/152] cmake: update objc generator scripts - allow to run generator without strong requirement of building 'objc' module --- modules/objc/CMakeLists.txt | 15 ++- modules/objc/generator/CMakeLists.txt | 95 +++++++++++++------ modules/objc/generator/gen_objc.py | 19 ++-- .../generator/templates/cmakelists.template | 2 +- 
platforms/ios/build_framework.py | 10 +- platforms/osx/build_framework.py | 4 + 6 files changed, 106 insertions(+), 39 deletions(-) diff --git a/modules/objc/CMakeLists.txt b/modules/objc/CMakeLists.txt index d4ea6e3563..8cf24de56e 100644 --- a/modules/objc/CMakeLists.txt +++ b/modules/objc/CMakeLists.txt @@ -1,6 +1,19 @@ -if(OPENCV_INITIAL_PASS AND APPLE_FRAMEWORK AND NOT (BUILD_opencv_objc STREQUAL "OFF")) +if(OPENCV_INITIAL_PASS) # generator for Objective-C source code and documentation signatures add_subdirectory(generator) endif() +if(NOT APPLE_FRAMEWORK) + return() +endif() + +set(the_description "The Objective-C bindings") +ocv_add_module(objc BINDINGS opencv_core opencv_imgproc PRIVATE_REQUIRED opencv_objc_bindings_generator) + +add_custom_target(${the_module} + ALL + COMMENT "Objective-C framework" +) +add_dependencies(${the_module} gen_opencv_objc_source) + #include(${CMAKE_CURRENT_SOURCE_DIR}/common.cmake) diff --git a/modules/objc/generator/CMakeLists.txt b/modules/objc/generator/CMakeLists.txt index dd6f58db6d..b3cbbd3f5f 100644 --- a/modules/objc/generator/CMakeLists.txt +++ b/modules/objc/generator/CMakeLists.txt @@ -1,16 +1,18 @@ -set(MODULE_NAME "objc") +set(MODULE_NAME "objc_bindings_generator") set(OPENCV_MODULE_IS_PART_OF_WORLD FALSE) ocv_add_module(${MODULE_NAME} INTERNAL opencv_core opencv_imgproc) -set(OPENCV_OBJC_SIGNATURES_FILE "${CMAKE_CURRENT_BINARY_DIR}/opencv_objc_signatures.json" CACHE INTERNAL "") +#set(OPENCV_OBJC_SIGNATURES_FILE "${CMAKE_CURRENT_BINARY_DIR}/opencv_objc_signatures.json" CACHE INTERNAL "") set(OPENCV_OBJC_BINDINGS_DIR "${CMAKE_CURRENT_BINARY_DIR}" CACHE INTERNAL "") -file(REMOVE_RECURSE "${OPENCV_OBJC_BINDINGS_DIR}/gen") -file(REMOVE "${OPENCV_DEPHELPER}/gen_opencv_objc_source") # force re-run after CMake +file(REMOVE_RECURSE "${OPENCV_OBJC_BINDINGS_DIR}/osx") +file(REMOVE "${OPENCV_DEPHELPER}/gen_opencv_objc_source_osx") # force re-run after CMake +file(REMOVE_RECURSE "${OPENCV_OBJC_BINDINGS_DIR}/ios") +file(REMOVE 
"${OPENCV_DEPHELPER}/gen_opencv_objc_source_ios") # force re-run after CMake # This file is included from a subdirectory set(OBJC_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/..") -include(${OBJC_SOURCE_DIR}/common.cmake) +include(${OBJC_SOURCE_DIR}/common.cmake) # fill OPENCV_OBJC_MODULES # common files file(GLOB_RECURSE deps "${CMAKE_CURRENT_SOURCE_DIR}/templates/*") @@ -30,15 +32,21 @@ foreach(m ${OPENCV_OBJC_MODULES}) set(__modules_config "${__modules_config} { \"name\": \"${m_}\", \"location\": \"${rel_path}\" }") endforeach(m) +if(HAVE_opencv_objc) + set(__objc_build_dir "\"objc_build_dir\": \"${CMAKE_CURRENT_BINARY_DIR}/../objc\",") +endif() + set(CONFIG_FILE "${CMAKE_CURRENT_BINARY_DIR}/gen_objc.json") set(__config_str "{ \"rootdir\": \"${OpenCV_SOURCE_DIR}\", + ${__objc_build_dir} \"modules\": [ ${__modules_config} ] } ") +#TODO: ocv_update_file("${CONFIG_FILE}" "${__config_str}" ON_CHANGE_REMOVE "${OPENCV_DEPHELPER}/gen_opencv_objc_source") if(EXISTS "${CONFIG_FILE}") file(READ "${CONFIG_FILE}" __content) else() @@ -52,33 +60,66 @@ unset(__config_str) set(objc_generated_files # "${OPENCV_OBJC_SIGNATURES_FILE}" - "${OPENCV_DEPHELPER}/gen_opencv_objc_source" ) string(REPLACE "opencv_" "" MODULES "${OPENCV_OBJC_MODULES}") -if(IOS) - set(TARGET "ios") -else() - set(TARGET "osx") +if(NOT DEFINED OPENCV_OBJC_TARGET AND APPLE_FRAMEWORK) + if(IOS) + set(OPENCV_OBJC_TARGET "ios") + else() + set(OPENCV_OBJC_TARGET "osx") + endif() endif() -add_custom_command( - OUTPUT ${objc_generated_files} - COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${OBJC_SOURCE_DIR}/generator/gen_objc.py" -p "${OBJC_SOURCE_DIR}/../python/src2/gen2.py" -c "${CONFIG_FILE}" -t "${TARGET}" -f "${FRAMEWORK_NAME}" - COMMAND ${CMAKE_COMMAND} -E touch "${OPENCV_DEPHELPER}/gen_opencv_objc_source" - WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}" - DEPENDS "${OBJC_SOURCE_DIR}/generator/gen_objc.py" - "${OBJC_SOURCE_DIR}/../python/src2/gen2.py" - "${OBJC_SOURCE_DIR}/../python/src2/hdr_parser.py" - # don't, result 
of file(WRITE): "${CMAKE_CURRENT_BINARY_DIR}/gen_objc.json" - ${deps} - # not allowed (file(WRITE) result): "${CONFIG_FILE}" - COMMENT "Generate files for Objective-C bindings" -) +if(NOT DEFINED OPENCV_OBJC_FRAMEWORK_NAME) + if(DEFINED FRAMEWORK_NAME) + set(OPENCV_OBJC_FRAMEWORK_NAME "${FRAMEWORK_NAME}") + else() + set(OPENCV_OBJC_FRAMEWORK_NAME "opencv2") + endif() +endif() -add_custom_target(gen_opencv_objc_source ALL DEPENDS ${objc_generated_files} - SOURCES "${OBJC_SOURCE_DIR}/generator/gen_objc.py" - "${OBJC_SOURCE_DIR}/generator/templates/cmakelists.template" - "${CMAKE_CURRENT_BINARY_DIR}/gen_objc.json" +set(objc_generated_targets "") + +macro(ocv_add_objc_generated_target TARGET) + set(objc_${TARGET}_generated_output_dependecy "${OPENCV_DEPHELPER}/gen_opencv_objc_source_${TARGET}") + file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${TARGET}") + add_custom_command( + OUTPUT ${objc_generated_files} "${objc_${TARGET}_generated_output_dependecy}" + COMMAND ${PYTHON_DEFAULT_EXECUTABLE} "${OBJC_SOURCE_DIR}/generator/gen_objc.py" + -p "${OBJC_SOURCE_DIR}/../python/src2/gen2.py" + -c "${CONFIG_FILE}" + -t "${TARGET}" + -f "${OPENCV_OBJC_FRAMEWORK_NAME}" + COMMAND ${CMAKE_COMMAND} -E touch "${objc_${TARGET}_generated_output_dependecy}" + WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/${TARGET}" + DEPENDS "${OpenCV_SOURCE_DIR}/modules/objc/generator/gen_objc.py" + "${OpenCV_SOURCE_DIR}/modules/python/src2/gen2.py" + "${OpenCV_SOURCE_DIR}/modules/python/src2/hdr_parser.py" + # don't, result of file(WRITE): "${CMAKE_CURRENT_BINARY_DIR}/gen_objc.json" + ${deps} + # not allowed (file(WRITE) result): "${CONFIG_FILE}" + COMMENT "Generate files for Objective-C bindings (${TARGET})" + ) + add_custom_target(gen_opencv_objc_source_${TARGET} + # excluded from all: ALL + DEPENDS ${objc_generated_files} ${objc_${TARGET}_generated_output_dependecy} + SOURCES "${OBJC_SOURCE_DIR}/generator/gen_objc.py" + "${OBJC_SOURCE_DIR}/generator/templates/cmakelists.template" + 
"${CMAKE_CURRENT_BINARY_DIR}/gen_objc.json" + ) + list(APPEND objc_generated_targets gen_opencv_objc_source_${TARGET}) +endmacro() + +if(OPENCV_OBJC_TARGET) + ocv_add_objc_generated_target(${OPENCV_OBJC_TARGET}) +else() + ocv_add_objc_generated_target(osx) + ocv_add_objc_generated_target(ios) +endif() + +add_custom_target(gen_opencv_objc_source + # excluded from all: ALL + DEPENDS ${objc_generated_targets} ) diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py index e6637a7c4c..c20251d261 100755 --- a/modules/objc/generator/gen_objc.py +++ b/modules/objc/generator/gen_objc.py @@ -1342,7 +1342,7 @@ typedef NS_ENUM(int, {2}) {{ return "Ptr<" + fullname + ">" return fullname - def finalize(self, output_objc_path): + def finalize(self, objc_target, output_objc_path, output_objc_build_path): opencv_header_file = os.path.join(output_objc_path, framework_name + ".h") opencv_header = "#import \n\n" opencv_header += "// ! Project version number\nFOUNDATION_EXPORT double " + framework_name + "VersionNumber;\n\n" @@ -1356,15 +1356,15 @@ typedef NS_ENUM(int, {2}) {{ opencv_modulemap += "\n export *\n module * {export *}\n}\n" self.save(opencv_modulemap_file, opencv_modulemap) cmakelist_template = read_contents(os.path.join(SCRIPT_DIR, 'templates/cmakelists.template')) - cmakelist = Template(cmakelist_template).substitute(modules = ";".join(modules), framework = framework_name) + cmakelist = Template(cmakelist_template).substitute(modules = ";".join(modules), framework = framework_name, objc_target=objc_target) self.save(os.path.join(dstdir, "CMakeLists.txt"), cmakelist) - mkdir_p("./framework_build") - mkdir_p("./test_build") - mkdir_p("./doc_build") + mkdir_p(os.path.join(output_objc_build_path, "framework_build")) + mkdir_p(os.path.join(output_objc_build_path, "test_build")) + mkdir_p(os.path.join(output_objc_build_path, "doc_build")) with open(os.path.join(SCRIPT_DIR, '../doc/README.md')) as readme_in: readme_body = readme_in.read() 
readme_body += "\n\n\n##Modules\n\n" + ", ".join(["`" + m.capitalize() + "`" for m in modules]) - with open("./doc_build/README.md", "w") as readme_out: + with open(os.path.join(output_objc_build_path, "doc_build/README.md"), "w") as readme_out: readme_out.write(readme_body) if framework_name != "OpenCV": for dirname, dirs, files in os.walk(os.path.join(testdir, "test")): @@ -1513,6 +1513,11 @@ if __name__ == "__main__": config = json.load(f) ROOT_DIR = config['rootdir']; assert os.path.exists(ROOT_DIR) + if 'objc_build_dir' in config: + objc_build_dir = config['objc_build_dir'] + assert os.path.exists(objc_build_dir), objc_build_dir + else: + objc_build_dir = os.getcwd() dstdir = "./gen" testdir = "./test" @@ -1608,6 +1613,6 @@ if __name__ == "__main__": generator.gen(srcfiles, module, dstdir, objc_base_path, common_headers, manual_classes) else: logging.info("No generated code for module: %s", module) - generator.finalize(objc_base_path) + generator.finalize(args.target, objc_base_path, objc_build_dir) print('Generated files: %d (updated %d)' % (total_files, updated_files)) diff --git a/modules/objc/generator/templates/cmakelists.template b/modules/objc/generator/templates/cmakelists.template index 2cfc2474cd..67cacbbfa4 100644 --- a/modules/objc/generator/templates/cmakelists.template +++ b/modules/objc/generator/templates/cmakelists.template @@ -24,7 +24,7 @@ target_include_directories($framework PRIVATE "$${BUILD_ROOT}") target_include_directories($framework PRIVATE "$${BUILD_ROOT}/install/include") target_include_directories($framework PRIVATE "$${BUILD_ROOT}/install/include/opencv2") foreach(m $${MODULES}) - target_include_directories($framework PRIVATE "$${BUILD_ROOT}/modules/objc/gen/objc/$${m}") + target_include_directories($framework PRIVATE "$${BUILD_ROOT}/modules/objc_bindings_generator/$objc_target/gen/objc/$${m}") endforeach() install(TARGETS $framework LIBRARY DESTINATION lib) diff --git a/platforms/ios/build_framework.py 
b/platforms/ios/build_framework.py index e759072825..5965cd0a96 100755 --- a/platforms/ios/build_framework.py +++ b/platforms/ios/build_framework.py @@ -128,10 +128,10 @@ class Builder: self.makeFramework(outdir, dirs) if self.build_objc_wrapper: if self.run_tests: - check_call([sys.argv[0].replace("build_framework", "run_tests"), "--framework_dir=" + outdir, "--framework_name=" + self.framework_name, dirs[0] + "/modules/objc/test"]) + check_call([sys.argv[0].replace("build_framework", "run_tests"), "--framework_dir=" + outdir, "--framework_name=" + self.framework_name, dirs[0] + "/modules/objc_bindings_generator/{}/test".format(self.getObjcTarget())]) else: print("To run tests call:") - print(sys.argv[0].replace("build_framework", "run_tests") + " --framework_dir=" + outdir + " --framework_name=" + self.framework_name + " " + dirs[0] + "/modules/objc/test") + print(sys.argv[0].replace("build_framework", "run_tests") + " --framework_dir=" + outdir + " --framework_name=" + self.framework_name + " " + dirs[0] + "/modules/objc_bindings_generator/{}/test".format(self.getObjcTarget())) if self.build_docs: check_call([sys.argv[0].replace("build_framework", "build_docs"), dirs[0] + "/modules/objc/framework_build"]) doc_path = os.path.join(dirs[0], "modules", "objc", "doc_build", "docs") @@ -216,6 +216,10 @@ class Builder: def getInfoPlist(self, builddirs): return os.path.join(builddirs[0], "ios", "Info.plist") + def getObjcTarget(self): + # Obj-C generation target + return 'ios' + def makeCMakeCmd(self, arch, target, dir, cmakeargs = []): toolchain = self.getToolchain(arch, target) cmakecmd = self.getCMakeArgs(arch, target) + \ @@ -255,7 +259,7 @@ class Builder: execute(buildcmd + ["-target", "ALL_BUILD", "build"], cwd = builddir) execute(["cmake", "-DBUILD_TYPE=%s" % self.getConfiguration(), "-P", "cmake_install.cmake"], cwd = builddir) if self.build_objc_wrapper: - cmakecmd = self.makeCMakeCmd(arch, target, builddir + "/modules/objc/gen", cmakeargs) + cmakecmd = 
self.makeCMakeCmd(arch, target, builddir + "/modules/objc_bindings_generator/{}/gen".format(self.getObjcTarget()), cmakeargs) cmakecmd.append("-DBUILD_ROOT=%s" % builddir) cmakecmd.append("-DCMAKE_INSTALL_NAME_TOOL=install_name_tool") cmakecmd.append("--no-warn-unused-cli") diff --git a/platforms/osx/build_framework.py b/platforms/osx/build_framework.py index ccca582615..de13e665fa 100755 --- a/platforms/osx/build_framework.py +++ b/platforms/osx/build_framework.py @@ -14,6 +14,10 @@ MACOSX_DEPLOYMENT_TARGET='10.12' # default, can be changed via command line opt class OSXBuilder(Builder): + def getObjcTarget(self): + # Obj-C generation target + return 'osx' + def getToolchain(self, arch, target): return None From f58f36dc88166060a1b1a65f5f4debeb379061f9 Mon Sep 17 00:00:00 2001 From: Anton Veselskyi Date: Sat, 14 Nov 2020 23:29:51 +0200 Subject: [PATCH 094/152] Changed sample code, fixes #18807 --- samples/cpp/stitching.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/cpp/stitching.cpp b/samples/cpp/stitching.cpp index 5bf34f45b1..7de0536452 100644 --- a/samples/cpp/stitching.cpp +++ b/samples/cpp/stitching.cpp @@ -52,7 +52,7 @@ void printUsage(char** argv) " for stitching materials under affine transformation, such as scans.\n" " --output \n" " The default is 'result.jpg'.\n\n" - "Example usage :\n" << argv[0] << " --d3 --try_use_gpu yes --mode scans img1.jpg img2.jpg\n"; + "Example usage :\n" << argv[0] << " --d3 --mode scans img1.jpg img2.jpg\n"; } From 90bea15e353db50a6e183c2b9b347b2158cc2041 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sat, 14 Nov 2020 19:05:10 +0000 Subject: [PATCH 095/152] objc: robust code generation - the same generated code from Python2/3 - avoid randomized output due to unpredictable dict/set order --- modules/objc/generator/gen_objc.py | 49 ++++++++++++++++-------------- 1 file changed, 26 insertions(+), 23 deletions(-) diff --git a/modules/objc/generator/gen_objc.py 
b/modules/objc/generator/gen_objc.py index e6637a7c4c..1352562c3a 100755 --- a/modules/objc/generator/gen_objc.py +++ b/modules/objc/generator/gen_objc.py @@ -111,7 +111,7 @@ T_OBJC_MODULE_BODY = read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_modu class GeneralInfo(): def __init__(self, type, decl, namespaces): - self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces) + self.symbol_id, self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces) # parse doxygen comments self.params={} @@ -149,13 +149,13 @@ class GeneralInfo(): break pieces = localName.split(".") if len(pieces) > 2: # ... - return spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1] + return name, spaceName, ".".join(pieces[:-1]), pieces[-2], pieces[-1] elif len(pieces) == 2: # . - return spaceName, pieces[0], pieces[0], pieces[1] + return name, spaceName, pieces[0], pieces[0], pieces[1] elif len(pieces) == 1: # - return spaceName, "", "", pieces[0] + return name, spaceName, "", "", pieces[0] else: - return spaceName, "", "" # error?! + return name, spaceName, "", "" # error?! 
def fullName(self, isCPP=False): result = ".".join([self.fullClass(), self.name]) @@ -271,7 +271,7 @@ class ClassInfo(GeneralInfo): def getForwardDeclarations(self, module): enum_decl = [x for x in self.imports if self.isEnum(x) and type_dict[x]["import_module"] != module] - enum_imports = list(set([type_dict[m]["import_module"] for m in enum_decl])) + enum_imports = sorted(list(set([type_dict[m]["import_module"] for m in enum_decl]))) class_decl = [x for x in self.imports if not self.isEnum(x)] return ["#import \"%s.h\"" % c for c in enum_imports] + [""] + ["@class %s;" % c for c in sorted(class_decl)] @@ -293,8 +293,8 @@ class ClassInfo(GeneralInfo): def getAllMethods(self): result = [] - result.extend([fi for fi in sorted(self.methods) if fi.isconstructor]) - result.extend([fi for fi in sorted(self.methods) if not fi.isconstructor]) + result += [fi for fi in self.methods if fi.isconstructor] + result += [fi for fi in self.methods if not fi.isconstructor] return result def addMethod(self, fi): @@ -677,7 +677,7 @@ class ObjectiveCWrapperGenerator(object): self.clear() def clear(self): - self.namespaces = set(["cv"]) + self.namespaces = ["cv"] mat_class_info = ClassInfo([ 'class Mat', '', [], [] ], self.namespaces) mat_class_info.namespace = "cv" self.classes = { "Mat" : mat_class_info } @@ -846,9 +846,9 @@ class ObjectiveCWrapperGenerator(object): includes.append('#include "' + hdr + '"') for hdr in srcfiles: decls = parser.parse(hdr) - self.namespaces = parser.namespaces + self.namespaces = sorted(parser.namespaces) logging.info("\n\n===== Header: %s =====", hdr) - logging.info("Namespaces: %s", parser.namespaces) + logging.info("Namespaces: %s", sorted(parser.namespaces)) if decls: includes.append('#include "' + hdr + '"') else: @@ -872,7 +872,7 @@ class ObjectiveCWrapperGenerator(object): mkdir_p(package_path) extension_file = "%s/%s/%sExt.swift" % (output_objc_path, module, self.Module) - for ci in list(self.classes.values()): + for ci in 
sorted(self.classes.values(), key=lambda x: x.symbol_id): if ci.name == "Mat": continue ci.initCodeStreams(self.Module) @@ -898,7 +898,7 @@ class ObjectiveCWrapperGenerator(object): report.write("\n".join(self.ported_func_list)) report.write("\n\nSKIPPED FUNCs LIST (%i of %i):\n\n" % (len(self.skipped_func_list), total_count)) report.write("".join(self.skipped_func_list)) - for i in list(self.def_args_hist.keys()): + for i in sorted(self.def_args_hist.keys()): report.write("\n%i def args - %i funcs" % (i, self.def_args_hist[i])) return report.getvalue() @@ -1211,17 +1211,18 @@ $unrefined_call$epilogue$ret if ci.consts: enumTypes = set([c.enumType for c in ci.consts]) grouped_consts = {enumType: [c for c in ci.consts if c.enumType == enumType] for enumType in enumTypes} - for typeName, consts in list(grouped_consts.items()): + for typeName in sorted(grouped_consts.keys(), key=lambda x: str(x) if x is not None else ""): + consts = grouped_consts[typeName] logging.info("%s", consts) if typeName: - typeName = typeName.rsplit(".", 1)[-1] + typeNameShort = typeName.rsplit(".", 1)[-1] if ci.cname in enum_fix: - typeName = enum_fix[ci.cname].get(typeName, typeName) + typeNameShort = enum_fix[ci.cname].get(typeNameShort, typeNameShort) ci.enum_declarations.write(""" -// C++: enum {1} -typedef NS_ENUM(int, {2}) {{ - {0}\n}};\n\n""".format(",\n ".join(["%s = %s" % (c.name, c.value) for c in consts]), typeName, typeName) +// C++: enum {1} ({2}) +typedef NS_ENUM(int, {1}) {{ + {0}\n}};\n\n""".format(",\n ".join(["%s = %s" % (c.name, c.value) for c in consts]), typeNameShort, typeName) ) else: if not wrote_consts_pragma: @@ -1303,9 +1304,11 @@ typedef NS_ENUM(int, {2}) {{ # manual ports if ci.name in ManualFuncs: - for func in list(ManualFuncs[ci.name].keys()): - ci.method_declarations.write( "\n".join(ManualFuncs[ci.name][func]["declaration"]) ) - ci.method_implementations.write( "\n".join(ManualFuncs[ci.name][func]["implementation"]) ) + for func in 
sorted(ManualFuncs[ci.name].keys()): + logging.info("manual function: %s", func) + fn = ManualFuncs[ci.name][func] + ci.method_declarations.write( "\n".join(fn["declaration"]) ) + ci.method_implementations.write( "\n".join(fn["implementation"]) ) def getClass(self, classname): return self.classes[classname or self.Module] @@ -1489,7 +1492,7 @@ if __name__ == "__main__": # initialize logger logging.basicConfig(filename='gen_objc.log', format=None, filemode='w', level=logging.INFO) handler = logging.StreamHandler() - handler.setLevel(logging.WARNING) + handler.setLevel(os.environ.get('LOG_LEVEL', logging.WARNING)) logging.getLogger().addHandler(handler) # parse command line parameters From cfbdbffcad2b599b402828d0b1ba93cff9a4790c Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sat, 14 Nov 2020 22:04:57 +0000 Subject: [PATCH 096/152] objc: rework headers import - 'AdditionalImports' can be removed from misc/objc/gen_dict.json - avoid using of legacy 'opencv.hpp' --- modules/core/misc/objc/common/Converters.h | 2 +- modules/core/misc/objc/common/CvType.h | 2 +- modules/core/misc/objc/common/DMatch.h | 2 +- modules/core/misc/objc/common/Double2.h | 2 +- modules/core/misc/objc/common/Double3.h | 2 +- modules/core/misc/objc/common/Float4.h | 2 +- modules/core/misc/objc/common/Float6.h | 2 +- modules/core/misc/objc/common/Int4.h | 2 +- modules/core/misc/objc/common/KeyPoint.h | 2 +- modules/core/misc/objc/common/Mat.h | 2 +- .../core/misc/objc/common/MinMaxLocResult.h | 2 +- modules/core/misc/objc/common/Point2d.h | 2 +- modules/core/misc/objc/common/Point2f.h | 2 +- modules/core/misc/objc/common/Point2i.h | 2 +- modules/core/misc/objc/common/Point3d.h | 2 +- modules/core/misc/objc/common/Point3f.h | 2 +- modules/core/misc/objc/common/Point3i.h | 2 +- modules/core/misc/objc/common/Range.h | 2 +- modules/core/misc/objc/common/Rect2d.h | 2 +- modules/core/misc/objc/common/Rect2f.h | 2 +- modules/core/misc/objc/common/Rect2i.h | 2 +- 
modules/core/misc/objc/common/RotatedRect.h | 2 +- modules/core/misc/objc/common/Scalar.h | 2 +- modules/core/misc/objc/common/Size2d.h | 2 +- modules/core/misc/objc/common/Size2f.h | 2 +- modules/core/misc/objc/common/Size2i.h | 2 +- modules/core/misc/objc/common/TermCriteria.h | 2 +- .../imgcodecs/misc/objc/ios/Mat+Converters.h | 4 +- .../misc/objc/macosx/Mat+Converters.h | 4 +- modules/imgproc/misc/objc/common/Moments.h | 2 +- modules/objc/generator/gen_objc.py | 46 +++++++++++++++---- .../templates/objc_class_header.template | 2 +- .../templates/objc_module_header.template | 2 +- 33 files changed, 70 insertions(+), 44 deletions(-) diff --git a/modules/core/misc/objc/common/Converters.h b/modules/core/misc/objc/common/Converters.h index 9a238deb82..29d1b91eb5 100755 --- a/modules/core/misc/objc/common/Converters.h +++ b/modules/core/misc/objc/common/Converters.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import +#import #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/CvType.h b/modules/core/misc/objc/common/CvType.h index fb6f86aa48..b1fd71d487 100644 --- a/modules/core/misc/objc/common/CvType.h +++ b/modules/core/misc/objc/common/CvType.h @@ -5,7 +5,7 @@ // #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/DMatch.h b/modules/core/misc/objc/common/DMatch.h index 51bed493b8..91c2c59bfa 100644 --- a/modules/core/misc/objc/common/DMatch.h +++ b/modules/core/misc/objc/common/DMatch.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Double2.h b/modules/core/misc/objc/common/Double2.h index 2162acb6d0..8e46c883d0 100644 --- a/modules/core/misc/objc/common/Double2.h +++ b/modules/core/misc/objc/common/Double2.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define 
CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Double3.h b/modules/core/misc/objc/common/Double3.h index 2aaba9af80..5c741648f7 100644 --- a/modules/core/misc/objc/common/Double3.h +++ b/modules/core/misc/objc/common/Double3.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Float4.h b/modules/core/misc/objc/common/Float4.h index 2a89278040..c78e88b72e 100644 --- a/modules/core/misc/objc/common/Float4.h +++ b/modules/core/misc/objc/common/Float4.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Float6.h b/modules/core/misc/objc/common/Float6.h index d2ec19a60e..7e09772c5c 100644 --- a/modules/core/misc/objc/common/Float6.h +++ b/modules/core/misc/objc/common/Float6.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Int4.h b/modules/core/misc/objc/common/Int4.h index 1a17266572..11cc12db14 100644 --- a/modules/core/misc/objc/common/Int4.h +++ b/modules/core/misc/objc/common/Int4.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/KeyPoint.h b/modules/core/misc/objc/common/KeyPoint.h index 547960dc9d..096a1089c9 100644 --- a/modules/core/misc/objc/common/KeyPoint.h +++ b/modules/core/misc/objc/common/KeyPoint.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Mat.h b/modules/core/misc/objc/common/Mat.h index 229337f524..fd1dce27ba 100644 --- a/modules/core/misc/objc/common/Mat.h +++ b/modules/core/misc/objc/common/Mat.h @@ -7,7 +7,7 @@ #pragma 
once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/MinMaxLocResult.h b/modules/core/misc/objc/common/MinMaxLocResult.h index e8daed4cc3..5ec6029e31 100644 --- a/modules/core/misc/objc/common/MinMaxLocResult.h +++ b/modules/core/misc/objc/common/MinMaxLocResult.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Point2d.h b/modules/core/misc/objc/common/Point2d.h index dbb8d55efa..0426b11d9a 100644 --- a/modules/core/misc/objc/common/Point2d.h +++ b/modules/core/misc/objc/common/Point2d.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Point2f.h b/modules/core/misc/objc/common/Point2f.h index 0da4fba5d8..6d13c774d8 100644 --- a/modules/core/misc/objc/common/Point2f.h +++ b/modules/core/misc/objc/common/Point2f.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Point2i.h b/modules/core/misc/objc/common/Point2i.h index 9e5d74624a..e43ee3a8ec 100644 --- a/modules/core/misc/objc/common/Point2i.h +++ b/modules/core/misc/objc/common/Point2i.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Point3d.h b/modules/core/misc/objc/common/Point3d.h index 72b0d39ea8..618ded35fa 100644 --- a/modules/core/misc/objc/common/Point3d.h +++ b/modules/core/misc/objc/common/Point3d.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Point3f.h b/modules/core/misc/objc/common/Point3f.h 
index 2370fffeaa..c98add1cec 100644 --- a/modules/core/misc/objc/common/Point3f.h +++ b/modules/core/misc/objc/common/Point3f.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Point3i.h b/modules/core/misc/objc/common/Point3i.h index b0edeaa470..9eab2ee0ea 100644 --- a/modules/core/misc/objc/common/Point3i.h +++ b/modules/core/misc/objc/common/Point3i.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Range.h b/modules/core/misc/objc/common/Range.h index dd84edf6aa..df0c01398f 100644 --- a/modules/core/misc/objc/common/Range.h +++ b/modules/core/misc/objc/common/Range.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Rect2d.h b/modules/core/misc/objc/common/Rect2d.h index ba91509b77..0ffcae9ab6 100644 --- a/modules/core/misc/objc/common/Rect2d.h +++ b/modules/core/misc/objc/common/Rect2d.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Rect2f.h b/modules/core/misc/objc/common/Rect2f.h index 6a8863800f..1f44f56263 100644 --- a/modules/core/misc/objc/common/Rect2f.h +++ b/modules/core/misc/objc/common/Rect2f.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Rect2i.h b/modules/core/misc/objc/common/Rect2i.h index 2e4e55cf30..6ed86d50bd 100644 --- a/modules/core/misc/objc/common/Rect2i.h +++ b/modules/core/misc/objc/common/Rect2i.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS 
#endif diff --git a/modules/core/misc/objc/common/RotatedRect.h b/modules/core/misc/objc/common/RotatedRect.h index c94053b6c1..a2049e6bf0 100644 --- a/modules/core/misc/objc/common/RotatedRect.h +++ b/modules/core/misc/objc/common/RotatedRect.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Scalar.h b/modules/core/misc/objc/common/Scalar.h index 63c3d1de58..d565155010 100644 --- a/modules/core/misc/objc/common/Scalar.h +++ b/modules/core/misc/objc/common/Scalar.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Size2d.h b/modules/core/misc/objc/common/Size2d.h index 11c6c50a02..cd2e4e4bc0 100644 --- a/modules/core/misc/objc/common/Size2d.h +++ b/modules/core/misc/objc/common/Size2d.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Size2f.h b/modules/core/misc/objc/common/Size2f.h index 2d1f2865c3..73ae9a2da0 100644 --- a/modules/core/misc/objc/common/Size2f.h +++ b/modules/core/misc/objc/common/Size2f.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/Size2i.h b/modules/core/misc/objc/common/Size2i.h index 61aa8da885..cd74e2c84a 100644 --- a/modules/core/misc/objc/common/Size2i.h +++ b/modules/core/misc/objc/common/Size2i.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/core/misc/objc/common/TermCriteria.h b/modules/core/misc/objc/common/TermCriteria.h index c7396582b2..ff6bfd565c 100644 --- a/modules/core/misc/objc/common/TermCriteria.h +++ 
b/modules/core/misc/objc/common/TermCriteria.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/imgcodecs/misc/objc/ios/Mat+Converters.h b/modules/imgcodecs/misc/objc/ios/Mat+Converters.h index d33abbf4f9..8c185f884a 100644 --- a/modules/imgcodecs/misc/objc/ios/Mat+Converters.h +++ b/modules/imgcodecs/misc/objc/ios/Mat+Converters.h @@ -7,14 +7,14 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif +#import "Mat.h" #import #import -#import "Mat.h" NS_ASSUME_NONNULL_BEGIN diff --git a/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h index 4abf806d1e..d87887372d 100644 --- a/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h +++ b/modules/imgcodecs/misc/objc/macosx/Mat+Converters.h @@ -7,14 +7,14 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif +#import "Mat.h" #import #import -#import "Mat.h" NS_ASSUME_NONNULL_BEGIN diff --git a/modules/imgproc/misc/objc/common/Moments.h b/modules/imgproc/misc/objc/common/Moments.h index dfa5653bac..8ce3f75ea8 100644 --- a/modules/imgproc/misc/objc/common/Moments.h +++ b/modules/imgproc/misc/objc/common/Moments.h @@ -7,7 +7,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +#import "opencv2/core.hpp" #else #define CV_EXPORTS #endif diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py index e6637a7c4c..a5ffa7e874 100755 --- a/modules/objc/generator/gen_objc.py +++ b/modules/objc/generator/gen_objc.py @@ -104,6 +104,15 @@ def mkdir_p(path): else: raise +def header_import(hdr): + """ converts absolute header path to import parameter """ + pos = hdr.find('/include/') + hdr = hdr[pos+9 if pos >= 0 else 0:] + #pos = hdr.find('opencv2/') + #hdr = hdr[pos+8 if pos >= 0 else 0:] + return hdr + + T_OBJC_CLASS_HEADER 
= read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_class_header.template')) T_OBJC_CLASS_BODY = read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_class_body.template')) T_OBJC_MODULE_HEADER = read_contents(os.path.join(SCRIPT_DIR, 'templates/objc_module_header.template')) @@ -693,17 +702,17 @@ class ObjectiveCWrapperGenerator(object): classinfo = ClassInfo(decl, namespaces=self.namespaces) if classinfo.name in class_ignore_list: logging.info('ignored: %s', classinfo) - return + return None if classinfo.name != self.Module: self.classes[self.Module].member_classes.append(classinfo.objc_name) name = classinfo.cname if self.isWrapped(name) and not classinfo.base: logging.warning('duplicated: %s', classinfo) - return + return None self.classes[name] = classinfo if name in type_dict and not classinfo.base: logging.warning('duplicated: %s', classinfo) - return + return None if name != self.Module: type_dict.setdefault(name, {}).update( { "objc_type" : classinfo.objc_name + "*", @@ -731,6 +740,7 @@ class ObjectiveCWrapperGenerator(object): ) logging.info('ok: class %s, name: %s, base: %s', classinfo, name, classinfo.base) + return classinfo def add_const(self, decl, scope=None, enumType=None): # [ "const cname", val, [], [] ] constinfo = ConstInfo(decl, namespaces=self.namespaces, enumType=enumType) @@ -837,27 +847,30 @@ class ObjectiveCWrapperGenerator(object): # TODO: support UMat versions of declarations (implement UMat-wrapper for Java) parser = hdr_parser.CppHeaderParser(generate_umat_decls=False) - self.add_class( ['class ' + self.Module, '', [], []]) # [ 'class/struct cname', ':bases', [modlist] [props] ] + module_ci = self.add_class( ['class ' + self.Module, '', [], []]) # [ 'class/struct cname', ':bases', [modlist] [props] ] + module_ci.header_import = module + '.hpp' # scan the headers and build more descriptive maps of classes, consts, functions includes = [] for hdr in common_headers: logging.info("\n===== Common header : %s =====", hdr) - 
includes.append('#include "' + hdr + '"') + includes.append(header_import(hdr)) for hdr in srcfiles: decls = parser.parse(hdr) self.namespaces = parser.namespaces logging.info("\n\n===== Header: %s =====", hdr) logging.info("Namespaces: %s", parser.namespaces) if decls: - includes.append('#include "' + hdr + '"') + includes.append(header_import(hdr)) else: logging.info("Ignore header: %s", hdr) for decl in decls: logging.info("\n--- Incoming ---\n%s", pformat(decl[:5], 4)) # without docstring name = decl[0] if name.startswith("struct") or name.startswith("class"): - self.add_class(decl) + ci = self.add_class(decl) + if ci: + ci.header_import = header_import(hdr) elif name.startswith("const"): self.add_const(decl) elif name.startswith("enum"): @@ -1190,13 +1203,26 @@ $unrefined_call$epilogue$ret def gen_class(self, ci, module, extension_implementations, extension_signatures): logging.info("%s", ci) - if module in AdditionalImports and (ci.name in AdditionalImports[module] or "*" in AdditionalImports[module]): - additional_imports = [] + additional_imports = [] + if module in AdditionalImports: if "*" in AdditionalImports[module]: additional_imports += AdditionalImports[module]["*"] if ci.name in AdditionalImports[module]: additional_imports += AdditionalImports[module][ci.name] - ci.additionalImports.write("\n".join(["#import %s" % h for h in additional_imports])) + if hasattr(ci, 'header_import'): + h = '"{}"'.format(ci.header_import) + if not h in additional_imports: + additional_imports.append(h) + + h = '"{}.hpp"'.format(module) + if h in additional_imports: + additional_imports.remove(h) + h = '"opencv2/{}.hpp"'.format(module) + if not h in additional_imports: + additional_imports.insert(0, h) + + if additional_imports: + ci.additionalImports.write('\n'.join(['#import %s' % h for h in additional_imports])) # constants wrote_consts_pragma = False diff --git a/modules/objc/generator/templates/objc_class_header.template 
b/modules/objc/generator/templates/objc_class_header.template index 0bad670685..77697e8c93 100644 --- a/modules/objc/generator/templates/objc_class_header.template +++ b/modules/objc/generator/templates/objc_class_header.template @@ -4,7 +4,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +//#import "opencv.hpp" $additionalImports #else #define CV_EXPORTS diff --git a/modules/objc/generator/templates/objc_module_header.template b/modules/objc/generator/templates/objc_module_header.template index fa9e7df6a3..88f45a11cf 100644 --- a/modules/objc/generator/templates/objc_module_header.template +++ b/modules/objc/generator/templates/objc_module_header.template @@ -4,7 +4,7 @@ #pragma once #ifdef __cplusplus -#import "opencv.hpp" +//#import "opencv.hpp" $additionalImports #else #define CV_EXPORTS From 24f2b7dd3f4a0c756d0ca922df4a089eae9d777d Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 16 Nov 2020 05:22:45 +0000 Subject: [PATCH 097/152] objc(test): repair binary resource files --- modules/objc/generator/gen_objc.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py index e6637a7c4c..61a89d1d67 100755 --- a/modules/objc/generator/gen_objc.py +++ b/modules/objc/generator/gen_objc.py @@ -1368,6 +1368,8 @@ typedef NS_ENUM(int, {2}) {{ readme_out.write(readme_body) if framework_name != "OpenCV": for dirname, dirs, files in os.walk(os.path.join(testdir, "test")): + if dirname.endswith('/resources'): + continue # don't touch resource binary files for filename in files: filepath = os.path.join(dirname, filename) with io.open(filepath, encoding="utf-8", errors="ignore") as file: From dde3cb3b99d83b1e0cf9691f91aa1d6bfa61f808 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 16 Nov 2020 04:30:45 +0000 Subject: [PATCH 098/152] java: workaround handling of base class --- modules/java/generator/gen_java.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff 
--git a/modules/java/generator/gen_java.py b/modules/java/generator/gen_java.py index 6c604ed04b..e41117558a 100755 --- a/modules/java/generator/gen_java.py +++ b/modules/java/generator/gen_java.py @@ -272,8 +272,16 @@ class ClassInfo(GeneralInfo): self.base = '' if decl[1]: - #self.base = re.sub(r"\b"+self.jname+r"\b", "", decl[1].replace(":", "")).strip() - self.base = re.sub(r"^.*:", "", decl[1].split(",")[0]).strip().replace(self.jname, "") + # FIXIT Use generator to find type properly instead of hacks below + base_class = re.sub(r"^: ", "", decl[1]) + base_class = re.sub(r"^cv::", "", base_class) + base_class = base_class.replace('::', '.') + base_info = ClassInfo(('class {}'.format(base_class), '', [], [], None, None), [self.namespace]) + base_type_name = base_info.name + if not base_type_name in type_dict: + base_type_name = re.sub(r"^.*:", "", decl[1].split(",")[0]).strip().replace(self.jname, "") + self.base = base_type_name + self.addImports(self.base) def __repr__(self): return Template("CLASS $namespace::$classpath.$name : $base").substitute(**self.__dict__) From 58268b6eef993383fd0ff41fd1a7664983d17a72 Mon Sep 17 00:00:00 2001 From: Ruslan Garnov Date: Wed, 28 Oct 2020 21:52:53 +0300 Subject: [PATCH 099/152] Added ND GMatDesc serialization test --- modules/gapi/test/s11n/gapi_s11n_tests.cpp | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/modules/gapi/test/s11n/gapi_s11n_tests.cpp b/modules/gapi/test/s11n/gapi_s11n_tests.cpp index 2fc1e46253..e332552b1d 100644 --- a/modules/gapi/test/s11n/gapi_s11n_tests.cpp +++ b/modules/gapi/test/s11n/gapi_s11n_tests.cpp @@ -365,6 +365,12 @@ TEST_F(S11N_Basic, Test_MatDesc) { EXPECT_EQ(v, get()); } +TEST_F(S11N_Basic, Test_MatDescND) { + cv::GMatDesc v = { CV_8U, {1,1,224,224} }; + put(v); + EXPECT_EQ(v, get()); +} + TEST_F(S11N_Basic, Test_MetaArg_MatDesc) { cv::GMatDesc desc = { CV_8U, 1,{ 320,240 } }; auto v = cv::GMetaArg{ desc }; From 06477743abd51f6e2eba774fcb8204a007e87435 Mon Sep 17 00:00:00 2001 From: 
Maxim Pashchenkov Date: Mon, 16 Nov 2020 22:24:55 +0300 Subject: [PATCH 100/152] Merge pull request #18744 from mpashchenkov:mp/onnx-dynamic-input-tensor G-API: ONNX. Support tensor input for CNN with dynamic input * Added support for dynamic input tensor, refactored one input/output tests * Added multiple input/output fixture, test for mobilenet * Removed whitespace * Removed mistake in inferROI * Small fixes * One more fix * Code cleanup * Code cleanup X2 * bb rstrt * Fix review comments * One more fix review comments * Mistake --- .../gapi/src/backends/onnx/gonnxbackend.cpp | 12 +- .../gapi/test/infer/gapi_infer_onnx_test.cpp | 501 ++++++++++++------ 2 files changed, 358 insertions(+), 155 deletions(-) diff --git a/modules/gapi/src/backends/onnx/gonnxbackend.cpp b/modules/gapi/src/backends/onnx/gonnxbackend.cpp index c81e032969..7ab386ecab 100644 --- a/modules/gapi/src/backends/onnx/gonnxbackend.cpp +++ b/modules/gapi/src/backends/onnx/gonnxbackend.cpp @@ -167,8 +167,16 @@ inline void preprocess(const cv::Mat& src, // No layout or dimension transformations done here! // TODO: This needs to be aligned across all NN backends. 
GAPI_Assert(toCV(ti.type) == CV_32F && "Only 32F model input is supported for 32F data"); - GAPI_Assert(toORT(src.size) == ti.dims && "32F tensor dimensions should match with NN input"); - GAPI_Assert(!ti.is_dynamic && "Dynamic inputs are not supported for this case"); + const auto tensor_dims = toORT(src.size); + if (tensor_dims.size() == ti.dims.size()) { + for (size_t i = 0; i < ti.dims.size(); ++i) { + GAPI_Assert((ti.dims[i] == -1 || ti.dims[i] == tensor_dims[i]) && + "32F tensor dimensions should match with all non-dynamic NN input dimensions"); + } + } else { + GAPI_Assert(false && "32F tensor size should match with NN input"); + } + dst = src; } else { // 8U input: full preprocessing path diff --git a/modules/gapi/test/infer/gapi_infer_onnx_test.cpp b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp index ebb8020e9a..782e1b093a 100644 --- a/modules/gapi/test/infer/gapi_infer_onnx_test.cpp +++ b/modules/gapi/test/infer/gapi_infer_onnx_test.cpp @@ -12,29 +12,26 @@ #include #include +#include #include namespace { - struct ONNXInitPath { ONNXInitPath() { const char* env_path = getenv("OPENCV_GAPI_ONNX_MODEL_PATH"); - if (env_path) + if (env_path) { cvtest::addDataSearchPath(env_path); + } } }; static ONNXInitPath g_init_path; -cv::Mat initMatrixRandU(int type, cv::Size sz_in) -{ - cv::Mat in_mat1 = cv::Mat(sz_in, type); +cv::Mat initMatrixRandU(const int type, const cv::Size& sz_in) { + const cv::Mat in_mat1 = cv::Mat(sz_in, type); - if (CV_MAT_DEPTH(type) < CV_32F) - { + if (CV_MAT_DEPTH(type) < CV_32F) { cv::randu(in_mat1, cv::Scalar::all(0), cv::Scalar::all(255)); - } - else - { + } else { const int fscale = 256; // avoid bits near ULP, generate stable test input cv::Mat in_mat32s(in_mat1.size(), CV_MAKE_TYPE(CV_32S, CV_MAT_CN(type))); cv::randu(in_mat32s, cv::Scalar::all(0), cv::Scalar::all(255 * fscale)); @@ -42,111 +39,238 @@ cv::Mat initMatrixRandU(int type, cv::Size sz_in) } return in_mat1; } -} +} // anonymous namespace namespace opencv_test { 
namespace { // FIXME: taken from the DNN module -void normAssert(cv::InputArray ref, cv::InputArray test, +void normAssert(const cv::InputArray& ref, const cv::InputArray& test, const char *comment /*= ""*/, - double l1 = 0.00001, double lInf = 0.0001) -{ - double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total(); + const double l1 = 0.00001, const double lInf = 0.0001) { + const double normL1 = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total(); EXPECT_LE(normL1, l1) << comment; - double normInf = cvtest::norm(ref, test, cv::NORM_INF); + const double normInf = cvtest::norm(ref, test, cv::NORM_INF); EXPECT_LE(normInf, lInf) << comment; } -std::string findModel(const std::string &model_name) -{ - return findDataFile("vision/classification/squeezenet/model/" + model_name + ".onnx", false); +inline std::string findModel(const std::string &model_name) { + return findDataFile("vision/" + model_name + ".onnx", false); } -inline void preprocess(const cv::Mat& src, - cv::Mat& dst, - const cv::Scalar& mean, - const cv::Scalar& std) { - int new_h = 224; - int new_w = 224; - cv::Mat tmp, nmat, cvt; - cv::resize(src, dst, cv::Size(new_w, new_h)); - dst.convertTo(cvt, CV_32F, 1.f / 255); - nmat = cvt - mean; - tmp = nmat / std; - dst.create(cv::Size(new_w, new_h * src.channels()), CV_32F); +inline void toCHW(const cv::Mat& src, cv::Mat& dst) { + dst.create(cv::Size(src.cols, src.rows * src.channels()), CV_32F); std::vector planes; for (int i = 0; i < src.channels(); ++i) { - planes.push_back(dst.rowRange(i * new_h, (i + 1) * new_h)); + planes.push_back(dst.rowRange(i * src.rows, (i + 1) * src.rows)); } - cv::split(tmp, planes); + cv::split(src, planes); } -void InferONNX(const std::string& model_path, - const cv::Mat& in, - cv::Mat& out, - const cv::Scalar& mean, - const cv::Scalar& std) -{ - // FIXME: It must be a FIXTURE test! 
- Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "test"); - Ort::SessionOptions session_options; - Ort::Session session(env, model_path.data(), session_options); - auto input_node_dims = // 0 - one input - session.GetInputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape(); - auto output_node_dims = // 0 - one output - session.GetOutputTypeInfo(0).GetTensorTypeAndShapeInfo().GetShape(); +inline int toCV(const ONNXTensorElementDataType prec) { + switch (prec) { + case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: return CV_8U; + case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: return CV_32F; + default: GAPI_Assert(false && "Unsupported data type"); + } + return -1; +} + +inline std::vector toORT(const cv::MatSize &sz) { + return cv::to_own(sz); +} + +inline std::vector getCharNames(const std::vector& names) { + std::vector out_vec; + for (const auto& el : names) { + out_vec.push_back(el.data()); + } + return out_vec; +} + +inline void copyToOut(const cv::Mat& in, cv::Mat& out) { + GAPI_Assert(in.depth() == CV_32F); + GAPI_Assert(in.size == out.size); + const float* const inptr = in.ptr(); + float* const optr = out.ptr(); + const int size = in.total(); + for (int i = 0; i < size; ++i) { + optr[i] = inptr[i]; + } +} + +void remapYolo(const std::unordered_map &onnx, + std::unordered_map &gapi) { + GAPI_Assert(onnx.size() == 1u); + GAPI_Assert(gapi.size() == 1u); + // Result from Run method + const cv::Mat& in = onnx.begin()->second; + // Configured output + cv::Mat& out = gapi.begin()->second; + // Simple copy + copyToOut(in, out); +} + +void remapSsdPorts(const std::unordered_map &onnx, + std::unordered_map &gapi) { + // Result from Run method + const cv::Mat& in_num = onnx.at("num_detections:0"); + const cv::Mat& in_boxes = onnx.at("detection_boxes:0"); + const cv::Mat& in_scores = onnx.at("detection_scores:0"); + const cv::Mat& in_classes = onnx.at("detection_classes:0"); + // Configured outputs + cv::Mat& out_boxes = gapi.at("out1"); + cv::Mat& out_classes = gapi.at("out2"); + cv::Mat& 
out_scores = gapi.at("out3"); + cv::Mat& out_num = gapi.at("out4"); + // Simple copy for outputs + copyToOut(in_num, out_num); + copyToOut(in_boxes, out_boxes); + copyToOut(in_scores, out_scores); + copyToOut(in_classes, out_classes); +} + +class ONNXtest : public ::testing::Test { +public: + std::string model_path; + size_t num_in, num_out; + std::vector out_gapi; + std::vector out_onnx; + cv::Mat in_mat1; + + ONNXtest() { + env = Ort::Env(ORT_LOGGING_LEVEL_WARNING, "test"); + memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); + out_gapi.resize(1); + out_onnx.resize(1); + // FIXME: All tests chek "random" image + // Ideally it should be a real image + in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480}); + } + + template + void infer(const std::vector& ins, + std::vector& outs) { + // Prepare session + session = Ort::Session(env, model_path.data(), session_options); + num_in = session.GetInputCount(); + num_out = session.GetOutputCount(); + GAPI_Assert(num_in == ins.size()); + in_node_names.clear(); + out_node_names.clear(); + // Inputs Run params + std::vector in_tensors; + for(size_t i = 0; i < num_in; ++i) { + char* in_node_name_p = session.GetInputName(i, allocator); + in_node_names.push_back(std::string(in_node_name_p)); + allocator.Free(in_node_name_p); + in_node_dims = toORT(ins[i].size); + in_tensors.emplace_back(Ort::Value::CreateTensor(memory_info, + const_cast(ins[i].ptr()), + ins[i].total(), + in_node_dims.data(), + in_node_dims.size())); + } + // Outputs Run params + for(size_t i = 0; i < num_out; ++i) { + char* out_node_name_p = session.GetOutputName(i, allocator); + out_node_names.push_back(std::string(out_node_name_p)); + allocator.Free(out_node_name_p); + } + // Input/output order by names + const auto in_run_names = getCharNames(in_node_names); + const auto out_run_names = getCharNames(out_node_names); + // Run + auto result = session.Run(Ort::RunOptions{nullptr}, + in_run_names.data(), + &in_tensors.front(), + 
num_in, + out_run_names.data(), + num_out); + // Copy outputs + GAPI_Assert(result.size() == num_out); + outs.resize(num_out); + for (size_t i = 0; i < num_out; ++i) { + const auto info = result[i].GetTensorTypeAndShapeInfo(); + const auto shape = info.GetShape(); + const auto type = info.GetElementType(); + cv::Mat mt(std::vector(shape.begin(), shape.end()), toCV(type), + reinterpret_cast(result[i].GetTensorMutableData())); + mt.copyTo(outs[i]); + } + } + // One input/output overload + template + void infer(const cv::Mat& in, cv::Mat& out) { + std::vector result; + infer({in}, result); + GAPI_Assert(result.size() == 1u); + out = result.front(); + } + + void validate() { + GAPI_Assert(!out_gapi.empty() && !out_onnx.empty()); + ASSERT_EQ(out_gapi.size(), out_onnx.size()); + const auto size = out_gapi.size(); + for (size_t i = 0; i < size; ++i) { + normAssert(out_onnx[i], out_gapi[i], "Test outputs"); + } + } + + void useModel(const std::string& model_name) { + model_path = findModel(model_name); + } +private: + Ort::Env env{nullptr}; + Ort::MemoryInfo memory_info{nullptr}; Ort::AllocatorWithDefaultOptions allocator; - char* in_node_name_p = session.GetInputName(0, allocator); - char* out_node_name_p = session.GetOutputName(0, allocator); - std::string in_node_name(in_node_name_p); - std::string out_node_name(out_node_name_p); - allocator.Free(in_node_name_p); - allocator.Free(out_node_name_p); + Ort::SessionOptions session_options; + Ort::Session session{nullptr}; - auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); - cv::Mat dst; - preprocess(in, dst, mean, std); + std::vector in_node_dims; + std::vector in_node_names; + std::vector out_node_names; +}; - out.create(std::vector(output_node_dims.begin(), - output_node_dims.end()), CV_32F); // empty output Mat - auto in_tensor = Ort::Value::CreateTensor(memory_info, - dst.ptr(), - dst.total(), - input_node_dims.data(), - input_node_dims.size()); - auto out_tensor = 
Ort::Value::CreateTensor(memory_info, - out.ptr(), - out.total(), - output_node_dims.data(), - output_node_dims.size()); - std::vector in_names = {in_node_name.data()}; - std::vector out_names = {out_node_name.data()}; - session.Run(Ort::RunOptions{nullptr}, - in_names.data(), - &in_tensor, - session.GetInputCount(), - out_names.data(), - &out_tensor, - session.GetOutputCount()); -} +class ONNXClassificationTest : public ONNXtest { +public: + const cv::Scalar mean = { 0.485, 0.456, 0.406 }; + const cv::Scalar std = { 0.229, 0.224, 0.225 }; + void preprocess(const cv::Mat& src, cv::Mat& dst) { + const int new_h = 224; + const int new_w = 224; + cv::Mat tmp, cvt, rsz; + cv::resize(src, rsz, cv::Size(new_w, new_h)); + rsz.convertTo(cvt, CV_32F, 1.f / 255); + tmp = (cvt - mean) / std; + toCHW(tmp, dst); + dst = dst.reshape(1, {1, 3, new_h, new_w}); + } +}; + +class ONNXGRayScaleTest : public ONNXtest { +public: + void preprocess(const cv::Mat& src, cv::Mat& dst) { + const int new_h = 64; + const int new_w = 64; + cv::Mat cvc, rsz, cvt; + cv::cvtColor(src, cvc, cv::COLOR_BGR2GRAY); + cv::resize(cvc, rsz, cv::Size(new_w, new_h)); + rsz.convertTo(cvt, CV_32F); + toCHW(cvt, dst); + dst = dst.reshape(1, {1, 1, new_h, new_w}); + } +}; } // anonymous namespace -TEST(ONNX, Infer) +TEST_F(ONNXClassificationTest, Infer) { - cv::Mat in_mat1, out_gapi, out_onnx; - std::string model_path = findModel("squeezenet1.0-9"); - // NOTE: All tests chek "random" image - // Ideally it should be a real image - in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480}); - - cv::Scalar mean = { 0.485, 0.456, 0.406 }; - cv::Scalar std = { 0.229, 0.224, 0.225 }; - + useModel("classification/squeezenet/model/squeezenet1.0-9"); // ONNX_API code - InferONNX(model_path, in_mat1, out_onnx, mean, std); - + cv::Mat processed_mat; + preprocess(in_mat1, processed_mat); + infer(processed_mat, out_onnx.front()); // G_API code G_API_NET(SqueezNet, , "squeeznet"); cv::GMat in; @@ -154,125 +278,196 @@ TEST(ONNX, 
Infer) cv::GComputation comp(cv::GIn(in), cv::GOut(out)); // NOTE: We have to normalize U8 tensor // so cfgMeanStd() is here - auto net = cv::gapi::onnx::Params { model_path }.cfgMeanStd({mean},{std}); + auto net = cv::gapi::onnx::Params { model_path }.cfgMeanStd({ mean }, { std }); comp.apply(cv::gin(in_mat1), - cv::gout(out_gapi), + cv::gout(out_gapi.front()), cv::compile_args(cv::gapi::networks(net))); - // Validate - ASSERT_EQ(1000u, out_onnx.total()); - ASSERT_EQ(1000u, out_gapi.total()); - normAssert(out_onnx, out_gapi, "Test classification output"); + validate(); } -TEST(ONNX, InferROI) +TEST_F(ONNXtest, InferTensor) { - cv::Mat in_mat1, out_gapi, out_onnx; - std::string model_path = findModel("squeezenet1.0-9"); - in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480}); - - cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean - cv::Scalar std = { 0.229, 0.224, 0.225 }; // squeeznet std - - cv::Rect ROI(cv::Point{0, 0}, cv::Size{250, 250}); + useModel("classification/squeezenet/model/squeezenet1.0-9"); + // Create tensor + // FIXME: Test cheks "random" image + // Ideally it should be a real image + const cv::Mat rand_mat = initMatrixRandU(CV_32FC3, cv::Size{224, 224}); + const std::vector dims = {1, rand_mat.channels(), rand_mat.rows, rand_mat.cols}; + const cv::Mat tensor(dims, CV_32F, rand_mat.data); // ONNX_API code - InferONNX(model_path, in_mat1(ROI), out_onnx, mean, std); + infer(tensor, out_onnx.front()); + // G_API code + G_API_NET(SqueezNet, , "squeeznet"); + cv::GMat in; + cv::GMat out = cv::gapi::infer(in); + cv::GComputation comp(cv::GIn(in), cv::GOut(out)); + auto net = cv::gapi::onnx::Params { model_path }; + comp.apply(cv::gin(tensor), + cv::gout(out_gapi.front()), + cv::compile_args(cv::gapi::networks(net))); + // Validate + validate(); +} +TEST_F(ONNXClassificationTest, InferROI) +{ + useModel("classification/squeezenet/model/squeezenet1.0-9"); + const cv::Rect ROI(cv::Point{0, 0}, cv::Size{250, 250}); + // ONNX_API code + cv::Mat 
roi_mat; + preprocess(in_mat1(ROI), roi_mat); + infer(roi_mat, out_onnx.front()); // G_API code G_API_NET(SqueezNet, , "squeeznet"); cv::GMat in; cv::GOpaque rect; cv::GMat out = cv::gapi::infer(rect, in); cv::GComputation comp(cv::GIn(in, rect), cv::GOut(out)); - auto net = cv::gapi::onnx::Params { model_path }.cfgMeanStd({mean},{std}); + // NOTE: We have to normalize U8 tensor + // so cfgMeanStd() is here + auto net = cv::gapi::onnx::Params { model_path }.cfgMeanStd({ mean }, { std }); comp.apply(cv::gin(in_mat1, ROI), - cv::gout(out_gapi), + cv::gout(out_gapi.front()), cv::compile_args(cv::gapi::networks(net))); - // Validate - ASSERT_EQ(1000u, out_onnx.total()); - ASSERT_EQ(1000u, out_gapi.total()); - normAssert(out_onnx, out_gapi, "Test classification output"); + validate(); } -TEST(ONNX, InferROIList) +TEST_F(ONNXClassificationTest, InferROIList) { - cv::Mat in_mat1; - std::string model_path = findModel("squeezenet1.0-9"); - in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480}); - - cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean - cv::Scalar std = { 0.229, 0.224, 0.225 }; // squeeznet std - - std::vector rois = { + useModel("classification/squeezenet/model/squeezenet1.0-9"); + const std::vector rois = { cv::Rect(cv::Point{ 0, 0}, cv::Size{80, 120}), cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}), }; - std::vector out_gapi; - std::vector out_onnx(rois.size()); // ONNX_API code + out_onnx.resize(rois.size()); for (size_t i = 0; i < rois.size(); ++i) { - InferONNX(model_path, in_mat1(rois[i]), out_onnx[i], mean, std); + cv::Mat roi_mat; + preprocess(in_mat1(rois[i]), roi_mat); + infer(roi_mat, out_onnx[i]); } - // G_API code G_API_NET(SqueezNet, , "squeeznet"); cv::GMat in; cv::GArray rr; cv::GArray out = cv::gapi::infer(rr, in); cv::GComputation comp(cv::GIn(in, rr), cv::GOut(out)); - auto net = cv::gapi::onnx::Params { model_path }.cfgMeanStd({mean},{std}); + // NOTE: We have to normalize U8 tensor + // so cfgMeanStd() is here + auto net = 
cv::gapi::onnx::Params { model_path }.cfgMeanStd({ mean }, { std }); comp.apply(cv::gin(in_mat1, rois), cv::gout(out_gapi), cv::compile_args(cv::gapi::networks(net))); - // Validate - for (size_t i = 0; i < rois.size(); ++i) { - ASSERT_EQ(1000u, out_onnx[i].total()); - ASSERT_EQ(1000u, out_gapi[i].total()); - normAssert(out_onnx[i], out_gapi[i], "Test classification output"); - } + validate(); } -TEST(ONNX, Infer2ROIList) +TEST_F(ONNXClassificationTest, Infer2ROIList) { - cv::Mat in_mat1; - std::string model_path = findModel("squeezenet1.0-9"); - in_mat1 = initMatrixRandU(CV_8UC3, cv::Size{640, 480}); - - cv::Scalar mean = { 0.485, 0.456, 0.406 }; // squeeznet mean - cv::Scalar std = { 0.229, 0.224, 0.225 }; // squeeznet std - - std::vector rois = { + useModel("classification/squeezenet/model/squeezenet1.0-9"); + const std::vector rois = { cv::Rect(cv::Point{ 0, 0}, cv::Size{80, 120}), cv::Rect(cv::Point{50, 100}, cv::Size{250, 360}), }; - std::vector out_gapi; - std::vector out_onnx(rois.size()); // ONNX_API code + out_onnx.resize(rois.size()); for (size_t i = 0; i < rois.size(); ++i) { - InferONNX(model_path, in_mat1(rois[i]), out_onnx[i], mean, std); + cv::Mat roi_mat; + preprocess(in_mat1(rois[i]), roi_mat); + infer(roi_mat, out_onnx[i]); } - // G_API code G_API_NET(SqueezNet, , "squeeznet"); cv::GMat in; cv::GArray rr; - cv::GArray out = cv::gapi::infer2(in,rr); + cv::GArray out = cv::gapi::infer2(in, rr); cv::GComputation comp(cv::GIn(in, rr), cv::GOut(out)); - auto net = cv::gapi::onnx::Params { model_path }.cfgMeanStd({mean},{std}); + // NOTE: We have to normalize U8 tensor + // so cfgMeanStd() is here + auto net = cv::gapi::onnx::Params { model_path }.cfgMeanStd({ mean }, { std }); comp.apply(cv::gin(in_mat1, rois), cv::gout(out_gapi), cv::compile_args(cv::gapi::networks(net))); - // Validate - for (size_t i = 0; i < rois.size(); ++i) { - ASSERT_EQ(1000u, out_onnx[i].total()); - ASSERT_EQ(1000u, out_gapi[i].total()); - normAssert(out_onnx[i], out_gapi[i], 
"Test classification output"); - } + validate(); } +TEST_F(ONNXtest, InferDynamicInputTensor) +{ + useModel("object_detection_segmentation/tiny-yolov2/model/tinyyolov2-8"); + // Create tensor + // FIXME: Test cheks "random" image + // Ideally it should be a real image + const cv::Mat rand_mat = initMatrixRandU(CV_32FC3, cv::Size{416, 416}); + const std::vector dims = {1, rand_mat.channels(), rand_mat.rows, rand_mat.cols}; + cv::Mat tensor(dims, CV_32F, rand_mat.data); + const cv::Mat in_tensor = tensor / 255.f; + // ONNX_API code + infer(in_tensor, out_onnx.front()); + // G_API code + G_API_NET(YoloNet, , "YoloNet"); + cv::GMat in; + cv::GMat out = cv::gapi::infer(in); + cv::GComputation comp(cv::GIn(in), cv::GOut(out)); + auto net = cv::gapi::onnx::Params{model_path} + .cfgPostProc({cv::GMatDesc{CV_32F, {1, 125, 13, 13}}}, remapYolo) + .cfgOutputLayers({"out"}); + comp.apply(cv::gin(in_tensor), + cv::gout(out_gapi.front()), + cv::compile_args(cv::gapi::networks(net))); + // Validate + validate(); +} + +TEST_F(ONNXGRayScaleTest, InferImage) +{ + useModel("body_analysis/emotion_ferplus/model/emotion-ferplus-8"); + // ONNX_API code + cv::Mat prep_mat; + preprocess(in_mat1, prep_mat); + infer(prep_mat, out_onnx.front()); + // G_API code + G_API_NET(EmotionNet, , "emotion-ferplus"); + cv::GMat in; + cv::GMat out = cv::gapi::infer(in); + cv::GComputation comp(cv::GIn(in), cv::GOut(out)); + auto net = cv::gapi::onnx::Params { model_path } + .cfgNormalize({ false }); // model accepts 0..255 range in FP32; + comp.apply(cv::gin(in_mat1), + cv::gout(out_gapi.front()), + cv::compile_args(cv::gapi::networks(net))); + // Validate + validate(); +} + +TEST_F(ONNXtest, InferMultOutput) +{ + useModel("object_detection_segmentation/ssd-mobilenetv1/model/ssd_mobilenet_v1_10"); + // ONNX_API code + const auto prep_mat = in_mat1.reshape(1, {1, in_mat1.rows, in_mat1.cols, in_mat1.channels()}); + infer({prep_mat}, out_onnx); + // G_API code + using SSDOut = std::tuple; + 
G_API_NET(MobileNet, , "ssd_mobilenet"); + cv::GMat in; + cv::GMat out1, out2, out3, out4; + std::tie(out1, out2, out3, out4) = cv::gapi::infer(in); + cv::GComputation comp(cv::GIn(in), cv::GOut(out1, out2, out3, out4)); + auto net = cv::gapi::onnx::Params{model_path} + .cfgOutputLayers({"out1", "out2", "out3", "out4"}) + .cfgPostProc({cv::GMatDesc{CV_32F, {1, 100, 4}}, + cv::GMatDesc{CV_32F, {1, 100}}, + cv::GMatDesc{CV_32F, {1, 100}}, + cv::GMatDesc{CV_32F, {1, 1}}}, remapSsdPorts); + out_gapi.resize(num_out); + comp.apply(cv::gin(in_mat1), + cv::gout(out_gapi[0], out_gapi[1], out_gapi[2], out_gapi[3]), + cv::compile_args(cv::gapi::networks(net))); + // Validate + validate(); +} } // namespace opencv_test #endif // HAVE_ONNX From 4c9e3723e8a39c4779721cc404a41917dffaf068 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sun, 15 Nov 2020 19:22:05 +0000 Subject: [PATCH 101/152] objc: skip unsupported inner namespaces --- modules/objc/generator/gen_objc.py | 53 +++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 12 deletions(-) diff --git a/modules/objc/generator/gen_objc.py b/modules/objc/generator/gen_objc.py index 1975cb46fe..469b82b938 100755 --- a/modules/objc/generator/gen_objc.py +++ b/modules/objc/generator/gen_objc.py @@ -27,6 +27,10 @@ updated_files = 0 module_imports = [] +# list of namespaces, which should be skipped by wrapper generator +# the list is loaded from misc/objc/gen_dict.json defined for the module only +namespace_ignore_list = [] + # list of class names, which should be skipped by wrapper generator # the list is loaded from misc/objc/gen_dict.json defined for the module and its dependencies class_ignore_list = [] @@ -89,6 +93,14 @@ method_dict = { modules = [] + +class SkipSymbolException(Exception): + def __init__(self, text): + self.t = text + def __str__(self): + return self.t + + def read_contents(fname): with open(fname, 'r') as f: data = f.read() @@ -122,6 +134,10 @@ class GeneralInfo(): def __init__(self, type, decl, 
namespaces): self.symbol_id, self.namespace, self.classpath, self.classname, self.name = self.parseName(decl[0], namespaces) + for ns_ignore in namespace_ignore_list: + if self.symbol_id.startswith(ns_ignore + '.'): + raise SkipSymbolException('ignored namespace ({}): {}'.format(ns_ignore, self.symbol_id)) + # parse doxygen comments self.params={} @@ -709,6 +725,10 @@ class ObjectiveCWrapperGenerator(object): if self.isWrapped(name) and not classinfo.base: logging.warning('duplicated: %s', classinfo) return None + if name in self.classes: # TODO implement inner namespaces + if self.classes[name].symbol_id != classinfo.symbol_id: + logging.warning('duplicated under new id: {} (was {})'.format(classinfo.symbol_id, self.classes[name].symbol_id)) + return None self.classes[name] = classinfo if name in type_dict and not classinfo.base: logging.warning('duplicated: %s', classinfo) @@ -812,7 +832,12 @@ class ObjectiveCWrapperGenerator(object): elif not self.isWrapped(classname): logging.warning('not found: %s', fi) else: - self.getClass(classname).addMethod(fi) + ci = self.getClass(classname) + if ci.symbol_id != fi.symbol_id[0:fi.symbol_id.rfind('.')] and ci.symbol_id != self.Module: + # TODO fix this (inner namepaces) + logging.warning('SKIP: mismatched class: {} (class: {})'.format(fi.symbol_id, ci.symbol_id)) + return + ci.addMethod(fi) logging.info('ok: %s', fi) # calc args with def val cnt = len([a for a in fi.args if a.defval]) @@ -867,17 +892,20 @@ class ObjectiveCWrapperGenerator(object): for decl in decls: logging.info("\n--- Incoming ---\n%s", pformat(decl[:5], 4)) # without docstring name = decl[0] - if name.startswith("struct") or name.startswith("class"): - ci = self.add_class(decl) - if ci: - ci.header_import = header_import(hdr) - elif name.startswith("const"): - self.add_const(decl) - elif name.startswith("enum"): - # enum - self.add_enum(decl) - else: # function - self.add_func(decl) + try: + if name.startswith("struct") or name.startswith("class"): + ci 
= self.add_class(decl) + if ci: + ci.header_import = header_import(hdr) + elif name.startswith("const"): + self.add_const(decl) + elif name.startswith("enum"): + # enum + self.add_enum(decl) + else: # function + self.add_func(decl) + except SkipSymbolException as e: + logging.info('SKIP: {} due to {}'.format(name, e)) self.classes[self.Module].member_classes += manual_classes logging.info("\n\n===== Generating... =====") @@ -1602,6 +1630,7 @@ if __name__ == "__main__": if os.path.exists(gendict_fname): with open(gendict_fname) as f: gen_type_dict = json.load(f) + namespace_ignore_list = gen_type_dict.get("namespace_ignore_list", []) class_ignore_list += gen_type_dict.get("class_ignore_list", []) enum_ignore_list += gen_type_dict.get("enum_ignore_list", []) const_ignore_list += gen_type_dict.get("const_ignore_list", []) From 464d53bb167f1a3476d8daba32a7f3d5fcd8ecc7 Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Mon, 16 Nov 2020 21:34:42 +0000 Subject: [PATCH 102/152] python: emit "string" => "std::string" --- modules/python/src2/gen2.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/python/src2/gen2.py b/modules/python/src2/gen2.py index 233587c9cb..243442cbdd 100755 --- a/modules/python/src2/gen2.py +++ b/modules/python/src2/gen2.py @@ -201,7 +201,8 @@ simple_argtype_mapping = { "int": ArgTypeInfo("int", FormatStrings.int, "0", True), "float": ArgTypeInfo("float", FormatStrings.float, "0.f", True), "double": ArgTypeInfo("double", FormatStrings.double, "0", True), - "c_string": ArgTypeInfo("char*", FormatStrings.string, '(char*)""') + "c_string": ArgTypeInfo("char*", FormatStrings.string, '(char*)""'), + "string": ArgTypeInfo("std::string", FormatStrings.object, None, True), } From 3a184ae6778fb4a2f2baf1395d07707799ff2c95 Mon Sep 17 00:00:00 2001 From: Liubov Batanina Date: Tue, 17 Nov 2020 10:14:41 +0300 Subject: [PATCH 103/152] [ONNX] Added handler for int32 tensors --- modules/dnn/src/onnx/onnx_graph_simplifier.cpp | 13 
+++++++++++++ 1 file changed, 13 insertions(+) diff --git a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp index e8b237cab4..e7856cf1a9 100644 --- a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp +++ b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp @@ -513,6 +513,19 @@ Mat getMatFromTensor(opencv_onnx::TensorProto& tensor_proto) CV_Assert(!field.empty()); Mat(sizes, CV_64FC1, (void*)field.data()).convertTo(blob, CV_32FC1); } + else if (datatype == opencv_onnx::TensorProto_DataType_INT32) + { + if (!tensor_proto.int32_data().empty()) + { + const ::google::protobuf::RepeatedField field = tensor_proto.int32_data(); + Mat(sizes, CV_32SC1, (void*)field.data()).copyTo(blob); + } + else + { + char* val = const_cast(tensor_proto.raw_data().c_str()); + Mat(sizes, CV_32SC1, val).copyTo(blob); + } + } else if (datatype == opencv_onnx::TensorProto_DataType_INT64) { blob.create(sizes, CV_32SC1); From 2b82f8f12c2ac6f66872e0e6316f8f0b21b6ee13 Mon Sep 17 00:00:00 2001 From: Sergei Slashchinin <62052793+sl-sergei@users.noreply.github.com> Date: Tue, 17 Nov 2020 12:52:08 +0300 Subject: [PATCH 104/152] Merge pull request #18296 from sl-sergei:fix_16783 Fix loading issue for Faster RCNN model from #16783 * Add a reproducer with multi-output Gather * Fix an issue with ONNX graph simplifier * fix build * Move checks to correct class * Minor changes for better code appearence --- .../dnn/src/onnx/onnx_graph_simplifier.cpp | 34 +++++++++++++++++++ modules/dnn/test/test_onnx_importer.cpp | 5 +++ 2 files changed, 39 insertions(+) diff --git a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp index e8b237cab4..30c0b26ead 100644 --- a/modules/dnn/src/onnx/onnx_graph_simplifier.cpp +++ b/modules/dnn/src/onnx/onnx_graph_simplifier.cpp @@ -260,6 +260,40 @@ public: addNodeToMatch("Cast", gather); setFusedNode("Gather", input, index); } + + virtual bool match(const Ptr& net, int nodeId, + std::vector& 
matchedNodesIds, + std::vector& targetNodesIds) CV_OVERRIDE + { + bool retVal = Subgraph::match(net, nodeId, matchedNodesIds, targetNodesIds); + size_t matchedNodesNum = matchedNodesIds.size(); + // Now we check if merging can be made for these Gather and Cast nodes + if (!retVal || matchedNodesNum < 2) + return retVal; + else { + int nodeToMatch = matchedNodesIds[matchedNodesNum - 1]; + const Ptr node = net->getNode(nodeToMatch); + if (node->getType() == "Cast") { + int inpNodeId = matchedNodesIds[matchedNodesNum - 2]; + const Ptr inpNode = net->getNode(inpNodeId); + if (inpNode->getType() == "Gather") { + int numNodes = net->getNumNodes(); + std::string inpNodeName = node->getInputName(0); + for (int i = 0; i < numNodes; ++i) { + const Ptr node_to_check = net->getNode(i); + int numInp = node_to_check->getNumInputs(); + for (int inp = 0; inp < numInp; ++inp) { + if (i != nodeToMatch && inpNodeName == node_to_check->getInputName(0)) { + // Another node has the same input node, so it cannot be merged. 
+ return false; + } + } + } + } + } + } + return retVal; + } }; class ExpandSubgraph : public Subgraph diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 5c6de55da5..14d2d28522 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -705,6 +705,11 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias) normAssert(ref, out, "", default_l1, default_lInf); } +TEST_P(Test_ONNX_layers, GatherMultiOutput) +{ + testONNXModels("gather_multi_output"); +} + INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets()); class Test_ONNX_nets : public Test_ONNX_layers From 32e7ef8a3d2520e42a0ae1118872caf637ad945a Mon Sep 17 00:00:00 2001 From: Sergey Slashchinin Date: Tue, 17 Nov 2020 13:31:04 +0300 Subject: [PATCH 105/152] Add fixes and tests for different layers --- modules/dnn/include/opencv2/dnn/dnn.hpp | 3 + modules/dnn/src/dnn.cpp | 55 ++++++++++++++++- modules/dnn/src/layers/pooling_layer.cpp | 55 +++++++++++------ modules/dnn/src/layers/reshape_layer.cpp | 51 +++++++++++++++- modules/dnn/src/layers/slice_layer.cpp | 13 +++- modules/dnn/src/onnx/onnx_importer.cpp | 76 ++++++++++++++++++++---- modules/dnn/test/test_onnx_importer.cpp | 21 +++++++ 7 files changed, 243 insertions(+), 31 deletions(-) diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp index 98a825940d..9cb7089bdd 100644 --- a/modules/dnn/include/opencv2/dnn/dnn.hpp +++ b/modules/dnn/include/opencv2/dnn/dnn.hpp @@ -354,9 +354,12 @@ CV__DNN_EXPERIMENTAL_NS_BEGIN const int requiredOutputs, std::vector &outputs, std::vector &internals) const; + virtual int64 getFLOPS(const std::vector &inputs, const std::vector &outputs) const {CV_UNUSED(inputs); CV_UNUSED(outputs); return 0;} + virtual bool updateMemoryShapes(const std::vector &inputs); + CV_PROP String name; //!< Name of the layer instance, can be used for logging or other internal purposes. 
CV_PROP String type; //!< Type name which was used for creating layer by layer factory. CV_PROP int preferableTarget; //!< prefer target for layer forwarding diff --git a/modules/dnn/src/dnn.cpp b/modules/dnn/src/dnn.cpp index c789638793..efafd5d325 100644 --- a/modules/dnn/src/dnn.cpp +++ b/modules/dnn/src/dnn.cpp @@ -1119,6 +1119,7 @@ struct Net::Impl : public detail::NetImplBase preferableBackend = DNN_BACKEND_DEFAULT; preferableTarget = DNN_TARGET_CPU; skipInfEngineInit = false; + hasDynamicShapes = false; } Ptr netInputLayer; @@ -1130,6 +1131,7 @@ struct Net::Impl : public detail::NetImplBase int preferableTarget; String halideConfigFile; bool skipInfEngineInit; + bool hasDynamicShapes; // Map host data to backend specific wrapper. std::map > backendWrappers; @@ -3074,6 +3076,46 @@ struct Net::Impl : public detail::NetImplBase shapes = inOutShapes[layerId]; } + void updateLayersShapes() + { + CV_Assert(!layers[0].outputBlobs.empty()); + ShapesVec inputShapes; + for(int i = 0; i < layers[0].outputBlobs.size(); i++) + { + Mat& inp = layers[0].outputBlobs[i]; + CV_Assert(inp.total()); + if (preferableBackend == DNN_BACKEND_OPENCV && + preferableTarget == DNN_TARGET_OPENCL_FP16) + { + layers[0].outputBlobs[i].create(inp.dims, inp.size, CV_16S); + } + inputShapes.push_back(shape(inp)); + } + LayersShapesMap layersShapes; + layersShapes[0].in = inputShapes; + for (MapIdToLayerData::iterator it = layers.begin(); + it != layers.end(); it++) + { + int layerId = it->first; + std::vector& inputLayerIds = it->second.inputBlobsId; + if (layersShapes[layerId].in.empty()) + { + for(int i = 0; i < inputLayerIds.size(); i++) + { + int inputLayerId = inputLayerIds[i].lid; + LayersShapesMap::iterator inputIt = layersShapes.find(inputLayerId); + if(inputIt == layersShapes.end() || inputIt->second.out.empty()) + { + getLayerShapesRecursively(inputLayerId, layersShapes); + } + const MatShape& shape = layersShapes[inputLayerId].out[inputLayerIds[i].oid]; + 
layersShapes[layerId].in.push_back(shape); + } + it->second.layerInstance->updateMemoryShapes(layersShapes[layerId].in); + } + } + } + LayerPin getLatestLayerPin(const std::vector& pins) { return *std::max_element(pins.begin(), pins.end()); @@ -3487,6 +3529,8 @@ int Net::addLayer(const String &name, const String &type, LayerParams ¶ms) int id = ++impl->lastLayerId; impl->layerNameToId.insert(std::make_pair(name, id)); impl->layers.insert(std::make_pair(id, LayerData(id, name, type, params))); + if (params.get("has_dynamic_shapes", false)) + impl->hasDynamicShapes = true; return id; } @@ -3818,8 +3862,13 @@ void Net::setInput(InputArray blob, const String& name, double scalefactor, cons bool oldShape = prevShape == blobShape; blob_.copyTo(impl->netInputLayer->inputsData[pin.oid]); - if (!oldShape) + if (!oldShape) { ld.outputBlobs[pin.oid] = impl->netInputLayer->inputsData[pin.oid]; + if (impl->hasDynamicShapes) + { + impl->updateLayersShapes(); + } + } if (!ld.outputBlobsWrappers[pin.oid].empty()) { @@ -4746,6 +4795,10 @@ bool Layer::getMemoryShapes(const std::vector &inputs, return false; } +bool Layer::updateMemoryShapes(const std::vector &inputs) +{ + return true; +} ////////////////////////////////////////////////////////////////////////// static Mutex& getLayerFactoryMutex() diff --git a/modules/dnn/src/layers/pooling_layer.cpp b/modules/dnn/src/layers/pooling_layer.cpp index fd08fdbeb3..98417620ed 100644 --- a/modules/dnn/src/layers/pooling_layer.cpp +++ b/modules/dnn/src/layers/pooling_layer.cpp @@ -88,6 +88,9 @@ public: stride = Size(1, 1); pad_t = pad_l = pad_b = pad_r = 0; + hasDynamicShapes = params.get("has_dynamic_shapes", false); + shapesInitialized = !hasDynamicShapes; + if (params.has("pool") || params.has("kernel_size") || params.has("kernel_w") || params.has("kernel_h")) { @@ -1043,25 +1046,33 @@ virtual Ptr initNgraph(const std::vector >& inp outShape.push_back(pooledSize.height); outShape.push_back(pooledSize.width); } - else if 
(padMode.empty()) - { - for (int i = 0; i < local_kernel.size(); i++) { - float dst = (float)(inpShape[i] + pads_begin[i] + pads_end[i] - local_kernel[i]) / strides[i]; - outShape.push_back(1 + (ceilMode ? ceil(dst) : floor(dst))); - } - - // If we have padding, ensure that the last pooling starts strictly - // inside the image (instead of at the padding); otherwise clip the last. - for (int i = 0; i < pads_end.size(); i++) { - if (pads_end[i] && (outShape[2 + i] - 1) * strides[i] >= inpShape[i] + pads_end[i]) { - --outShape[2 + i]; - CV_Assert((outShape[2 + i] - 1) * strides[i] < inpShape[i] + pads_end[i]); - } - } - } else { - getConvPoolOutParams(inpShape, local_kernel, strides, padMode, std::vector(local_kernel.size(), 1), outShape); + if (hasDynamicShapes && !shapesInitialized) + { + //Just copy input shapes for width and height to prevent errors on loading stage + for (int i = 0; i < inpShape.size(); i++) + outShape.push_back(inpShape[i]); + } + else if (padMode.empty()) + { + for (int i = 0; i < local_kernel.size(); i++) { + float dst = (float) (inpShape[i] + pads_begin[i] + pads_end[i] - local_kernel[i]) / strides[i]; + outShape.push_back(1 + (ceilMode ? ceil(dst) : floor(dst))); + } + + // If we have padding, ensure that the last pooling starts strictly + // inside the image (instead of at the padding); otherwise clip the last. 
+ for (int i = 0; i < pads_end.size(); i++) { + if (pads_end[i] && (outShape[2 + i] - 1) * strides[i] >= inpShape[i] + pads_end[i]) { + --outShape[2 + i]; + CV_Assert((outShape[2 + i] - 1) * strides[i] < inpShape[i] + pads_end[i]); + } + } + } else { + getConvPoolOutParams(inpShape, local_kernel, strides, padMode, + std::vector(local_kernel.size(), 1), outShape); + } } if (type == ROI) { @@ -1083,6 +1094,14 @@ virtual Ptr initNgraph(const std::vector >& inp return false; } + bool updateMemoryShapes(const std::vector &inputs) CV_OVERRIDE + { + int dims = inputs[0].size(); + CV_Assert(inputs[0][dims - 1] > 0 && inputs[0][dims - 2] > 0); + shapesInitialized = true; + return true; + } + virtual int64 getFLOPS(const std::vector &inputs, const std::vector &outputs) const CV_OVERRIDE { @@ -1114,6 +1133,8 @@ private: ROI, // RoI pooling, https://arxiv.org/pdf/1504.08083.pdf PSROI // Position-sensitive RoI pooling, https://arxiv.org/pdf/1605.06409.pdf }; + bool hasDynamicShapes; + bool shapesInitialized; }; Ptr PoolingLayer::create(const LayerParams& params) diff --git a/modules/dnn/src/layers/reshape_layer.cpp b/modules/dnn/src/layers/reshape_layer.cpp index a85a4e4a2f..642e7c52f6 100644 --- a/modules/dnn/src/layers/reshape_layer.cpp +++ b/modules/dnn/src/layers/reshape_layer.cpp @@ -164,6 +164,9 @@ public: setParamsFrom(params); int axis = params.get("axis", 0); int numAxes = params.get("num_axes", -1); + hasDynamicShapes = params.get("has_dynamic_shapes", false); + shapesInitialized = !hasDynamicShapes; + CV_Assert(numAxes >= -1); newShapeRange = (numAxes == -1) ? 
Range(axis, INT_MAX) : Range(axis, axis + numAxes); @@ -176,6 +179,25 @@ public: for (i = 0; i < dims; i++) newShapeDesc[i] = paramShape.get(i); } + if (hasDynamicShapes) + { + dynamicShapes.clear(); + inputIndices.clear(); + if (params.has("dynamic_axes")) { + CV_Assert(params.has("input_indices")); + const DictValue &dynamicAxes = params.get("dynamic_axes"); + const DictValue &dynamicInputShapes = params.get("input_indices"); + int i, dims = dynamicAxes.size(); + CV_Assert(dims == dynamicInputShapes.size()); + CV_Assert(dims > 0); + dynamicShapes.resize(dims); + inputIndices.resize(dims); + for (i = 0; i < dims; i++) { + dynamicShapes[i] = dynamicAxes.get(i); + inputIndices[i] = dynamicInputShapes.get(i); + } + } + } } virtual bool supportBackend(int backendId) CV_OVERRIDE @@ -189,13 +211,21 @@ public: std::vector &outputs, std::vector &internals) const CV_OVERRIDE { + if (inputs.size() == 1 || inputs.size() == requiredOutputs) { outputs.clear(); for (size_t i = 0; i < inputs.size(); i++) { - outputs.push_back(MatShape()); - computeShapeByReshapeMask(inputs[i], newShapeDesc, newShapeRange, outputs.back()); + if (hasDynamicShapes && !shapesInitialized) + { + outputs.push_back(newShapeDesc); + } + else + { + outputs.push_back(MatShape()); + computeShapeByReshapeMask(inputs[i], newShapeDesc, newShapeRange, outputs.back()); + } } } else @@ -206,6 +236,19 @@ public: return true; } + bool updateMemoryShapes(const std::vector &inputs) CV_OVERRIDE + { + if (hasDynamicShapes) + { + for (int i = 0; i < dynamicShapes.size(); ++i) + { + newShapeDesc[dynamicShapes[i]] = inputs[0][inputIndices[i]]; + } + } + shapesInitialized = true; + return true; + } + void finalize(InputArrayOfArrays, OutputArrayOfArrays outputs_arr) CV_OVERRIDE { std::vector outputs; @@ -287,6 +330,10 @@ public: private: std::vector outShapes; + std::vector dynamicShapes; // Which axes shapes are dynamic and require reinitialization with new input + std::vector inputIndices; // Which axes from input are 
needed to compute correct output shape + bool hasDynamicShapes; + bool shapesInitialized; }; Ptr ReshapeLayer::create(const LayerParams& params) diff --git a/modules/dnn/src/layers/slice_layer.cpp b/modules/dnn/src/layers/slice_layer.cpp index 9994677cb5..fd314b7c57 100644 --- a/modules/dnn/src/layers/slice_layer.cpp +++ b/modules/dnn/src/layers/slice_layer.cpp @@ -66,6 +66,8 @@ public: setParamsFrom(params); axis = params.get("axis", 1); num_split = params.get("num_split", 0); + hasDynamicShapes = params.get("has_dynamic_shapes", false); + shapesInitialized = !hasDynamicShapes; if (params.has("slice_point")) { CV_Assert(!params.has("begin") && !params.has("size") && !params.has("end")); @@ -143,7 +145,8 @@ public: CV_Assert(sliceRanges[i].size() <= inpShape.size()); for (int j = 0; j < sliceRanges[i].size(); ++j) { - outputs[i][j] = clamp(sliceRanges[i][j], inpShape[j]).size(); + if (shapesInitialized || inpShape[j] > 0) + outputs[i][j] = clamp(sliceRanges[i][j], inpShape[j]).size(); } } } @@ -158,6 +161,12 @@ public: return false; } + bool updateMemoryShapes(const std::vector &inputs) CV_OVERRIDE + { + shapesInitialized = true; + return true; + } + void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE { #ifdef HAVE_OPENCL @@ -564,6 +573,8 @@ public: protected: // The actual non-negative values determined from @p sliceRanges depends on input size. 
std::vector > finalSliceRanges; + bool hasDynamicShapes; + bool shapesInitialized; }; class CropLayerImpl CV_FINAL : public SliceLayerImpl diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp index 9443336305..756c8a5580 100644 --- a/modules/dnn/src/onnx/onnx_importer.cpp +++ b/modules/dnn/src/onnx/onnx_importer.cpp @@ -64,6 +64,7 @@ public: ONNXImporter(Net& net, const char *onnxFile) : dstNet(net) { + hasDynamicShapes = false; CV_Assert(onnxFile); CV_LOG_DEBUG(NULL, "DNN/ONNX: processing ONNX model from file: " << onnxFile); @@ -84,6 +85,7 @@ public: ONNXImporter(Net& net, const char* buffer, size_t sizeBuffer) : dstNet(net) { + hasDynamicShapes = false; CV_LOG_DEBUG(NULL, "DNN/ONNX: processing in-memory ONNX model (" << sizeBuffer << " bytes)"); struct _Buf : public std::streambuf @@ -115,6 +117,7 @@ protected: std::map constBlobs; std::map outShapes; // List of internal blobs shapes. + bool hasDynamicShapes; // Whether the model has inputs with dynamic shapes typedef std::map::iterator IterShape_t; std::map layer_id; @@ -413,8 +416,10 @@ void ONNXImporter::populateNet() for (int j = 0; j < inpShape.size(); ++j) { inpShape[j] = tensorShape.dim(j).dim_value(); + if (!tensorShape.dim(j).dim_param().empty()) + hasDynamicShapes = true; } - if (!inpShape.empty()) + if (!inpShape.empty() && !hasDynamicShapes) { inpShape[0] = std::max(inpShape[0], 1); // It's OK to have undetermined batch size } @@ -461,6 +466,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_) layerParams.name = name; layerParams.type = layer_type; + layerParams.set("has_dynamic_shapes", hasDynamicShapes); if (layer_type == "MaxPool") { @@ -1276,6 +1282,20 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_) { layerParams.type = "Reshape"; layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size())); + if (hasDynamicShapes) + { + std::vector dynamicAxes; + std::vector inputIndices; + for (int index = 
0; index < inpShape.size(); ++index) + { + if (!maskedAxes[index]) + inputIndices.push_back(index); + } + for (int index = 0; index < outShape.size(); ++index) + dynamicAxes.push_back(index); + layerParams.set("dynamic_axes", DictValue::arrayInt(dynamicAxes.data(), dynamicAxes.size())); + layerParams.set("input_indices", DictValue::arrayInt(inputIndices.data(), inputIndices.size())); + } } else layerParams.type = "Identity"; @@ -1338,6 +1358,19 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_) outShape.insert(outShape.begin() + axis, 1); layerParams.type = "Reshape"; layerParams.set("dim", DictValue::arrayInt(&outShape[0], outShape.size())); + if (hasDynamicShapes) + { + std::vector dynamicAxes; + std::vector inputIndices; + for (int index = 0; index < outShape.size(); ++index) { + if (index != axis) + dynamicAxes.push_back(index); + } + for (int index = 0; index < inpShape.size(); ++index) + inputIndices.push_back(index); + layerParams.set("dynamic_axes", DictValue::arrayInt(dynamicAxes.data(), dynamicAxes.size())); + layerParams.set("input_indices", DictValue::arrayInt(inputIndices.data(), inputIndices.size())); + } } else if (layer_type == "Expand") { @@ -1625,6 +1658,7 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_) cv::dnn::DictValue paramEnd = cv::dnn::DictValue::arrayInt(end.data(), end.size()); sliceLp.set("begin", paramBegin); sliceLp.set("end", paramEnd); + sliceLp.set("has_dynamic_shapes", hasDynamicShapes); if (inpShape.size() > 1) { @@ -1637,6 +1671,17 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_) layerParams.type = "Reshape"; layerParams.set("axis", 0); layerParams.set("dim", DictValue::arrayInt(&inpShape[0], inpShape.size())); + if (hasDynamicShapes) + { + std::vector dynamicAxes; + std::vector inputIndices; + for (int index = 0; index < inpShape.size(); ++index) + dynamicAxes.push_back(index); + for (int index = 0; index < inpShape.size(); ++index) + 
inputIndices.push_back(index); + layerParams.set("dynamic_axes", DictValue::arrayInt(dynamicAxes.data(), dynamicAxes.size())); + layerParams.set("input_indices", DictValue::arrayInt(inputIndices.data(), inputIndices.size())); + } node_proto.set_input(0, sliceLp.name); } else @@ -1676,7 +1721,11 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_) for (int i = 1; i < node_proto.input_size(); i++) CV_Assert(layer_id.find(node_proto.input(i)) == layer_id.end()); - String interp_mode = layerParams.get("coordinate_transformation_mode"); + String interp_mode; + if (layerParams.has("coordinate_transformation_mode")) + interp_mode = layerParams.get("coordinate_transformation_mode"); + else + interp_mode = layerParams.get("mode"); CV_Assert_N(interp_mode != "tf_crop_and_resize", interp_mode != "tf_half_pixel_for_nn"); layerParams.set("align_corners", interp_mode == "align_corners"); @@ -1688,16 +1737,23 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_) shapes.convertTo(shapes, CV_32S); int height = shapes.at(2); int width = shapes.at(3); - if (node_proto.input_size() == 3) + if (hasDynamicShapes) { - IterShape_t shapeIt = outShapes.find(node_proto.input(0)); - CV_Assert(shapeIt != outShapes.end()); - MatShape scales = shapeIt->second; - height *= scales[2]; - width *= scales[3]; + layerParams.set("zoom_factor_x", width); + layerParams.set("zoom_factor_y", height); + } + else + { + if (node_proto.input_size() == 3) { + IterShape_t shapeIt = outShapes.find(node_proto.input(0)); + CV_Assert(shapeIt != outShapes.end()); + MatShape scales = shapeIt->second; + height *= scales[2]; + width *= scales[3]; + } + layerParams.set("width", width); + layerParams.set("height", height); } - layerParams.set("width", width); - layerParams.set("height", height); if (layerParams.get("mode") == "linear") { layerParams.set("mode", interp_mode == "pytorch_half_pixel" ? 
diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 14d2d28522..9ddc17c97c 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -710,6 +710,27 @@ TEST_P(Test_ONNX_layers, GatherMultiOutput) testONNXModels("gather_multi_output"); } +TEST_P(Test_ONNX_layers, DynamicAxes) +{ + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NN_BUILDER); + } + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); + } + testONNXModels("squeeze_and_conv_dynamic_axes"); + testONNXModels("unsqueeze_and_conv_dynamic_axes"); + testONNXModels("gather_dynamic_axes"); + testONNXModels("gather_scalar_dynamic_axes"); + testONNXModels("slice_dynamic_axes"); + testONNXModels("slice_opset_11_dynamic_axes"); + testONNXModels("resize_opset11_torch1.6_dynamic_axes"); + testONNXModels("average_pooling_dynamic_axes"); + testONNXModels("maxpooling_sigmoid_dynamic_axes"); +} + INSTANTIATE_TEST_CASE_P(/*nothing*/, Test_ONNX_layers, dnnBackendsAndTargets()); class Test_ONNX_nets : public Test_ONNX_layers From 23baf1a75e1fd70e300769654afec0024047fe7b Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Sat, 31 Oct 2020 14:08:13 +0000 Subject: [PATCH 106/152] dnn: fix High-Level public API (cv::dnn::Model class) - proxy selected Net methods only (don't derive from Net directly) - default Model ctor is protected --- modules/dnn/include/opencv2/dnn/dnn.hpp | 43 +++-- modules/dnn/include/opencv2/dnn/version.hpp | 2 +- modules/dnn/src/model.cpp | 188 ++++++++++++++------ modules/dnn/test/test_caffe_importer.cpp | 2 +- 4 files changed, 172 insertions(+), 63 deletions(-) diff --git a/modules/dnn/include/opencv2/dnn/dnn.hpp b/modules/dnn/include/opencv2/dnn/dnn.hpp index 
3b12508c74..bf39ad8289 100644 --- a/modules/dnn/include/opencv2/dnn/dnn.hpp +++ b/modules/dnn/include/opencv2/dnn/dnn.hpp @@ -1072,14 +1072,17 @@ CV__DNN_INLINE_NS_BEGIN * Model creates net from file with trained weights and config, * sets preprocessing input and runs forward pass. */ - class CV_EXPORTS_W_SIMPLE Model : public Net + class CV_EXPORTS_W_SIMPLE Model { public: - /** - * @brief Default constructor. - */ + CV_DEPRECATED_EXTERNAL // avoid using in C++ code, will be moved to "protected" (need to fix bindings first) Model(); + Model(const Model&) = default; + Model(Model&&) = default; + Model& operator=(const Model&) = default; + Model& operator=(Model&&) = default; + /** * @brief Create model from deep learning network represented in one of the supported formats. * An order of @p model and @p config arguments does not matter. @@ -1100,13 +1103,12 @@ CV__DNN_INLINE_NS_BEGIN */ CV_WRAP Model& setInputSize(const Size& size); - /** @brief Set input size for frame. + /** @overload * @param[in] width New input width. * @param[in] height New input height. - * @note If shape of the new blob less than 0, - * then frame size not change. */ - CV_WRAP Model& setInputSize(int width, int height); + CV_WRAP inline + Model& setInputSize(int width, int height) { return setInputSize(Size(width, height)); } /** @brief Set mean value for frame. * @param[in] mean Scalar with mean values which are subtracted from channels. @@ -1143,10 +1145,31 @@ CV__DNN_INLINE_NS_BEGIN * @param[in] frame The input image. * @param[out] outs Allocated output blobs, which will store results of the computation. 
*/ - CV_WRAP void predict(InputArray frame, OutputArrayOfArrays outs); + CV_WRAP void predict(InputArray frame, OutputArrayOfArrays outs) const; + + + // ============================== Net proxy methods ============================== + // Never expose methods with network implementation details, like: + // - addLayer, addLayerToPrev, connect, setInputsNames, setInputShape, setParam, getParam + // - getLayer*, getUnconnectedOutLayers, getUnconnectedOutLayersNames, getLayersShapes + // - forward* methods, setInput + + /// @sa Net::setPreferableBackend + CV_WRAP Model& setPreferableBackend(dnn::Backend backendId); + /// @sa Net::setPreferableTarget + CV_WRAP Model& setPreferableTarget(dnn::Target targetId); + + CV_DEPRECATED_EXTERNAL + operator Net&() const { return getNetwork_(); } + + //protected: - internal/tests usage only + Net& getNetwork_() const; + inline Net& getNetwork_() { return const_cast(this)->getNetwork_(); } - protected: struct Impl; + inline Impl* getImpl() const { return impl.get(); } + inline Impl& getImplRef() const { CV_DbgAssert(impl); return *impl.get(); } + protected: Ptr impl; }; diff --git a/modules/dnn/include/opencv2/dnn/version.hpp b/modules/dnn/include/opencv2/dnn/version.hpp index f91b44d142..62ecadb6f7 100644 --- a/modules/dnn/include/opencv2/dnn/version.hpp +++ b/modules/dnn/include/opencv2/dnn/version.hpp @@ -6,7 +6,7 @@ #define OPENCV_DNN_VERSION_HPP /// Use with major OpenCV version only. 
-#define OPENCV_DNN_API_VERSION 20200908 +#define OPENCV_DNN_API_VERSION 20201117 #if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_INLINE_NS #define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION) diff --git a/modules/dnn/src/model.cpp b/modules/dnn/src/model.cpp index 677228bcf2..acee29e680 100644 --- a/modules/dnn/src/model.cpp +++ b/modules/dnn/src/model.cpp @@ -15,6 +15,9 @@ namespace dnn { struct Model::Impl { +//protected: + Net net; + Size size; Scalar mean; double scale = 1.0; @@ -23,7 +26,70 @@ struct Model::Impl Mat blob; std::vector outNames; - void predict(Net& net, const Mat& frame, OutputArrayOfArrays outs) +public: + virtual ~Impl() {} + Impl() {} + Impl(const Impl&) = delete; + Impl(Impl&&) = delete; + + virtual Net& getNetwork() const { return const_cast(net); } + + virtual void setPreferableBackend(Backend backendId) { net.setPreferableBackend(backendId); } + virtual void setPreferableTarget(Target targetId) { net.setPreferableTarget(targetId); } + + /*virtual*/ + void initNet(const Net& network) + { + net = network; + + outNames = net.getUnconnectedOutLayersNames(); + std::vector inLayerShapes; + std::vector outLayerShapes; + net.getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes); + if (!inLayerShapes.empty() && inLayerShapes[0].size() == 4) + size = Size(inLayerShapes[0][3], inLayerShapes[0][2]); + else + size = Size(); + } + + /*virtual*/ + void setInputParams(double scale_, const Size& size_, const Scalar& mean_, + bool swapRB_, bool crop_) + { + size = size_; + mean = mean_; + scale = scale_; + crop = crop_; + swapRB = swapRB_; + } + /*virtual*/ + void setInputSize(const Size& size_) + { + size = size_; + } + /*virtual*/ + void setInputMean(const Scalar& mean_) + { + mean = mean_; + } + /*virtual*/ + void setInputScale(double scale_) + { + scale = scale_; + } + /*virtual*/ + void setInputCrop(bool crop_) + { + crop = crop_; + } + /*virtual*/ + void setInputSwapRB(bool swapRB_) + { + 
swapRB = swapRB_; + } + + /*virtual*/ + void processFrame(InputArray frame, OutputArrayOfArrays outs) { if (size.empty()) CV_Error(Error::StsBadSize, "Input size not specified"); @@ -34,96 +100,115 @@ struct Model::Impl // Faster-RCNN or R-FCN if (net.getLayer(0)->outputNameToIndex("im_info") != -1) { - Mat imInfo = (Mat_(1, 3) << size.height, size.width, 1.6f); + Mat imInfo(Matx31f(size.height, size.width, 1.6f)); net.setInput(imInfo, "im_info"); } net.forward(outs, outNames); } }; -Model::Model() : impl(new Impl) {} +Model::Model() + : impl(makePtr()) +{ + // nothing +} Model::Model(const String& model, const String& config) - : Net(readNet(model, config)), impl(new Impl) + : Model() { - impl->outNames = getUnconnectedOutLayersNames(); - std::vector inLayerShapes; - std::vector outLayerShapes; - getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes); - if (!inLayerShapes.empty() && inLayerShapes[0].size() == 4) - impl->size = Size(inLayerShapes[0][3], inLayerShapes[0][2]); -}; + impl->initNet(readNet(model, config)); +} -Model::Model(const Net& network) : Net(network), impl(new Impl) +Model::Model(const Net& network) + : Model() { - impl->outNames = getUnconnectedOutLayersNames(); - std::vector inLayerShapes; - std::vector outLayerShapes; - getLayerShapes(MatShape(), 0, inLayerShapes, outLayerShapes); - if (!inLayerShapes.empty() && inLayerShapes[0].size() == 4) - impl->size = Size(inLayerShapes[0][3], inLayerShapes[0][2]); -}; + impl->initNet(network); +} -Model& Model::setInputSize(const Size& size) +Net& Model::getNetwork_() const { - impl->size = size; + CV_DbgAssert(impl); + return impl->getNetwork(); +} + +Model& Model::setPreferableBackend(Backend backendId) +{ + CV_DbgAssert(impl); + impl->setPreferableBackend(backendId); + return *this; +} +Model& Model::setPreferableTarget(Target targetId) +{ + CV_DbgAssert(impl); + impl->setPreferableTarget(targetId); return *this; } -Model& Model::setInputSize(int width, int height) +Model& 
Model::setInputSize(const Size& size) { - impl->size = Size(width, height); + CV_DbgAssert(impl); + impl->setInputSize(size); return *this; } Model& Model::setInputMean(const Scalar& mean) { - impl->mean = mean; + CV_DbgAssert(impl); + impl->setInputMean(mean); return *this; } Model& Model::setInputScale(double scale) { - impl->scale = scale; + CV_DbgAssert(impl); + impl->setInputScale(scale); return *this; } Model& Model::setInputCrop(bool crop) { - impl->crop = crop; + CV_DbgAssert(impl); + impl->setInputCrop(crop); return *this; } Model& Model::setInputSwapRB(bool swapRB) { - impl->swapRB = swapRB; + CV_DbgAssert(impl); + impl->setInputSwapRB(swapRB); return *this; } void Model::setInputParams(double scale, const Size& size, const Scalar& mean, bool swapRB, bool crop) { - impl->size = size; - impl->mean = mean; - impl->scale = scale; - impl->crop = crop; - impl->swapRB = swapRB; + CV_DbgAssert(impl); + impl->setInputParams(scale, size, mean, swapRB, crop); } -void Model::predict(InputArray frame, OutputArrayOfArrays outs) +void Model::predict(InputArray frame, OutputArrayOfArrays outs) const { - impl->predict(*this, frame.getMat(), outs); + CV_DbgAssert(impl); + impl->processFrame(frame, outs); } + ClassificationModel::ClassificationModel(const String& model, const String& config) - : Model(model, config) {}; + : Model(model, config) +{ + // nothing +} -ClassificationModel::ClassificationModel(const Net& network) : Model(network) {}; +ClassificationModel::ClassificationModel(const Net& network) + : Model(network) +{ + // nothing +} std::pair ClassificationModel::classify(InputArray frame) { std::vector outs; - impl->predict(*this, frame.getMat(), outs); + impl->processFrame(frame, outs); CV_Assert(outs.size() == 1); double conf; @@ -145,11 +230,11 @@ KeypointsModel::KeypointsModel(const Net& network) : Model(network) {}; std::vector KeypointsModel::estimate(InputArray frame, float thresh) { - int frameHeight = frame.getMat().size[0]; - int frameWidth = 
frame.getMat().size[1]; + int frameHeight = frame.rows(); + int frameWidth = frame.cols(); std::vector outs; - impl->predict(*this, frame.getMat(), outs); + impl->processFrame(frame, outs); CV_Assert(outs.size() == 1); Mat output = outs[0]; @@ -202,9 +287,8 @@ SegmentationModel::SegmentationModel(const Net& network) : Model(network) {}; void SegmentationModel::segment(InputArray frame, OutputArray mask) { - std::vector outs; - impl->predict(*this, frame.getMat(), outs); + impl->processFrame(frame, outs); CV_Assert(outs.size() == 1); Mat score = outs[0]; @@ -250,12 +334,14 @@ void disableRegionNMS(Net& net) } DetectionModel::DetectionModel(const String& model, const String& config) - : Model(model, config) { - disableRegionNMS(*this); + : Model(model, config) +{ + disableRegionNMS(getNetwork_()); // FIXIT Move to DetectionModel::Impl::initNet() } -DetectionModel::DetectionModel(const Net& network) : Model(network) { - disableRegionNMS(*this); +DetectionModel::DetectionModel(const Net& network) : Model(network) +{ + disableRegionNMS(getNetwork_()); // FIXIT Move to DetectionModel::Impl::initNet() } void DetectionModel::detect(InputArray frame, CV_OUT std::vector& classIds, @@ -263,7 +349,7 @@ void DetectionModel::detect(InputArray frame, CV_OUT std::vector& classIds, float confThreshold, float nmsThreshold) { std::vector detections; - impl->predict(*this, frame.getMat(), detections); + impl->processFrame(frame, detections); boxes.clear(); confidences.clear(); @@ -271,15 +357,15 @@ void DetectionModel::detect(InputArray frame, CV_OUT std::vector& classIds, int frameWidth = frame.cols(); int frameHeight = frame.rows(); - if (getLayer(0)->outputNameToIndex("im_info") != -1) + if (getNetwork_().getLayer(0)->outputNameToIndex("im_info") != -1) { frameWidth = impl->size.width; frameHeight = impl->size.height; } - std::vector layerNames = getLayerNames(); - int lastLayerId = getLayerId(layerNames.back()); - Ptr lastLayer = getLayer(lastLayerId); + std::vector layerNames = 
getNetwork_().getLayerNames(); + int lastLayerId = getNetwork_().getLayerId(layerNames.back()); + Ptr lastLayer = getNetwork_().getLayer(lastLayerId); if (lastLayer->type == "DetectionOutput") { diff --git a/modules/dnn/test/test_caffe_importer.cpp b/modules/dnn/test/test_caffe_importer.cpp index e1ffa762de..5440f4734f 100644 --- a/modules/dnn/test/test_caffe_importer.cpp +++ b/modules/dnn/test/test_caffe_importer.cpp @@ -563,7 +563,7 @@ TEST_P(Test_Caffe_nets, DenseNet_121) } normAssert(outs[0], ref, "", l1, lInf); if (target != DNN_TARGET_MYRIAD || getInferenceEngineVPUType() != CV_DNN_INFERENCE_ENGINE_VPU_TYPE_MYRIAD_X) - expectNoFallbacksFromIE(model); + expectNoFallbacksFromIE(model.getNetwork_()); } TEST(Test_Caffe, multiple_inputs) From 3cdf9264545b1ce47e9cc19ae25e23d2950c53e9 Mon Sep 17 00:00:00 2001 From: Sergey Slashchinin Date: Tue, 17 Nov 2020 14:33:39 +0300 Subject: [PATCH 107/152] disable Conv1d test on NGRAPH/MYRIAD --- modules/dnn/test/test_onnx_importer.cpp | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 14d2d28522..c115be728a 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -685,6 +685,10 @@ TEST_P(Test_ONNX_layers, Conv1d_variable_weight) TEST_P(Test_ONNX_layers, Conv1d_variable_weight_bias) { + if (backend == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) + { + if (target == DNN_TARGET_MYRIAD) applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH); + } String basename = "conv1d_variable_wb"; Net net = readNetFromONNX(_tf("models/" + basename + ".onnx")); ASSERT_FALSE(net.empty()); From 72d06080c6fc8009e38eb991c05ad075b44be5e5 Mon Sep 17 00:00:00 2001 From: Liubov Batanina Date: Tue, 17 Nov 2020 14:45:36 +0300 Subject: [PATCH 108/152] [ONNX] Added Reduce ops for batch and channel --- modules/dnn/src/onnx/onnx_importer.cpp | 33 ++++++++++++++++++++++--- 
modules/dnn/test/test_onnx_importer.cpp | 4 ++- 2 files changed, 32 insertions(+), 5 deletions(-) diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp index 9443336305..32f7f02e9d 100644 --- a/modules/dnn/src/onnx/onnx_importer.cpp +++ b/modules/dnn/src/onnx/onnx_importer.cpp @@ -551,11 +551,36 @@ void ONNXImporter::handleNode(const opencv_onnx::NodeProto& node_proto_) CV_Assert(axes.size() <= inpShape.size() - 2); std::vector kernel_size(inpShape.size() - 2, 1); - for (int i = 0; i < axes.size(); i++) { - int axis = clamp(axes.get(i), inpShape.size()); - CV_Assert_N(axis >= 2 + i, axis < inpShape.size()); - kernel_size[axis - 2] = inpShape[axis]; + if (axes.size() == 1 && (clamp(axes.get(0), inpShape.size()) <= 1)) + { + int axis = clamp(axes.get(0), inpShape.size()); + MatShape newShape = inpShape; + newShape[axis + 1] = total(newShape, axis + 1); + newShape.resize(axis + 2); + newShape.insert(newShape.begin(), 2 - axis, 1); + + LayerParams reshapeLp; + reshapeLp.type = "Reshape"; + reshapeLp.name = layerParams.name + "/reshape"; + CV_Assert(layer_id.find(reshapeLp.name) == layer_id.end()); + reshapeLp.set("dim", DictValue::arrayInt(&newShape[0], newShape.size())); + + node_proto.set_output(0, reshapeLp.name); + addLayer(reshapeLp, node_proto); + + kernel_size.resize(2); + kernel_size[0] = inpShape[axis]; + node_proto.set_input(0, node_proto.output(0)); } + else + { + for (int i = 0; i < axes.size(); i++) { + int axis = clamp(axes.get(i), inpShape.size()); + CV_Assert_N(axis >= 2 + i, axis < inpShape.size()); + kernel_size[axis - 2] = inpShape[axis]; + } + } + LayerParams poolLp = layerParams; poolLp.name = layerParams.name + "/avg"; CV_Assert(layer_id.find(poolLp.name) == layer_id.end()); diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 14d2d28522..1c5d2e5289 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -267,9 
+267,11 @@ TEST_P(Test_ONNX_layers, ReduceSum) testONNXModels("reduce_sum"); } -TEST_P(Test_ONNX_layers, ReduceMaxGlobal) +TEST_P(Test_ONNX_layers, ReduceMax) { testONNXModels("reduce_max"); + testONNXModels("reduce_max_axis_0"); + testONNXModels("reduce_max_axis_1"); } TEST_P(Test_ONNX_layers, Scale) From 2b558a3787dc441b11e6c52d5a461b0e2e05795b Mon Sep 17 00:00:00 2001 From: Alexander Alekhin Date: Wed, 11 Nov 2020 17:57:53 +0000 Subject: [PATCH 109/152] core: fix F16C compilation check --- .../include/opencv2/core/cv_cpu_dispatch.h | 5 +++++ modules/core/include/opencv2/core/cvdef.h | 4 ++-- .../include/opencv2/core/hal/intrin_avx.hpp | 21 +++++++++++++++++++ modules/core/src/convert.simd.hpp | 5 +++++ modules/core/test/test_intrin.cpp | 8 +++++-- modules/core/test/test_intrin_utils.hpp | 6 +++--- 6 files changed, 42 insertions(+), 7 deletions(-) diff --git a/modules/core/include/opencv2/core/cv_cpu_dispatch.h b/modules/core/include/opencv2/core/cv_cpu_dispatch.h index 42651aed5e..540fbb605c 100644 --- a/modules/core/include/opencv2/core/cv_cpu_dispatch.h +++ b/modules/core/include/opencv2/core/cv_cpu_dispatch.h @@ -216,6 +216,11 @@ struct VZeroUpperGuard { # define CV_VSX 1 #endif +#ifdef __F16C__ +# include +# define CV_FP16 1 +#endif + #endif // !__OPENCV_BUILD && !__CUDACC (Compatibility code) diff --git a/modules/core/include/opencv2/core/cvdef.h b/modules/core/include/opencv2/core/cvdef.h index 5bd3af33a4..6488b8bd4f 100644 --- a/modules/core/include/opencv2/core/cvdef.h +++ b/modules/core/include/opencv2/core/cvdef.h @@ -765,7 +765,7 @@ protected: float16_t() {} explicit float16_t(float x) { - #if CV_AVX2 + #if CV_FP16 __m128 v = _mm_load_ss(&x); w = (ushort)_mm_cvtsi128_si32(_mm_cvtps_ph(v, 0)); #else @@ -796,7 +796,7 @@ protected: operator float() const { - #if CV_AVX2 + #if CV_FP16 float f; _mm_store_ss(&f, _mm_cvtph_ps(_mm_cvtsi32_si128(w))); return f; diff --git a/modules/core/include/opencv2/core/hal/intrin_avx.hpp 
b/modules/core/include/opencv2/core/hal/intrin_avx.hpp index 5dc5bb567d..54e8927192 100644 --- a/modules/core/include/opencv2/core/hal/intrin_avx.hpp +++ b/modules/core/include/opencv2/core/hal/intrin_avx.hpp @@ -3121,18 +3121,39 @@ OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_float32x8, float, f32, v_uint32x8, un OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_int64x4, int64, s64, v_uint64x4, uint64, u64) OPENCV_HAL_IMPL_AVX_LOADSTORE_INTERLEAVE(v_float64x4, double, f64, v_uint64x4, uint64, u64) +// // FP16 +// + inline v_float32x8 v256_load_expand(const float16_t* ptr) { +#if CV_FP16 return v_float32x8(_mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)ptr))); +#else + float CV_DECL_ALIGNED(32) buf[8]; + for (int i = 0; i < 8; i++) + buf[i] = (float)ptr[i]; + return v256_load_aligned(buf); +#endif } inline void v_pack_store(float16_t* ptr, const v_float32x8& a) { +#if CV_FP16 __m128i ah = _mm256_cvtps_ph(a.val, 0); _mm_storeu_si128((__m128i*)ptr, ah); +#else + float CV_DECL_ALIGNED(32) buf[8]; + v_store_aligned(buf, a); + for (int i = 0; i < 8; i++) + ptr[i] = float16_t(buf[i]); +#endif } +// +// end of FP16 +// + inline void v256_cleanup() { _mm256_zeroall(); } CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END diff --git a/modules/core/src/convert.simd.hpp b/modules/core/src/convert.simd.hpp index a16a1a8405..4af5533870 100644 --- a/modules/core/src/convert.simd.hpp +++ b/modules/core/src/convert.simd.hpp @@ -5,6 +5,11 @@ #include "precomp.hpp" #include "convert.hpp" +#if !defined(OPENCV_SUPRESS_WARNING_AVX2_WITHOUT_FP16C) && \ + (defined(__GNUC__) && defined(__AVX2__) && !defined(__F16C__)) +#warning "Non-optimal compiler flags: AVX2 without FP16. Generated code is very slow. Consider adding '-mf16c' compiler option." 
+#endif + namespace cv { CV_CPU_OPTIMIZATION_NAMESPACE_BEGIN diff --git a/modules/core/test/test_intrin.cpp b/modules/core/test/test_intrin.cpp index 321fa64264..71d61e14e0 100644 --- a/modules/core/test/test_intrin.cpp +++ b/modules/core/test/test_intrin.cpp @@ -126,9 +126,11 @@ DEFINE_SIMD_TESTS(256, AVX512_SKX) TEST(hal_intrin256, float16x16_FP16) { +#if CV_TRY_FP16 //CV_CPU_CALL_FP16_(test_hal_intrin_float16, ()); CV_CPU_CALL_AVX2_(test_hal_intrin_float16, ()); - throw SkipTestException("Unsupported hardware: FP16 is not available"); +#endif + throw SkipTestException("Unsupported: FP16 is not available"); } @@ -142,8 +144,10 @@ namespace intrin512 { TEST(hal_intrin512, float16x32_FP16) { +#if CV_TRY_FP16 CV_CPU_CALL_AVX512_SKX_(test_hal_intrin_float16, ()); - throw SkipTestException("Unsupported hardware: FP16 is not available"); +#endif + throw SkipTestException("Unsupported: FP16 is not available"); } diff --git a/modules/core/test/test_intrin_utils.hpp b/modules/core/test/test_intrin_utils.hpp index 6731091463..84da496b42 100644 --- a/modules/core/test/test_intrin_utils.hpp +++ b/modules/core/test/test_intrin_utils.hpp @@ -1902,21 +1902,21 @@ void test_hal_intrin_float64() #endif } -#if CV_FP16 void test_hal_intrin_float16() { DUMP_ENTRY(v_float16); #if CV_FP16 TheTest() .test_loadstore_fp16_f32() -#endif #if CV_SIMD_FP16 .test_loadstore_fp16() .test_float_cvt_fp16() #endif ; -} +#else + std::cout << "SKIP: CV_FP16 is not available" << std::endl; #endif +} /*#if defined(CV_CPU_DISPATCH_MODE_FP16) && CV_CPU_DISPATCH_MODE == FP16 void test_hal_intrin_float16() From b866d0dc388dd9705c1171cfbbe16e7aebe1f84c Mon Sep 17 00:00:00 2001 From: Dmitry Matveev Date: Tue, 17 Nov 2020 17:04:19 +0300 Subject: [PATCH 110/152] Merge pull request #18793 from dmatveev:dm/in_graph_metadata G-API: Introduce runtime in-graph metadata * G-API: In-graph metadata -- initial implementation * G-API: Finish the in-graph metadata implementation for Streaming * G-API: Fix standalone 
build & warnings for in-graph metadata * G-API: In-graph meta -- fixed review comments * G-API: Fix issues with desync causing failing tests --- modules/gapi/CMakeLists.txt | 10 +- modules/gapi/include/opencv2/gapi/garg.hpp | 67 +++++- modules/gapi/include/opencv2/gapi/gopaque.hpp | 13 ++ .../include/opencv2/gapi/streaming/cap.hpp | 30 ++- .../include/opencv2/gapi/streaming/meta.hpp | 79 +++++++ modules/gapi/src/api/gbackend.cpp | 38 +++- modules/gapi/src/api/grunarg.cpp | 33 +++ modules/gapi/src/backends/common/gbackend.hpp | 11 + .../gapi/src/backends/common/gmetabackend.cpp | 105 ++++++++++ .../gapi/src/backends/common/gmetabackend.hpp | 16 ++ modules/gapi/src/compiler/gcompiler.cpp | 4 +- modules/gapi/src/compiler/gislandmodel.cpp | 35 +++- modules/gapi/src/compiler/gislandmodel.hpp | 4 + modules/gapi/src/executor/gexecutor.cpp | 53 ++++- .../gapi/src/executor/gstreamingexecutor.cpp | 43 +++- modules/gapi/test/gapi_graph_meta_tests.cpp | 195 ++++++++++++++++++ 16 files changed, 681 insertions(+), 55 deletions(-) create mode 100644 modules/gapi/include/opencv2/gapi/streaming/meta.hpp create mode 100644 modules/gapi/src/api/grunarg.cpp create mode 100644 modules/gapi/src/backends/common/gmetabackend.cpp create mode 100644 modules/gapi/src/backends/common/gmetabackend.hpp create mode 100644 modules/gapi/test/gapi_graph_meta_tests.cpp diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt index d95f255951..ee275fe1af 100644 --- a/modules/gapi/CMakeLists.txt +++ b/modules/gapi/CMakeLists.txt @@ -57,6 +57,7 @@ file(GLOB gapi_ext_hdrs set(gapi_srcs # Front-end part + src/api/grunarg.cpp src/api/gorigin.cpp src/api/gmat.cpp src/api/garray.cpp @@ -131,18 +132,19 @@ set(gapi_srcs src/backends/ie/giebackend.cpp src/backends/ie/giebackend/giewrapper.cpp - # ONNX Backend. + # ONNX backend src/backends/onnx/gonnxbackend.cpp - # Render Backend. 
+ # Render backend src/backends/render/grenderocv.cpp src/backends/render/ft_render.cpp - #PlaidML Backend + # PlaidML Backend src/backends/plaidml/gplaidmlcore.cpp src/backends/plaidml/gplaidmlbackend.cpp - # Compound + # Common backend code + src/backends/common/gmetabackend.cpp src/backends/common/gcompoundbackend.cpp src/backends/common/gcompoundkernel.cpp diff --git a/modules/gapi/include/opencv2/gapi/garg.hpp b/modules/gapi/include/opencv2/gapi/garg.hpp index 67ce0d990c..0838573b56 100644 --- a/modules/gapi/include/opencv2/gapi/garg.hpp +++ b/modules/gapi/include/opencv2/gapi/garg.hpp @@ -9,12 +9,14 @@ #define OPENCV_GAPI_GARG_HPP #include +#include #include #include #include #include +#include #include #include @@ -93,7 +95,7 @@ using GArgs = std::vector; // FIXME: Express as M::type // FIXME: Move to a separate file! -using GRunArg = util::variant< +using GRunArgBase = util::variant< #if !defined(GAPI_STANDALONE) cv::UMat, #endif // !defined(GAPI_STANDALONE) @@ -105,6 +107,61 @@ using GRunArg = util::variant< cv::detail::OpaqueRef, cv::MediaFrame >; + +namespace detail { +template +struct in_variant; + +template +struct in_variant > + : std::integral_constant::value > { +}; +} // namespace detail + +struct GAPI_EXPORTS GRunArg: public GRunArgBase +{ + // Metadata information here + using Meta = std::unordered_map; + Meta meta; + + // Mimic the old GRunArg semantics here, old of the times when + // GRunArg was an alias to variant<> + GRunArg(); + GRunArg(const cv::GRunArg &arg); + GRunArg(cv::GRunArg &&arg); + + GRunArg& operator= (const GRunArg &arg); + GRunArg& operator= (GRunArg &&arg); + + template + GRunArg(const T &t, + const Meta &m = Meta{}, + typename std::enable_if< detail::in_variant::value, int>::type = 0) + : GRunArgBase(t) + , meta(m) + { + } + template + GRunArg(T &&t, + const Meta &m = Meta{}, + typename std::enable_if< detail::in_variant::value, int>::type = 0) + : GRunArgBase(std::move(t)) + , meta(m) + { + } + template auto operator= 
(const T &t) + -> typename std::enable_if< detail::in_variant::value, cv::GRunArg>::type& + { + GRunArgBase::operator=(t); + return *this; + } + template auto operator= (T&& t) + -> typename std::enable_if< detail::in_variant::value, cv::GRunArg>::type& + { + GRunArgBase::operator=(std::move(t)); + return *this; + } +}; using GRunArgs = std::vector; // TODO: Think about the addition operator @@ -129,11 +186,13 @@ namespace gapi namespace wip { /** - * @brief This aggregate type represents all types which G-API can handle (via variant). + * @brief This aggregate type represents all types which G-API can + * handle (via variant). * - * It only exists to overcome C++ language limitations (where a `using`-defined class can't be forward-declared). + * It only exists to overcome C++ language limitations (where a + * `using`-defined class can't be forward-declared). */ -struct Data: public GRunArg +struct GAPI_EXPORTS Data: public GRunArg { using GRunArg::GRunArg; template diff --git a/modules/gapi/include/opencv2/gapi/gopaque.hpp b/modules/gapi/include/opencv2/gapi/gopaque.hpp index 6ab28910d6..6117971768 100644 --- a/modules/gapi/include/opencv2/gapi/gopaque.hpp +++ b/modules/gapi/include/opencv2/gapi/gopaque.hpp @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -119,6 +120,7 @@ namespace detail virtual void mov(BasicOpaqueRef &ref) = 0; virtual const void* ptr() const = 0; + virtual void set(const cv::util::any &a) = 0; }; template class OpaqueRefT final: public BasicOpaqueRef @@ -212,6 +214,10 @@ namespace detail } virtual const void* ptr() const override { return &rref(); } + + virtual void set(const cv::util::any &a) override { + wref() = util::any_cast(a); + } }; // This class strips type information from OpaqueRefT<> and makes it usable @@ -285,6 +291,13 @@ namespace detail // May be used to uniquely identify this object internally const void *ptr() const { return m_ref->ptr(); } + + // Introduced for in-graph meta handling + OpaqueRef& 
operator= (const cv::util::any &a) + { + m_ref->set(a); + return *this; + } }; } // namespace detail diff --git a/modules/gapi/include/opencv2/gapi/streaming/cap.hpp b/modules/gapi/include/opencv2/gapi/streaming/cap.hpp index 9781ef1ffb..aad6af618c 100644 --- a/modules/gapi/include/opencv2/gapi/streaming/cap.hpp +++ b/modules/gapi/include/opencv2/gapi/streaming/cap.hpp @@ -21,9 +21,11 @@ * Note for developers: please don't put videoio dependency in G-API * because of this file. */ +#include #include #include +#include namespace cv { namespace gapi { @@ -55,6 +57,7 @@ protected: cv::VideoCapture cap; cv::Mat first; bool first_pulled = false; + int64_t counter = 0; void prep() { @@ -80,19 +83,26 @@ protected: GAPI_Assert(!first.empty()); first_pulled = true; data = first; // no need to clone here since it was cloned already - return true; } - - if (!cap.isOpened()) return false; - - cv::Mat frame; - if (!cap.read(frame)) + else { - // end-of-stream happened - return false; + if (!cap.isOpened()) return false; + + cv::Mat frame; + if (!cap.read(frame)) + { + // end-of-stream happened + return false; + } + // Same reason to clone as in prep() + data = frame.clone(); } - // Same reason to clone as in prep() - data = frame.clone(); + // Tag data with seq_id/ts + const auto now = std::chrono::system_clock::now(); + const auto dur = std::chrono::duration_cast + (now.time_since_epoch()); + data.meta[cv::gapi::streaming::meta_tag::timestamp] = int64_t{dur.count()}; + data.meta[cv::gapi::streaming::meta_tag::seq_id] = int64_t{counter++}; return true; } diff --git a/modules/gapi/include/opencv2/gapi/streaming/meta.hpp b/modules/gapi/include/opencv2/gapi/streaming/meta.hpp new file mode 100644 index 0000000000..cbcfc3aa37 --- /dev/null +++ b/modules/gapi/include/opencv2/gapi/streaming/meta.hpp @@ -0,0 +1,79 @@ +// This file is part of OpenCV project. 
+// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + + +#ifndef OPENCV_GAPI_GSTREAMING_META_HPP +#define OPENCV_GAPI_GSTREAMING_META_HPP + +#include +#include +#include +#include + +namespace cv { +namespace gapi { +namespace streaming { + +// FIXME: the name is debatable +namespace meta_tag { +static constexpr const char * timestamp = "org.opencv.gapi.meta.timestamp"; +static constexpr const char * seq_id = "org.opencv.gapi.meta.seq_id"; +} // namespace meta_tag + +namespace detail { +struct GMeta { + static const char *id() { + return "org.opencv.streaming.meta"; + } + // A universal yield for meta(), same as in GDesync + template + static std::tuple yield(cv::GCall &call, cv::detail::Seq) { + return std::make_tuple(cv::detail::Yield::yield(call, IIs)...); + } + // Also a universal outMeta stub here + static GMetaArgs getOutMeta(const GMetaArgs &args, const GArgs &) { + return args; + } +}; +} // namespace detail + +template +cv::GOpaque meta(G g, const std::string &tag) { + using O = cv::GOpaque; + cv::GKernel k{ + detail::GMeta::id() // kernel id + , tag // kernel tag. 
Use meta tag here + , &detail::GMeta::getOutMeta // outMeta callback + , {cv::detail::GTypeTraits::shape} // output Shape + , {cv::detail::GTypeTraits::op_kind} // input data kinds + , {cv::detail::GObtainCtor::get()} // output template ctors + }; + cv::GCall call(std::move(k)); + call.pass(g); + return std::get<0>(detail::GMeta::yield(call, cv::detail::MkSeq<1>::type())); +} + +template +cv::GOpaque timestamp(G g) { + return meta(g, meta_tag::timestamp); +} + +template +cv::GOpaque seq_id(G g) { + return meta(g, meta_tag::seq_id); +} + +template +cv::GOpaque seqNo(G g) { + // Old name, compatibility only + return seq_id(g); +} + +} // namespace streaming +} // namespace gapi +} // namespace cv + +#endif // OPENCV_GAPI_GSTREAMING_META_HPP diff --git a/modules/gapi/src/api/gbackend.cpp b/modules/gapi/src/api/gbackend.cpp index 6b8d0fcbee..fd4a5eb38b 100644 --- a/modules/gapi/src/api/gbackend.cpp +++ b/modules/gapi/src/api/gbackend.cpp @@ -143,6 +143,14 @@ void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, HandleRMat handle if (handleRMat == HandleRMat::SKIP) return; GAPI_Assert(arg.index() == GRunArg::index_of()); bindRMat(mag, rc, util::get(arg), RMat::Access::R); + + // FIXME: Here meta may^WWILL be copied multiple times! + // Replace it is reference-counted object? 
+ mag.meta()[rc.id] = arg.meta; + mag.meta()[rc.id] = arg.meta; +#if !defined(GAPI_STANDALONE) + mag.meta()[rc.id] = arg.meta; +#endif break; } @@ -154,19 +162,23 @@ void bindInArg(Mag& mag, const RcDesc &rc, const GRunArg &arg, HandleRMat handle case GRunArg::index_of() : mag_scalar = util::get(arg); break; default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?")); } + mag.meta()[rc.id] = arg.meta; break; } case GShape::GARRAY: - mag.template slot()[rc.id] = util::get(arg); + mag.slot()[rc.id] = util::get(arg); + mag.meta()[rc.id] = arg.meta; break; case GShape::GOPAQUE: - mag.template slot()[rc.id] = util::get(arg); + mag.slot()[rc.id] = util::get(arg); + mag.meta()[rc.id] = arg.meta; break; case GShape::GFRAME: - mag.template slot()[rc.id] = util::get(arg); + mag.slot()[rc.id] = util::get(arg); + mag.meta()[rc.id] = arg.meta; break; default: @@ -250,13 +262,23 @@ cv::GRunArg getArg(const Mag& mag, const RcDesc &ref) // Wrap associated CPU object (either host or an internal one) switch (ref.shape) { - case GShape::GMAT: return GRunArg(mag.template slot().at(ref.id)); - case GShape::GSCALAR: return GRunArg(mag.template slot().at(ref.id)); + case GShape::GMAT: + return GRunArg(mag.slot().at(ref.id), + mag.meta().at(ref.id)); + case GShape::GSCALAR: + return GRunArg(mag.slot().at(ref.id), + mag.meta().at(ref.id)); // Note: .at() is intentional for GArray and GOpaque as objects MUST be already there // (and constructed by either bindIn/Out or resetInternal) - case GShape::GARRAY: return GRunArg(mag.template slot().at(ref.id)); - case GShape::GOPAQUE: return GRunArg(mag.template slot().at(ref.id)); - case GShape::GFRAME: return GRunArg(mag.template slot().at(ref.id)); + case GShape::GARRAY: + return GRunArg(mag.slot().at(ref.id), + mag.meta().at(ref.id)); + case GShape::GOPAQUE: + return GRunArg(mag.slot().at(ref.id), + mag.meta().at(ref.id)); + case GShape::GFRAME: + return 
GRunArg(mag.slot().at(ref.id), + mag.meta().at(ref.id)); default: util::throw_error(std::logic_error("Unsupported GShape type")); break; diff --git a/modules/gapi/src/api/grunarg.cpp b/modules/gapi/src/api/grunarg.cpp new file mode 100644 index 0000000000..30ae2adbc0 --- /dev/null +++ b/modules/gapi/src/api/grunarg.cpp @@ -0,0 +1,33 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#include "precomp.hpp" +#include + +cv::GRunArg::GRunArg() { +} + +cv::GRunArg::GRunArg(const cv::GRunArg &arg) + : cv::GRunArgBase(static_cast(arg)) + , meta(arg.meta) { +} + +cv::GRunArg::GRunArg(cv::GRunArg &&arg) + : cv::GRunArgBase(std::move(static_cast(arg))) + , meta(std::move(arg.meta)) { +} + +cv::GRunArg& cv::GRunArg::operator= (const cv::GRunArg &arg) { + cv::GRunArgBase::operator=(static_cast(arg)); + meta = arg.meta; + return *this; +} + +cv::GRunArg& cv::GRunArg::operator= (cv::GRunArg &&arg) { + cv::GRunArgBase::operator=(std::move(static_cast(arg))); + meta = std::move(arg.meta); + return *this; +} diff --git a/modules/gapi/src/backends/common/gbackend.hpp b/modules/gapi/src/backends/common/gbackend.hpp index 4914715fa7..576168db53 100644 --- a/modules/gapi/src/backends/common/gbackend.hpp +++ b/modules/gapi/src/backends/common/gbackend.hpp @@ -62,6 +62,8 @@ namespace magazine { template struct Class { template using MapT = std::unordered_map; + using MapM = std::unordered_map; + template MapT& slot() { return std::get::value>(slots); @@ -70,8 +72,17 @@ namespace magazine { { return std::get::value>(slots); } + template MapM& meta() + { + return metas[ade::util::type_list_index::value]; + } + template const MapM& meta() const + { + return metas[ade::util::type_list_index::value]; + } private: std::tuple...> slots; + std::array metas; }; } // namespace magazine diff --git 
a/modules/gapi/src/backends/common/gmetabackend.cpp b/modules/gapi/src/backends/common/gmetabackend.cpp new file mode 100644 index 0000000000..5364152b65 --- /dev/null +++ b/modules/gapi/src/backends/common/gmetabackend.cpp @@ -0,0 +1,105 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. +// +// Copyright (C) 2020 Intel Corporation + +#include "precomp.hpp" + +#include // compile args +#include // any +#include // GMeta + +#include "compiler/gobjref.hpp" // RcDesc +#include "compiler/gmodel.hpp" // GModel, Op +#include "backends/common/gbackend.hpp" +#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK! + +#include "backends/common/gmetabackend.hpp" + +namespace { + +class GraphMetaExecutable final: public cv::gimpl::GIslandExecutable { + std::string m_meta_tag; + +public: + GraphMetaExecutable(const ade::Graph& g, + const std::vector& nodes); + bool canReshape() const override; + void reshape(ade::Graph&, const cv::GCompileArgs&) override; + + void run(std::vector &&input_objs, + std::vector &&output_objs) override; +}; + +bool GraphMetaExecutable::canReshape() const { + return true; +} +void GraphMetaExecutable::reshape(ade::Graph&, const cv::GCompileArgs&) { + // do nothing here +} + +GraphMetaExecutable::GraphMetaExecutable(const ade::Graph& g, + const std::vector& nodes) { + // There may be only one node in the graph + GAPI_Assert(nodes.size() == 1u); + + cv::gimpl::GModel::ConstGraph cg(g); + const auto &op = cg.metadata(nodes[0]).get(); + GAPI_Assert(op.k.name == cv::gapi::streaming::detail::GMeta::id()); + m_meta_tag = op.k.tag; +} + +void GraphMetaExecutable::run(std::vector &&input_objs, + std::vector &&output_objs) { + GAPI_Assert(input_objs.size() == 1u); + GAPI_Assert(output_objs.size() == 1u); + + const cv::GRunArg in_arg = input_objs[0].second; + cv::GRunArgP out_arg = 
output_objs[0].second; + + auto it = in_arg.meta.find(m_meta_tag); + if (it == in_arg.meta.end()) { + cv::util::throw_error + (std::logic_error("Run-time meta " + + m_meta_tag + + " is not found in object " + + std::to_string(static_cast(input_objs[0].first.shape)) + + "/" + + std::to_string(input_objs[0].first.id))); + } + cv::util::get(out_arg) = it->second; +} + +class GraphMetaBackendImpl final: public cv::gapi::GBackend::Priv { + virtual void unpackKernel(ade::Graph &, + const ade::NodeHandle &, + const cv::GKernelImpl &) override { + // Do nothing here + } + + virtual EPtr compile(const ade::Graph& graph, + const cv::GCompileArgs&, + const std::vector& nodes, + const std::vector&, + const std::vector&) const override { + return EPtr{new GraphMetaExecutable(graph, nodes)}; + } +}; + +cv::gapi::GBackend graph_meta_backend() { + static cv::gapi::GBackend this_backend(std::make_shared()); + return this_backend; +} + +struct InGraphMetaKernel final: public cv::detail::KernelTag { + using API = cv::gapi::streaming::detail::GMeta; + static cv::gapi::GBackend backend() { return graph_meta_backend(); } + static int kernel() { return 42; } +}; + +} // anonymous namespace + +cv::gapi::GKernelPackage cv::gimpl::meta::kernels() { + return cv::gapi::kernels(); +} diff --git a/modules/gapi/src/backends/common/gmetabackend.hpp b/modules/gapi/src/backends/common/gmetabackend.hpp new file mode 100644 index 0000000000..56f61d0e3d --- /dev/null +++ b/modules/gapi/src/backends/common/gmetabackend.hpp @@ -0,0 +1,16 @@ +#ifndef OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP +#define OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP + +#include + +namespace cv { +namespace gimpl { +namespace meta { + +cv::gapi::GKernelPackage kernels(); + +} // namespace meta +} // namespace gimpl +} // namespace cv + +#endif // OPENCV_GAPI_SRC_COMMON_META_BACKEND_HPP diff --git a/modules/gapi/src/compiler/gcompiler.cpp b/modules/gapi/src/compiler/gcompiler.cpp index eb75f44e0e..f6fa398c17 100644 --- 
a/modules/gapi/src/compiler/gcompiler.cpp +++ b/modules/gapi/src/compiler/gcompiler.cpp @@ -35,6 +35,7 @@ #include "executor/gexecutor.hpp" #include "executor/gstreamingexecutor.hpp" #include "backends/common/gbackend.hpp" +#include "backends/common/gmetabackend.hpp" // #if !defined(GAPI_STANDALONE) @@ -58,7 +59,8 @@ namespace for (const auto &b : pkg.backends()) { aux_pkg = combine(aux_pkg, b.priv().auxiliaryKernels()); } - return combine(pkg, aux_pkg); + // Always include built-in meta<> implementation + return combine(pkg, aux_pkg, cv::gimpl::meta::kernels()); }; auto has_use_only = cv::gapi::getCompileArg(args); diff --git a/modules/gapi/src/compiler/gislandmodel.cpp b/modules/gapi/src/compiler/gislandmodel.cpp index 9ffc605372..4d0feaea71 100644 --- a/modules/gapi/src/compiler/gislandmodel.cpp +++ b/modules/gapi/src/compiler/gislandmodel.cpp @@ -357,26 +357,21 @@ void GIslandExecutable::run(GIslandExecutable::IInput &in, GIslandExecutable::IO for (auto &&it: ade::util::zip(ade::util::toRange(in_desc), ade::util::toRange(in_vector))) { - // FIXME: Not every Island expects a cv::Mat instead of own::Mat on input - // This kludge should go as a result of de-ownification const cv::GRunArg& in_data_orig = std::get<1>(it); cv::GRunArg in_data; -#if !defined(GAPI_STANDALONE) switch (in_data_orig.index()) { case cv::GRunArg::index_of(): - in_data = cv::GRunArg{cv::make_rmat(cv::util::get(in_data_orig))}; - break; - case cv::GRunArg::index_of(): - in_data = cv::GRunArg{(cv::util::get(in_data_orig))}; + // FIXME: This whole construct is ugly, from + // its writing to a need in this in general + in_data = cv::GRunArg{ cv::make_rmat(cv::util::get(in_data_orig)) + , in_data_orig.meta + }; break; default: in_data = in_data_orig; break; } -#else - in_data = in_data_orig; -#endif // GAPI_STANDALONE in_objs.emplace_back(std::get<0>(it), std::move(in_data)); } for (auto &&it: ade::util::indexed(ade::util::toRange(out_desc))) @@ -385,9 +380,27 @@ void 
GIslandExecutable::run(GIslandExecutable::IInput &in, GIslandExecutable::IO out.get(ade::util::checked_cast(ade::util::index(it)))); } run(std::move(in_objs), std::move(out_objs)); + + // Propagate in-graph meta down to the graph + // Note: this is not a complete implementation! Mainly this is a stub + // and the proper implementation should come later. + // + // Propagating the meta information here has its pros and cons. + // Pros: it works here uniformly for both regular and streaming cases, + // also for the majority of old-fashioned (synchronous) backends + // Cons: backends implementing the asynchronous run(IInput,IOutput) + // won't get it out of the box + cv::GRunArg::Meta stub_meta; + for (auto &&in_arg : in_vector) + { + stub_meta.insert(in_arg.meta.begin(), in_arg.meta.end()); + } + // Report output objects as "ready" to the executor, also post + // calculated in-graph meta for the objects for (auto &&it: out_objs) { - out.post(std::move(it.second)); // report output objects as "ready" to the executor + out.meta(it.second, stub_meta); + out.post(std::move(it.second)); } } diff --git a/modules/gapi/src/compiler/gislandmodel.hpp b/modules/gapi/src/compiler/gislandmodel.hpp index c2e7b96d45..e8eb73692b 100644 --- a/modules/gapi/src/compiler/gislandmodel.hpp +++ b/modules/gapi/src/compiler/gislandmodel.hpp @@ -172,6 +172,10 @@ struct GIslandExecutable::IOutput: public GIslandExecutable::IODesc { virtual GRunArgP get(int idx) = 0; // Allocate (wrap) a new data object for output idx virtual void post(GRunArgP&&) = 0; // Release the object back to the framework (mark available) virtual void post(EndOfStream&&) = 0; // Post end-of-stream marker back to the framework + + // Assign accumulated metadata to the given output object. + // This method can only be called after get() and before post(). 
+ virtual void meta(const GRunArgP&, const GRunArg::Meta &) = 0; }; // GIslandEmitter - a backend-specific thing which feeds data into diff --git a/modules/gapi/src/executor/gexecutor.cpp b/modules/gapi/src/executor/gexecutor.cpp index d9f5cfafe6..66f3b24771 100644 --- a/modules/gapi/src/executor/gexecutor.cpp +++ b/modules/gapi/src/executor/gexecutor.cpp @@ -12,6 +12,8 @@ #include #include + +#include "api/gproto_priv.hpp" // ptr(GRunArgP) #include "executor/gexecutor.hpp" #include "compiler/passes/passes.hpp" @@ -105,6 +107,9 @@ void bindInArgExec(Mag& mag, const RcDesc &rc, const GRunArg &arg) mag_rmat = util::get(arg); break; default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?")); } + // FIXME: has to take extra care about meta here for this particuluar + // case, just because this function exists at all + mag.meta()[rc.id] = arg.meta; } void bindOutArgExec(Mag& mag, const RcDesc &rc, const GRunArgP &arg) @@ -131,7 +136,7 @@ cv::GRunArgP getObjPtrExec(Mag& mag, const RcDesc &rc) { return getObjPtr(mag, rc); } - return GRunArgP(&mag.template slot()[rc.id]); + return GRunArgP(&mag.slot()[rc.id]); } void writeBackExec(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg) @@ -155,6 +160,25 @@ void writeBackExec(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg) default: util::throw_error(std::logic_error("content type of the runtime argument does not match to resource description ?")); } } + +void assignMetaStubExec(Mag& mag, const RcDesc &rc, const cv::GRunArg::Meta &meta) { + switch (rc.shape) + { + case GShape::GARRAY: mag.meta()[rc.id] = meta; break; + case GShape::GOPAQUE: mag.meta()[rc.id] = meta; break; + case GShape::GSCALAR: mag.meta()[rc.id] = meta; break; + case GShape::GFRAME: mag.meta()[rc.id] = meta; break; + case GShape::GMAT: + mag.meta() [rc.id] = meta; + mag.meta()[rc.id] = meta; +#if !defined(GAPI_STANDALONE) + mag.meta()[rc.id] = meta; +#endif + break; + default: 
util::throw_error(std::logic_error("Unsupported GShape type")); break; + } +} + } // anonymous namespace }}} // namespace cv::gimpl::magazine @@ -231,11 +255,28 @@ public: class cv::gimpl::GExecutor::Output final: public cv::gimpl::GIslandExecutable::IOutput { cv::gimpl::Mag &mag; - virtual GRunArgP get(int idx) override { return magazine::getObjPtrExec(mag, desc()[idx]); } - virtual void post(GRunArgP&&) override { } // Do nothing here - virtual void post(EndOfStream&&) override {} // Do nothing here too + std::unordered_map out_idx; + + GRunArgP get(int idx) override + { + auto r = magazine::getObjPtrExec(mag, desc()[idx]); + // Remember the output port for this output object + out_idx[cv::gimpl::proto::ptr(r)] = idx; + return r; + } + void post(GRunArgP&&) override { } // Do nothing here + void post(EndOfStream&&) override {} // Do nothing here too + void meta(const GRunArgP &out, const GRunArg::Meta &m) override + { + const auto idx = out_idx.at(cv::gimpl::proto::ptr(out)); + magazine::assignMetaStubExec(mag, desc()[idx], m); + } public: - Output(cv::gimpl::Mag &m, const std::vector &rcs) : mag(m) { set(rcs); } + Output(cv::gimpl::Mag &m, const std::vector &rcs) + : mag(m) + { + set(rcs); + } }; void cv::gimpl::GExecutor::run(cv::gimpl::GRuntimeArgs &&args) @@ -330,7 +371,7 @@ void cv::gimpl::GExecutor::run(cv::gimpl::GRuntimeArgs &&args) // Run the script for (auto &op : m_ops) { - // (5) + // (5), (6) Input i{m_res, op.in_objects}; Output o{m_res, op.out_objects}; op.isl_exec->run(i, o); diff --git a/modules/gapi/src/executor/gstreamingexecutor.cpp b/modules/gapi/src/executor/gstreamingexecutor.cpp index 653d20e712..58789889a3 100644 --- a/modules/gapi/src/executor/gstreamingexecutor.cpp +++ b/modules/gapi/src/executor/gstreamingexecutor.cpp @@ -350,16 +350,14 @@ bool QueueReader::getInputVector(std::vector &in_queues, // value-initialized scalar) // It can also hold a constant value received with // Stop::Kind::CNST message (see above). 
- // FIXME: Variant move problem - isl_inputs[id] = const_cast(in_constants[id]); + isl_inputs[id] = in_constants[id]; continue; } q->pop(m_cmd[id]); if (!cv::util::holds_alternative(m_cmd[id])) { - // FIXME: Variant move problem - isl_inputs[id] = const_cast(cv::util::get(m_cmd[id])); + isl_inputs[id] = cv::util::get(m_cmd[id]); } else // A Stop sign { @@ -382,7 +380,7 @@ bool QueueReader::getInputVector(std::vector &in_queues, // NEXT time (on a next call to getInputVector()), the // "q==nullptr" check above will be triggered, but now // we need to make it manually: - isl_inputs[id] = const_cast(in_constants[id]); + isl_inputs[id] = in_constants[id]; } else { @@ -666,8 +664,7 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput Cmd cmd; if (cv::util::holds_alternative(post_iter->data)) { - // FIXME: That ugly VARIANT problem - cmd = Cmd{const_cast(cv::util::get(post_iter->data))}; + cmd = Cmd{cv::util::get(post_iter->data)}; } else { @@ -677,8 +674,7 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput } for (auto &&q : m_out_queues[out_idx]) { - // FIXME: This ugly VARIANT problem - q->push(const_cast(cmd)); + q->push(cmd); } post_iter = m_postings[out_idx].erase(post_iter); } @@ -708,6 +704,15 @@ class StreamingOutput final: public cv::gimpl::GIslandExecutable::IOutput } } } + void meta(const cv::GRunArgP &out, const cv::GRunArg::Meta &m) override + { + const auto it = m_postIdx.find(cv::gimpl::proto::ptr(out)); + GAPI_Assert(it != m_postIdx.end()); + + const auto out_iter = it->second.second; + cv::util::get(out_iter->data).meta = m; + } + public: explicit StreamingOutput(const cv::GMetaArgs &metas, std::vector< std::vector > &out_queues, @@ -769,6 +774,7 @@ void islandActorThread(std::vector in_rcs, // void collectorThread(std::vector in_queues, std::vector in_mapping, const std::size_t out_size, + const bool handle_stop, Q& out_queue) { // These flags are static now: regardless if the sync or @@ -783,9 +789,14 
@@ void collectorThread(std::vector in_queues, while (true) { cv::GRunArgs this_result(out_size); - if (!qr.getResultsVector(in_queues, in_mapping, out_size, this_result)) + const bool ok = qr.getResultsVector(in_queues, in_mapping, out_size, this_result); + if (!ok) { - out_queue.push(Cmd{Stop{}}); + if (handle_stop) + { + out_queue.push(Cmd{Stop{}}); + } + // Terminate the thread anyway return; } out_queue.push(Cmd{Result{std::move(this_result), flags}}); @@ -1263,12 +1274,22 @@ void cv::gimpl::GStreamingExecutor::setSource(GRunArgs &&ins) // If there are desynchronized parts in the graph, there may be // multiple theads polling every separate (desynchronized) // branch in the graph individually. + const bool has_main_path = m_sink_sync.end() != + std::find(m_sink_sync.begin(), m_sink_sync.end(), -1); for (auto &&info : m_collector_map) { m_threads.emplace_back(collectorThread, info.second.queues, info.second.mapping, m_sink_queues.size(), + has_main_path ? info.first == -1 : true, // see below (*) std::ref(m_out_queue)); + + // (*) - there may be a problem with desynchronized paths when those work + // faster than the main path. In this case, the desync paths get "Stop" message + // earlier and thus broadcast it down to pipeline gets stopped when there is + // some "main path" data to process. This new collectorThread's flag regulates it: + // - desync paths should never post Stop message if there is a main path. + // - if there is no main path, than any desync path can terminate the execution. } state = State::READY; } diff --git a/modules/gapi/test/gapi_graph_meta_tests.cpp b/modules/gapi/test/gapi_graph_meta_tests.cpp new file mode 100644 index 0000000000..73c0da3c9e --- /dev/null +++ b/modules/gapi/test/gapi_graph_meta_tests.cpp @@ -0,0 +1,195 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. 
+// +// Copyright (C) 2020 Intel Corporation + +#include +#include + +#include "test_precomp.hpp" +#include "opencv2/gapi/streaming/meta.hpp" +#include "opencv2/gapi/streaming/cap.hpp" + +namespace opencv_test { + +namespace { +void initTestDataPath() { +#ifndef WINRT + static bool initialized = false; + if (!initialized) + { + // Since G-API has no own test data (yet), it is taken from the common space + const char* testDataPath = getenv("OPENCV_TEST_DATA_PATH"); + if (testDataPath != nullptr) { + cvtest::addDataSearchPath(testDataPath); + initialized = true; + } + } +#endif // WINRT +} +} // anonymous namespace + +TEST(GraphMeta, Trad_AccessInput) { + cv::GMat in; + cv::GMat out1 = cv::gapi::blur(in, cv::Size(3,3)); + cv::GOpaque out2 = cv::gapi::streaming::meta(in, "foo"); + cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2)); + + cv::Mat in_mat = cv::Mat::eye(cv::Size(64, 64), CV_8UC1); + cv::Mat out_mat; + int out_meta = 0; + + // manually set metadata in the input fields + auto inputs = cv::gin(in_mat); + inputs[0].meta["foo"] = 42; + + graph.apply(std::move(inputs), cv::gout(out_mat, out_meta)); + EXPECT_EQ(42, out_meta); +} + +TEST(GraphMeta, Trad_AccessTmp) { + cv::GMat in; + cv::GMat tmp = cv::gapi::blur(in, cv::Size(3,3)); + cv::GMat out1 = tmp+1; + cv::GOpaque out2 = cv::gapi::streaming::meta(tmp, "bar"); + cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2)); + + cv::Mat in_mat = cv::Mat::eye(cv::Size(64, 64), CV_8UC1); + cv::Mat out_mat; + float out_meta = 0.f; + + // manually set metadata in the input fields + auto inputs = cv::gin(in_mat); + inputs[0].meta["bar"] = 1.f; + + graph.apply(std::move(inputs), cv::gout(out_mat, out_meta)); + EXPECT_EQ(1.f, out_meta); +} + +TEST(GraphMeta, Trad_AccessOutput) { + cv::GMat in; + cv::GMat out1 = cv::gapi::blur(in, cv::Size(3,3)); + cv::GOpaque out2 = cv::gapi::streaming::meta(out1, "baz"); + cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2)); + + cv::Mat in_mat = cv::Mat::eye(cv::Size(64, 
64), CV_8UC1); + cv::Mat out_mat; + std::string out_meta; + + // manually set metadata in the input fields + auto inputs = cv::gin(in_mat); + + // NOTE: Assigning explicitly an std::string is important, + // otherwise a "const char*" will be stored and won't be + // translated properly by util::any since std::string is + // used within the graph. + inputs[0].meta["baz"] = std::string("opencv"); + + graph.apply(std::move(inputs), cv::gout(out_mat, out_meta)); + EXPECT_EQ("opencv", out_meta); +} + +TEST(GraphMeta, Streaming_AccessInput) { + initTestDataPath(); + + cv::GMat in; + cv::GMat out1 = cv::gapi::blur(in, cv::Size(3,3)); + cv::GOpaque out2 = cv::gapi::streaming::seq_id(in); + cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2)); + + auto ccomp = graph.compileStreaming(); + ccomp.setSource(findDataFile("cv/video/768x576.avi", false)); + ccomp.start(); + + cv::Mat out_mat; + int64_t out_meta = 0; + int64_t expected_counter = 0; + + while (ccomp.pull(cv::gout(out_mat, out_meta))) { + EXPECT_EQ(expected_counter, out_meta); + ++expected_counter; + } +} + +TEST(GraphMeta, Streaming_AccessOutput) { + initTestDataPath(); + + cv::GMat in; + cv::GMat out1 = cv::gapi::blur(in, cv::Size(3,3)); + cv::GOpaque out2 = cv::gapi::streaming::seq_id(out1); + cv::GOpaque out3 = cv::gapi::streaming::timestamp(out1); + cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2, out3)); + + auto ccomp = graph.compileStreaming(); + ccomp.setSource(findDataFile("cv/video/768x576.avi", false)); + ccomp.start(); + + cv::Mat out_mat; + int64_t out_meta = 0; + int64_t out_timestamp = 0; + int64_t expected_counter = 0; + int64_t prev_timestamp = -1; + + while (ccomp.pull(cv::gout(out_mat, out_meta, out_timestamp))) { + EXPECT_EQ(expected_counter, out_meta); + ++expected_counter; + + EXPECT_NE(prev_timestamp, out_timestamp); + prev_timestamp = out_timestamp; + } +} + +TEST(GraphMeta, Streaming_AccessDesync) { + initTestDataPath(); + + cv::GMat in; + cv::GOpaque out1 = 
cv::gapi::streaming::seq_id(in); + cv::GOpaque out2 = cv::gapi::streaming::timestamp(in); + cv::GMat out3 = cv::gapi::blur(in, cv::Size(3,3)); + + cv::GMat tmp = cv::gapi::streaming::desync(in); + cv::GScalar mean = cv::gapi::mean(tmp); + cv::GOpaque out4 = cv::gapi::streaming::seq_id(mean); + cv::GOpaque out5 = cv::gapi::streaming::timestamp(mean); + cv::GComputation graph(cv::GIn(in), cv::GOut(out1, out2, out3, out4, out5)); + + auto ccomp = graph.compileStreaming(); + ccomp.setSource(findDataFile("cv/video/768x576.avi", false)); + ccomp.start(); + + cv::optional out_sync_id; + cv::optional out_sync_ts; + cv::optional out_sync_mat; + + cv::optional out_desync_id; + cv::optional out_desync_ts; + + std::unordered_set sync_ids; + std::unordered_set desync_ids; + + while (ccomp.pull(cv::gout(out_sync_id, out_sync_ts, out_sync_mat, + out_desync_id, out_desync_ts))) { + if (out_sync_id.has_value()) { + CV_Assert(out_sync_ts.has_value()); + CV_Assert(out_sync_mat.has_value()); + sync_ids.insert(out_sync_id.value()); + } + if (out_desync_id.has_value()) { + CV_Assert(out_desync_ts.has_value()); + desync_ids.insert(out_desync_id.value()); + } + } + // Visually report that everything is really ok + std::cout << sync_ids.size() << " vs " << desync_ids.size() << std::endl; + + // Desync path should generate less objects than the synchronized one + EXPECT_GE(sync_ids.size(), desync_ids.size()); + + // ..but all desynchronized IDs must be present in the synchronized set + for (auto &&d_id : desync_ids) { + EXPECT_TRUE(sync_ids.count(d_id) > 0); + } +} + +} // namespace opencv_test From fe9a8ebea2a6f0cfe1ac3c720548695e5d3a6ce2 Mon Sep 17 00:00:00 2001 From: shioko Date: Tue, 17 Nov 2020 15:02:55 +0000 Subject: [PATCH 111/152] Fix typo 'Applicatioin' --- apps/interactive-calibration/parametersController.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/interactive-calibration/parametersController.cpp 
b/apps/interactive-calibration/parametersController.cpp index c76b915c63..3bcf5b86e9 100644 --- a/apps/interactive-calibration/parametersController.cpp +++ b/apps/interactive-calibration/parametersController.cpp @@ -32,7 +32,7 @@ bool calib::parametersController::loadFromFile(const std::string &inputFileName) if(!reader.isOpened()) { std::cerr << "Warning: Unable to open " << inputFileName << - " Applicatioin stated with default advanced parameters" << std::endl; + " Application started with default advanced parameters" << std::endl; return true; } From 2c6a2f0381b6b51d93b7db7907c55e5c944b6ce9 Mon Sep 17 00:00:00 2001 From: Orest Chura Date: Tue, 17 Nov 2020 18:59:59 +0300 Subject: [PATCH 112/152] Merge pull request #18790 from OrestChura:oc/fitLine [G-API]: fitLine() Standard Kernel Implementation * fitLine API (Mat, 32S, 32F) (2D, 3D) * Complete fitLine kernel & accuracy tests - initialization for vectors of cv::Point and Mats via vectors added - comparison functions for Vec added: - straight average difference comparison - comparison by equasion for 2d line - stream overload for cv::DistanceTypes added * Fix precommit warnings * Fix docs * Address comments Try to fix warning * Disable warning in tests --- modules/gapi/include/opencv2/gapi/imgproc.hpp | 213 +++++++++++++++++- modules/gapi/src/api/kernels_imgproc.cpp | 48 ++++ modules/gapi/src/backends/cpu/gcpuimgproc.cpp | 80 +++++++ .../gapi/test/common/gapi_imgproc_tests.hpp | 16 ++ .../test/common/gapi_imgproc_tests_inl.hpp | 186 +++++++++++++++ .../gapi/test/common/gapi_tests_common.hpp | 205 +++++++++++++++++ .../gapi/test/cpu/gapi_imgproc_tests_cpu.cpp | 68 ++++++ 7 files changed, 815 insertions(+), 1 deletion(-) diff --git a/modules/gapi/include/opencv2/gapi/imgproc.hpp b/modules/gapi/include/opencv2/gapi/imgproc.hpp index 0e4254cb87..e41c2507f2 100644 --- a/modules/gapi/include/opencv2/gapi/imgproc.hpp +++ b/modules/gapi/include/opencv2/gapi/imgproc.hpp @@ -46,7 +46,7 @@ void 
validateFindingContoursMeta(const int depth, const int chan, const int mode // Checks if the passed mat is a set of n-dimentional points of the given depth bool isPointsVector(const int chan, const cv::Size &size, const int depth, - const int n, const int ddepth) + const int n, const int ddepth = -1) { return (ddepth == depth || ddepth < 0) && ((chan == n && (size.height == 1 || size.width == 1)) || @@ -234,6 +234,70 @@ namespace imgproc { } }; + G_TYPED_KERNEL(GFitLine2DMat, (GMat,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine2DMat") { + static GOpaqueDesc outMeta(GMatDesc in,DistanceTypes,double,double,double) { + GAPI_Assert(isPointsVector(in.chan, in.size, in.depth, 2, -1)); + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine2DVector32S, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine2DVector32S") { + static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine2DVector32F, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine2DVector32F") { + static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine2DVector64F, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine2DVector64F") { + static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine3DMat, (GMat,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine3DMat") { + static GOpaqueDesc outMeta(GMatDesc in,int,double,double,double) { + GAPI_Assert(isPointsVector(in.chan, in.size, in.depth, 3, -1)); + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine3DVector32S, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine3DVector32S") { + static GOpaqueDesc 
outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine3DVector32F, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine3DVector32F") { + static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + + G_TYPED_KERNEL(GFitLine3DVector64F, + (GArray,DistanceTypes,double,double,double)>, + "org.opencv.imgproc.shape.fitLine3DVector64F") { + static GOpaqueDesc outMeta(GArrayDesc,DistanceTypes,double,double,double) { + return empty_gopaque_desc(); + } + }; + G_TYPED_KERNEL(GBGR2RGB, , "org.opencv.imgproc.colorconvert.bgr2rgb") { static GMatDesc outMeta(GMatDesc in) { return in; // type still remains CV_8UC3; @@ -1111,6 +1175,153 @@ Calculates the up-right bounding rectangle of a point set. */ GAPI_EXPORTS GOpaque boundingRect(const GArray& src); +/** @brief Fits a line to a 2D point set. + +The function fits a line to a 2D point set by minimizing \f$\sum_i \rho(r_i)\f$ where +\f$r_i\f$ is a distance between the \f$i^{th}\f$ point, the line and \f$\rho(r)\f$ is a distance +function, one of the following: +- DIST_L2 +\f[\rho (r) = r^2/2 \quad \text{(the simplest and the fastest least-squares method)}\f] +- DIST_L1 +\f[\rho (r) = r\f] +- DIST_L12 +\f[\rho (r) = 2 \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f] +- DIST_FAIR +\f[\rho \left (r \right ) = C^2 \cdot \left ( \frac{r}{C} - \log{\left(1 + \frac{r}{C}\right)} \right ) \quad \text{where} \quad C=1.3998\f] +- DIST_WELSCH +\f[\rho \left (r \right ) = \frac{C^2}{2} \cdot \left ( 1 - \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right ) \quad \text{where} \quad C=2.9846\f] +- DIST_HUBER +\f[\rho (r) = \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f] + +The algorithm is based on the M-estimator ( ) technique +that iteratively fits the line using the weighted least-squares algorithm. 
After each iteration the +weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ . + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DMat" + +@param src Input set of 2D points stored in one of possible containers: Mat, +std::vector, std::vector, std::vector. + +@note In case of an N-dimentional points' set given, Mat should be 2-dimensional, have a single row +or column if there are N channels, or have N columns if there is a single channel. + +@param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER +and @ref DIST_C are not suppored. +@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value +is chosen. +@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the +line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen. +@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps. +If it is 0, a default value is chosen. + +@return Output line parameters: a vector of 4 elements (like Vec4f) - (vx, vy, x0, y0), +where (vx, vy) is a normalized vector collinear to the line and (x0, y0) is a point on the line. 
+ */ +GAPI_EXPORTS GOpaque fitLine2D(const GMat& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32S" + + */ +GAPI_EXPORTS GOpaque fitLine2D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector32F" + + */ +GAPI_EXPORTS GOpaque fitLine2D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine2DVector64F" + + */ +GAPI_EXPORTS GOpaque fitLine2D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @brief Fits a line to a 3D point set. + +The function fits a line to a 3D point set by minimizing \f$\sum_i \rho(r_i)\f$ where +\f$r_i\f$ is a distance between the \f$i^{th}\f$ point, the line and \f$\rho(r)\f$ is a distance +function, one of the following: +- DIST_L2 +\f[\rho (r) = r^2/2 \quad \text{(the simplest and the fastest least-squares method)}\f] +- DIST_L1 +\f[\rho (r) = r\f] +- DIST_L12 +\f[\rho (r) = 2 \cdot ( \sqrt{1 + \frac{r^2}{2}} - 1)\f] +- DIST_FAIR +\f[\rho \left (r \right ) = C^2 \cdot \left ( \frac{r}{C} - \log{\left(1 + \frac{r}{C}\right)} \right ) \quad \text{where} \quad C=1.3998\f] +- DIST_WELSCH +\f[\rho \left (r \right ) = \frac{C^2}{2} \cdot \left ( 1 - \exp{\left(-\left(\frac{r}{C}\right)^2\right)} \right ) \quad \text{where} \quad C=2.9846\f] +- DIST_HUBER +\f[\rho (r) = \fork{r^2/2}{if \(r < C\)}{C \cdot (r-C/2)}{otherwise} \quad \text{where} \quad C=1.345\f] + +The algorithm is based on the M-estimator ( ) technique +that iteratively fits the line using the weighted least-squares algorithm. 
After each iteration the +weights \f$w_i\f$ are adjusted to be inversely proportional to \f$\rho(r_i)\f$ . + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DMat" + +@param src Input set of 3D points stored in one of possible containers: Mat, +std::vector, std::vector, std::vector. + +@note In case of an N-dimentional points' set given, Mat should be 2-dimensional, have a single row +or column if there are N channels, or have N columns if there is a single channel. + +@param distType Distance used by the M-estimator, see #DistanceTypes. @ref DIST_USER +and @ref DIST_C are not suppored. +@param param Numerical parameter ( C ) for some types of distances. If it is 0, an optimal value +is chosen. +@param reps Sufficient accuracy for the radius (distance between the coordinate origin and the +line). 1.0 would be a good default value for reps. If it is 0, a default value is chosen. +@param aeps Sufficient accuracy for the angle. 0.01 would be a good default value for aeps. +If it is 0, a default value is chosen. + +@return Output line parameters: a vector of 6 elements (like Vec6f) - (vx, vy, vz, x0, y0, z0), +where (vx, vy, vz) is a normalized vector collinear to the line and (x0, y0, z0) is a point on +the line. 
+ */ +GAPI_EXPORTS GOpaque fitLine3D(const GMat& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32S" + + */ +GAPI_EXPORTS GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector32F" + + */ +GAPI_EXPORTS GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + +/** @overload + +@note Function textual ID is "org.opencv.imgproc.shape.fitLine3DVector64F" + + */ +GAPI_EXPORTS GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param = 0., const double reps = 0., + const double aeps = 0.); + //! @} gapi_shape //! @addtogroup gapi_colorconvert diff --git a/modules/gapi/src/api/kernels_imgproc.cpp b/modules/gapi/src/api/kernels_imgproc.cpp index faf8de54c7..41085a7ebf 100644 --- a/modules/gapi/src/api/kernels_imgproc.cpp +++ b/modules/gapi/src/api/kernels_imgproc.cpp @@ -164,6 +164,54 @@ GOpaque boundingRect(const GArray& src) return imgproc::GBoundingRectVector32F::on(src); } +GOpaque fitLine2D(const GMat& src, const DistanceTypes distType, const double param, + const double reps, const double aeps) +{ + return imgproc::GFitLine2DMat::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine2D(const GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine2DVector32S::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine2D(const GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine2DVector32F::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine2D(const 
GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine2DVector64F::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine3D(const GMat& src, const DistanceTypes distType, const double param, + const double reps, const double aeps) +{ + return imgproc::GFitLine3DMat::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine3DVector32S::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine3DVector32F::on(src, distType, param, reps, aeps); +} + +GOpaque fitLine3D(const GArray& src, const DistanceTypes distType, + const double param, const double reps, const double aeps) +{ + return imgproc::GFitLine3DVector64F::on(src, distType, param, reps, aeps); +} + GMat BGR2RGB(const GMat& src) { return imgproc::GBGR2RGB::on(src); diff --git a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp index 9eca0f12f0..6cbf0d32f0 100644 --- a/modules/gapi/src/backends/cpu/gcpuimgproc.cpp +++ b/modules/gapi/src/backends/cpu/gcpuimgproc.cpp @@ -285,6 +285,78 @@ GAPI_OCV_KERNEL(GCPUBoundingRectVector32F, cv::gapi::imgproc::GBoundingRectVecto } }; +GAPI_OCV_KERNEL(GCPUFitLine2DMat, cv::gapi::imgproc::GFitLine2DMat) +{ + static void run(const cv::Mat& in, const cv::DistanceTypes distType, const double param, + const double reps, const double aeps, cv::Vec4f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine2DVector32S, cv::gapi::imgproc::GFitLine2DVector32S) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec4f& out) + { + cv::fitLine(in, 
out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine2DVector32F, cv::gapi::imgproc::GFitLine2DVector32F) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec4f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine2DVector64F, cv::gapi::imgproc::GFitLine2DVector64F) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec4f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine3DMat, cv::gapi::imgproc::GFitLine3DMat) +{ + static void run(const cv::Mat& in, const cv::DistanceTypes distType, const double param, + const double reps, const double aeps, cv::Vec6f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine3DVector32S, cv::gapi::imgproc::GFitLine3DVector32S) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec6f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine3DVector32F, cv::gapi::imgproc::GFitLine3DVector32F) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec6f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + +GAPI_OCV_KERNEL(GCPUFitLine3DVector64F, cv::gapi::imgproc::GFitLine3DVector64F) +{ + static void run(const std::vector& in, const cv::DistanceTypes distType, + const double param, const double reps, const double aeps, cv::Vec6f& out) + { + cv::fitLine(in, out, distType, param, reps, aeps); + } +}; + GAPI_OCV_KERNEL(GCPUBGR2RGB, cv::gapi::imgproc::GBGR2RGB) { static void run(const cv::Mat& in, cv::Mat &out) @@ -569,6 +641,14 @@ cv::gapi::GKernelPackage 
cv::gapi::imgproc::cpu::kernels() , GCPUBoundingRectMat , GCPUBoundingRectVector32S , GCPUBoundingRectVector32F + , GCPUFitLine2DMat + , GCPUFitLine2DVector32S + , GCPUFitLine2DVector32F + , GCPUFitLine2DVector64F + , GCPUFitLine3DMat + , GCPUFitLine3DVector32S + , GCPUFitLine3DVector32F + , GCPUFitLine3DVector64F , GCPUYUV2RGB , GCPUBGR2I420 , GCPURGB2I420 diff --git a/modules/gapi/test/common/gapi_imgproc_tests.hpp b/modules/gapi/test/common/gapi_imgproc_tests.hpp index b27da28c87..b48b7b6732 100644 --- a/modules/gapi/test/common/gapi_imgproc_tests.hpp +++ b/modules/gapi/test/common/gapi_imgproc_tests.hpp @@ -83,6 +83,22 @@ GAPI_TEST_FIXTURE(BoundingRectVector32STest, initNothing, FIXTURE_API(CompareRec GAPI_TEST_FIXTURE(BoundingRectVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF) GAPI_TEST_FIXTURE(BGR2RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(RGB2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) +GAPI_TEST_FIXTURE(FitLine2DMatVectorTest, initMatByPointsVectorRandU, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine2DVector32STest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine2DVector32FTest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine2DVector64FTest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine3DMatVectorTest, initMatByPointsVectorRandU, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine3DVector32STest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine3DVector32FTest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) +GAPI_TEST_FIXTURE(FitLine3DVector64FTest, initNothing, + FIXTURE_API(CompareVecs,cv::DistanceTypes), 2, cmpF, distType) 
GAPI_TEST_FIXTURE(BGR2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(RGB2YUVTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) GAPI_TEST_FIXTURE(BGR2I420Test, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF) diff --git a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp index 91e676c5e7..2a4f2e64ea 100644 --- a/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp +++ b/modules/gapi/test/common/gapi_imgproc_tests_inl.hpp @@ -752,6 +752,192 @@ TEST_P(BoundingRectVector32FTest, AccuracyTest) } } +TEST_P(FitLine2DMatVectorTest, AccuracyTest) +{ + cv::Vec4f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_mat1, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine2DVector32STest, AccuracyTest) +{ + cv::Vec4f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + 
cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine2DVector32FTest, AccuracyTest) +{ + cv::Vec4f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine2DVector64FTest, AccuracyTest) +{ + cv::Vec4f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine2D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine3DMatVectorTest, AccuracyTest) +{ + cv::Vec6f out_vec_gapi, out_vec_ocv; + 
double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + // G-API code ////////////////////////////////////////////////////////////// + cv::GMat in; + auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_mat1), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_mat1, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine3DVector32STest, AccuracyTest) +{ + cv::Vec6f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine3DVector32FTest, AccuracyTest) +{ + cv::Vec6f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + 
c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + +TEST_P(FitLine3DVector64FTest, AccuracyTest) +{ + cv::Vec6f out_vec_gapi, out_vec_ocv; + double paramDefault = 0., repsDefault = 0., aepsDefault = 0.; + + std::vector in_vec; + initPointsVectorRandU(sz.width, in_vec); + + // G-API code ////////////////////////////////////////////////////////////// + cv::GArray in; + auto out = cv::gapi::fitLine3D(in, distType, paramDefault, repsDefault, aepsDefault); + + cv::GComputation c(cv::GIn(in), cv::GOut(out)); + c.apply(cv::gin(in_vec), cv::gout(out_vec_gapi), getCompileArgs()); + // OpenCV code ///////////////////////////////////////////////////////////// + { + cv::fitLine(in_vec, out_vec_ocv, distType, paramDefault, repsDefault, aepsDefault); + } + // Comparison ////////////////////////////////////////////////////////////// + { + EXPECT_TRUE(cmpF(out_vec_gapi, out_vec_ocv)); + } +} + TEST_P(BGR2RGBTest, AccuracyTest) { // G-API code ////////////////////////////////////////////////////////////// diff --git a/modules/gapi/test/common/gapi_tests_common.hpp b/modules/gapi/test/common/gapi_tests_common.hpp index 948476fa10..514fa2be38 100644 --- a/modules/gapi/test/common/gapi_tests_common.hpp +++ b/modules/gapi/test/common/gapi_tests_common.hpp @@ -74,6 +74,50 @@ namespace } #endif // WINRT } + + template inline void initPointRandU(cv::RNG &rng, cv::Point_& pt) + { + GAPI_Assert(std::is_integral::value); + pt = cv::Point_(static_cast(static_cast(rng(CHAR_MAX + 1U))), + static_cast(static_cast(rng(CHAR_MAX + 1U)))); + } + + template inline void initPointRandU(cv::RNG &rng, cv::Point3_& pt) + { + GAPI_Assert(std::is_integral::value); + pt = 
cv::Point3_(static_cast(static_cast(rng(CHAR_MAX + 1U))), + static_cast(static_cast(rng(CHAR_MAX + 1U))), + static_cast(static_cast(rng(CHAR_MAX + 1U)))); + } + + template inline void initFloatPointRandU(cv::RNG &rng, cv::Point_ &pt) + { + GAPI_Assert(std::is_floating_point::value); + static const int fscale = 256; // avoid bits near ULP, generate stable test input + pt = cv::Point_(rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale)); + } + + template<> inline void initPointRandU(cv::RNG &rng, cv::Point2f &pt) + { initFloatPointRandU(rng, pt); } + + template<> inline void initPointRandU(cv::RNG &rng, cv::Point2d &pt) + { initFloatPointRandU(rng, pt); } + + template inline void initFloatPointRandU(cv::RNG &rng, cv::Point3_ &pt) + { + GAPI_Assert(std::is_floating_point::value); + static const int fscale = 256; // avoid bits near ULP, generate stable test input + pt = cv::Point3_(rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale), + rng.uniform(0, 255 * fscale) / static_cast(fscale)); + } + + template<> inline void initPointRandU(cv::RNG &rng, cv::Point3f &pt) + { initFloatPointRandU(rng, pt); } + + template<> inline void initPointRandU(cv::RNG &rng, cv::Point3d &pt) + { initFloatPointRandU(rng, pt); } } // namespace namespace opencv_test @@ -279,6 +323,80 @@ public: } } + template + inline void initPointRandU(cv::RNG& rng, T& pt) + { ::initPointRandU(rng, pt); } + +// Disable unreachable code warning for MSVS 2015 +#if defined _MSC_VER && _MSC_VER < 1910 /*MSVS 2017*/ +#pragma warning(push) +#pragma warning(disable: 4702) +#endif + // initialize std::vector>/std::vector> + template class Pt> + void initPointsVectorRandU(const int sz_in, std::vector> &vec_) + { + cv::RNG& rng = theRNG(); + + vec_.clear(); + vec_.reserve(sz_in); + + for (int i = 0; i < sz_in; i++) + { + Pt pt; + initPointRandU(rng, pt); + vec_.emplace_back(pt); + } + } +#if defined _MSC_VER && 
_MSC_VER < 1910 /*MSVS 2017*/ +#pragma warning(pop) +#endif + + template + inline void initMatByPointsVectorRandU(const cv::Size &sz_in) + { + std::vector in_vector; + initPointsVectorRandU(sz_in.width, in_vector); + in_mat1 = cv::Mat(in_vector, true); + } + + // initialize Mat by a vector of Points + template