diff --git a/CMakeLists.txt b/CMakeLists.txt index 41a2815894..9fd237b0e6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -249,7 +249,7 @@ OCV_OPTION(BUILD_CLAPACK "Build CLapack from source" (((WIN3 # Optional 3rd party components # =================================================== -OCV_OPTION(WITH_1394 "Include IEEE1394 support" ON +OCV_OPTION(WITH_1394 "Include IEEE1394 support" OFF VISIBLE_IF NOT ANDROID AND NOT IOS AND NOT XROS AND NOT WINRT VERIFY HAVE_DC1394_2) OCV_OPTION(WITH_AVFOUNDATION "Use AVFoundation for Video I/O (iOS/visionOS/Mac)" ON @@ -771,7 +771,7 @@ if(UNIX OR MINGW) elseif(EMSCRIPTEN) # no need to link to system libs with emscripten elseif(QNXNTO) - set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} m) + set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} m regex) elseif(MINGW) set(OPENCV_LINKER_LIBS ${OPENCV_LINKER_LIBS} pthread) else() diff --git a/cmake/OpenCVFindLibsGrfmt.cmake b/cmake/OpenCVFindLibsGrfmt.cmake index bc23bb241b..340753e6fa 100644 --- a/cmake/OpenCVFindLibsGrfmt.cmake +++ b/cmake/OpenCVFindLibsGrfmt.cmake @@ -24,7 +24,13 @@ else() set(_zlib_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES ${CMAKE_FIND_LIBRARY_SUFFIXES}) set(CMAKE_FIND_LIBRARY_SUFFIXES .so) endif() - find_package(ZLIB "${MIN_VER_ZLIB}") + if(QNX) + set(ZLIB_FOUND TRUE) + set(ZLIB_LIBRARY z) + set(ZLIB_LIBRARIES z) + else() + find_package(ZLIB "${MIN_VER_ZLIB}") + endif() if(ANDROID) set(CMAKE_FIND_LIBRARY_SUFFIXES ${_zlib_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES}) unset(_zlib_ORIG_CMAKE_FIND_LIBRARY_SUFFIXES) @@ -67,7 +73,13 @@ if(WITH_JPEG) ocv_clear_vars(JPEG_FOUND) else() ocv_clear_internal_cache_vars(JPEG_LIBRARY JPEG_INCLUDE_DIR) - find_package(JPEG) + if(QNX) + set(JPEG_LIBRARY jpeg) + set(JPEG_LIBRARIES jpeg) + set(JPEG_FOUND TRUE) + else() + find_package(JPEG) + endif() endif() if(NOT JPEG_FOUND) @@ -106,7 +118,13 @@ if(WITH_TIFF) ocv_clear_vars(TIFF_FOUND) else() ocv_clear_internal_cache_vars(TIFF_LIBRARY TIFF_INCLUDE_DIR) - include(FindTIFF) + if(QNX) + set(TIFF_LIBRARY tiff) + 
set(TIFF_LIBRARIES tiff) + set(TIFF_FOUND TRUE) + else() + include(FindTIFF) + endif() if(TIFF_FOUND) ocv_parse_header("${TIFF_INCLUDE_DIR}/tiff.h" TIFF_VERSION_LINES TIFF_VERSION_CLASSIC TIFF_VERSION_BIG TIFF_VERSION TIFF_BIGTIFF_VERSION) endif() diff --git a/cmake/OpenCVInstallLayout.cmake b/cmake/OpenCVInstallLayout.cmake index 8c66622ed0..bbe5f7b5c3 100644 --- a/cmake/OpenCVInstallLayout.cmake +++ b/cmake/OpenCVInstallLayout.cmake @@ -56,6 +56,24 @@ elseif(WIN32 AND CMAKE_HOST_SYSTEM_NAME MATCHES Windows) ocv_update(OPENCV_JNI_INSTALL_PATH "java${_jni_suffix}") ocv_update(OPENCV_JNI_BIN_INSTALL_PATH "${OPENCV_JNI_INSTALL_PATH}") +elseif(QNX) + ocv_update(OPENCV_BIN_INSTALL_PATH "${CPUVARDIR}/usr/bin") + ocv_update(OPENCV_TEST_INSTALL_PATH "${OPENCV_BIN_INSTALL_PATH}") + ocv_update(OPENCV_SAMPLES_BIN_INSTALL_PATH "${OPENCV_BIN_INSTALL_PATH}") + ocv_update(OPENCV_LIB_INSTALL_PATH "${CPUVARDIR}/usr/lib") + ocv_update(OPENCV_LIB_ARCHIVE_INSTALL_PATH "${OPENCV_LIB_INSTALL_PATH}") + ocv_update(OPENCV_3P_LIB_INSTALL_PATH "${CPUVARDIR}/usr/lib") + ocv_update(OPENCV_CONFIG_INSTALL_PATH "${CPUVARDIR}/usr/share/OpenCV") + ocv_update(OPENCV_INCLUDE_INSTALL_PATH "usr/include/OpenCV/opencv4") + ocv_update(OPENCV_OTHER_INSTALL_PATH "usr/share/OpenCV") + ocv_update(OPENCV_SAMPLES_SRC_INSTALL_PATH "samples/native") + ocv_update(OPENCV_LICENSES_INSTALL_PATH "${OPENCV_OTHER_INSTALL_PATH}/licenses") + ocv_update(OPENCV_TEST_DATA_INSTALL_PATH "${OPENCV_OTHER_INSTALL_PATH}/testdata") + ocv_update(OPENCV_DOC_INSTALL_PATH "doc") + ocv_update(OPENCV_JAR_INSTALL_PATH "${CMAKE_INSTALL_DATAROOTDIR}/java/opencv4") + ocv_update(OPENCV_JNI_INSTALL_PATH "${OPENCV_JAR_INSTALL_PATH}") + ocv_update(OPENCV_JNI_BIN_INSTALL_PATH "${OPENCV_JNI_INSTALL_PATH}") + else() # UNIX include(GNUInstallDirs) diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake index 5a44548824..5344b15974 100644 --- a/cmake/OpenCVUtils.cmake +++ b/cmake/OpenCVUtils.cmake @@ -309,7 +309,11 @@ 
function(ocv_include_directories) dir MATCHES "/usr/include$") # workaround for GCC 6.x bug else() - include_directories(AFTER SYSTEM "${dir}") + if(${CMAKE_SYSTEM_NAME} MATCHES QNX) + include_directories(AFTER "${dir}") + else() + include_directories(AFTER SYSTEM "${dir}") + endif() endif() endforeach() include_directories(BEFORE ${__add_before}) diff --git a/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown b/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown index 182f1c845b..06716fe5dc 100644 --- a/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown +++ b/doc/py_tutorials/py_calib3d/py_calibration/py_calibration.markdown @@ -216,8 +216,6 @@ for i in range(len(objpoints)): print( "total error: {}".format(mean_error/len(objpoints)) ) @endcode -Additional Resources --------------------- Exercises --------- diff --git a/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown b/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown index ada22222cb..811e940714 100644 --- a/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown +++ b/doc/py_tutorials/py_calib3d/py_epipolar_geometry/py_epipolar_geometry.markdown @@ -158,9 +158,6 @@ side. That meeting point is the epipole. For better results, images with good resolution and many non-planar points should be used. -Additional Resources --------------------- - Exercises --------- diff --git a/doc/py_tutorials/py_calib3d/py_pose/py_pose.markdown b/doc/py_tutorials/py_calib3d/py_pose/py_pose.markdown index bd0ac88ef8..83a75e96f3 100644 --- a/doc/py_tutorials/py_calib3d/py_pose/py_pose.markdown +++ b/doc/py_tutorials/py_calib3d/py_pose/py_pose.markdown @@ -119,9 +119,3 @@ And look at the result below: If you are interested in graphics, augmented reality etc, you can use OpenGL to render more complicated figures. 
- -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown b/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown index 1594f77200..de9eaaf8c6 100644 --- a/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown +++ b/doc/py_tutorials/py_core/py_basic_ops/py_basic_ops.markdown @@ -51,23 +51,6 @@ You can modify the pixel values the same way. Numpy is an optimized library for fast array calculations. So simply accessing each and every pixel value and modifying it will be very slow and it is discouraged. -@note The above method is normally used for selecting a region of an array, say the first 5 rows -and last 3 columns. For individual pixel access, the Numpy array methods, array.item() and -array.itemset() are considered better. They always return a scalar, however, so if you want to access -all the B,G,R values, you will need to call array.item() separately for each value. - -Better pixel accessing and editing method : -@code{.py} -# accessing RED value ->>> img.item(10,10,2) -59 - -# modifying RED value ->>> img.itemset((10,10,2),100) ->>> img.item(10,10,2) -100 -@endcode - Accessing Image Properties -------------------------- @@ -195,9 +178,3 @@ See the result below. (Image is displayed with matplotlib. So RED and BLUE chann interchanged): ![image](images/border.jpg) - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown b/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown index 4b6e8bd3c1..e863cb9f62 100644 --- a/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown +++ b/doc/py_tutorials/py_core/py_image_arithmetics/py_image_arithmetics.markdown @@ -110,9 +110,6 @@ img2_fg. 
![image](images/overlay.jpg) -Additional Resources --------------------- - Exercises --------- diff --git a/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown b/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown index 7d63ffadef..f851433f59 100644 --- a/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown +++ b/doc/py_tutorials/py_core/py_optimization/py_optimization.markdown @@ -163,6 +163,3 @@ Additional Resources 2. Scipy Lecture Notes - [Advanced Numpy](http://scipy-lectures.github.io/advanced/advanced_numpy/index.html#advanced-numpy) 3. [Timing and Profiling in IPython](http://pynash.org/2013/03/06/timing-and-profiling/) - -Exercises ---------- diff --git a/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown b/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown index 29e385c64d..d9c9cb2429 100644 --- a/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown +++ b/doc/py_tutorials/py_feature2d/py_fast/py_fast.markdown @@ -138,6 +138,3 @@ Additional Resources 2. Edward Rosten, Reid Porter, and Tom Drummond, "Faster and better: a machine learning approach to corner detection" in IEEE Trans. Pattern Analysis and Machine Intelligence, 2010, vol 32, pp. 105-119. - -Exercises ---------- diff --git a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown index 4597c6bfcf..bb2455fca7 100644 --- a/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown +++ b/doc/py_tutorials/py_feature2d/py_feature_homography/py_feature_homography.markdown @@ -102,9 +102,3 @@ plt.imshow(img3, 'gray'),plt.show() See the result below. 
Object is marked in white color in cluttered image: ![image](images/homography_findobj.jpg) - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_feature2d/py_features_meaning/py_features_meaning.markdown b/doc/py_tutorials/py_feature2d/py_features_meaning/py_features_meaning.markdown index 3aa00b715a..5e8bca6813 100644 --- a/doc/py_tutorials/py_feature2d/py_features_meaning/py_features_meaning.markdown +++ b/doc/py_tutorials/py_feature2d/py_features_meaning/py_features_meaning.markdown @@ -81,9 +81,3 @@ or do whatever you want. So in this module, we are looking to different algorithms in OpenCV to find features, describe them, match them etc. - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.markdown b/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.markdown index aeab98bfd6..bb38a77927 100644 --- a/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.markdown +++ b/doc/py_tutorials/py_feature2d/py_matcher/py_matcher.markdown @@ -209,9 +209,3 @@ plt.imshow(img3,),plt.show() See the result below: ![image](images/matcher_flann.jpg) - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown b/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown index 73d01aaaa1..c86a79c8af 100644 --- a/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown +++ b/doc/py_tutorials/py_feature2d/py_orb/py_orb.markdown @@ -93,6 +93,3 @@ Additional Resources -# Ethan Rublee, Vincent Rabaud, Kurt Konolige, Gary R. Bradski: ORB: An efficient alternative to SIFT or SURF. ICCV 2011: 2564-2571. 
- -Exercises ---------- diff --git a/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown b/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown index c5d29493e4..00d8d0a288 100644 --- a/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown +++ b/doc/py_tutorials/py_feature2d/py_shi_tomasi/py_shi_tomasi.markdown @@ -67,9 +67,3 @@ See the result below: ![image](images/shitomasi_block1.jpg) This function is more appropriate for tracking. We will see that when its time comes. - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown b/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown index bbbae6a3e6..77caab6c06 100644 --- a/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown +++ b/doc/py_tutorials/py_feature2d/py_sift_intro/py_sift_intro.markdown @@ -160,9 +160,3 @@ Here kp will be a list of keypoints and des is a numpy array of shape So we got keypoints, descriptors etc. Now we want to see how to match keypoints in different images. That we will learn in coming chapters. - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown b/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown index 5bcd91cce8..e856c56ecd 100644 --- a/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown +++ b/doc/py_tutorials/py_feature2d/py_surf_intro/py_surf_intro.markdown @@ -155,9 +155,3 @@ Finally we check the descriptor size and change it to 128 if it is only 64-dim. (47, 128) @endcode Remaining part is matching which we will do in another chapter. 
- -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.markdown b/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.markdown index 3c17b2ec9d..1dae65d64a 100644 --- a/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.markdown +++ b/doc/py_tutorials/py_gui/py_mouse_handling/py_mouse_handling.markdown @@ -101,8 +101,6 @@ while(1): cv.destroyAllWindows() @endcode -Additional Resources --------------------- Exercises --------- diff --git a/doc/py_tutorials/py_gui/py_video_display/py_video_display.markdown b/doc/py_tutorials/py_gui/py_video_display/py_video_display.markdown index 5819653fa0..0b34965479 100644 --- a/doc/py_tutorials/py_gui/py_video_display/py_video_display.markdown +++ b/doc/py_tutorials/py_gui/py_video_display/py_video_display.markdown @@ -152,9 +152,3 @@ cap.release() out.release() cv.destroyAllWindows() @endcode - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.markdown b/doc/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.markdown index 55c7d5c9d2..bb9d30b29b 100644 --- a/doc/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.markdown +++ b/doc/py_tutorials/py_imgproc/py_colorspaces/py_colorspaces.markdown @@ -103,9 +103,6 @@ Now you take [H-10, 100,100] and [H+10, 255, 255] as the lower bound and upper b from this method, you can use any image editing tools like GIMP or any online converters to find these values, but don't forget to adjust the HSV ranges. 
-Additional Resources --------------------- - Exercises --------- diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown index e98b8a64b9..d32eab2a59 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contour_features/py_contour_features.markdown @@ -199,9 +199,3 @@ righty = int(((cols-x)*vy/vx)+y) cv.line(img,(cols-1,righty),(0,lefty),(0,255,0),2) @endcode ![image](images/fitline.jpg) - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.markdown index 282f62ddf9..f685972e46 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contour_properties/py_contour_properties.markdown @@ -114,9 +114,6 @@ For eg, if I apply it to an Indian map, I get the following result : ![image](images/extremepoints.jpg) -Additional Resources --------------------- - Exercises --------- diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown index e96598b11e..a472346402 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contours_begin/py_contours_begin.markdown @@ -88,9 +88,3 @@ the contour array (drawn in blue color). First image shows points I got with cv. much memory it saves!!! 
![image](images/none.jpg) - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown index 075e6ec81f..097722f8cb 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contours_hierarchy/py_contours_hierarchy.markdown @@ -212,9 +212,3 @@ array([[[ 7, -1, 1, -1], [ 8, 0, -1, -1], [-1, 7, -1, -1]]]) @endcode - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown b/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown index fc278669b0..0a511557ad 100644 --- a/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown +++ b/doc/py_tutorials/py_imgproc/py_contours/py_contours_more_functions/py_contours_more_functions.markdown @@ -124,9 +124,6 @@ See, even image rotation doesn't affect much on this comparison. moments invariant to translation, rotation and scale. Seventh one is skew-invariant. Those values can be found using **cv.HuMoments()** function. 
-Additional Resources -==================== - Exercises --------- diff --git a/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown b/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown index 82ce0d45ab..72e7b72b2e 100644 --- a/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown +++ b/doc/py_tutorials/py_imgproc/py_filtering/py_filtering.markdown @@ -150,6 +150,3 @@ Additional Resources -------------------- -# Details about the [bilateral filtering](http://people.csail.mit.edu/sparis/bf_course/) - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown b/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown index 6dd151fe96..6aa6e0b4e3 100644 --- a/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown +++ b/doc/py_tutorials/py_imgproc/py_geometric_transformations/py_geometric_transformations.markdown @@ -163,6 +163,3 @@ Additional Resources -------------------- -# "Computer Vision: Algorithms and Applications", Richard Szeliski - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown b/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown index 349ebac031..b9fea5fd29 100644 --- a/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown +++ b/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.markdown @@ -146,9 +146,6 @@ mark the rectangle area in mask image with 2-pixel or 3-pixel (probable backgrou mark our sure_foreground with 1-pixel as we did in second example. Then directly apply the grabCut function with mask mode. 
-Additional Resources --------------------- - Exercises --------- diff --git a/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown b/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown index 0b9556f2bb..2c0b03e913 100644 --- a/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown +++ b/doc/py_tutorials/py_imgproc/py_gradients/py_gradients.markdown @@ -103,9 +103,3 @@ plt.show() Check the result below: ![image](images/double_edge.jpg) - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown index 301b90cb38..9830308b77 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_2d_histogram/py_2d_histogram.markdown @@ -125,9 +125,3 @@ output of that code for the same image as above: You can clearly see in the histogram what colors are present, blue is there, yellow is there, and some white due to chessboard is there. Nice !!! - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown index dce31c376b..3b1097636f 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_backprojection/py_histogram_backprojection.markdown @@ -123,6 +123,3 @@ Additional Resources -# "Indexing via color histograms", Swain, Michael J. , Third international conference on computer vision,1990. 
- -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown index 4f56da777a..91e2f71cd3 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_begins/py_histogram_begins.markdown @@ -197,6 +197,3 @@ Additional Resources -------------------- -# [Cambridge in Color website](http://www.cambridgeincolour.com/tutorials/histograms1.htm) - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown index bc9c69a714..c7160d2bd4 100644 --- a/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown +++ b/doc/py_tutorials/py_imgproc/py_histograms/py_histogram_equalization/py_histogram_equalization.markdown @@ -151,6 +151,3 @@ Also check these SOF questions regarding contrast adjustment: C?](http://stackoverflow.com/questions/10549245/how-can-i-adjust-contrast-in-opencv-in-c) 4. 
[How do I equalize contrast & brightness of images using opencv?](http://stackoverflow.com/questions/10561222/how-do-i-equalize-contrast-brightness-of-images-using-opencv) - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown b/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown index 570ad9145c..5778b83834 100644 --- a/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown +++ b/doc/py_tutorials/py_imgproc/py_houghcircles/py_houghcircles.markdown @@ -45,9 +45,3 @@ cv.destroyAllWindows() Result is shown below: ![image](images/houghcircles2.jpg) - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.markdown b/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.markdown index 9851599455..7f38a0cdc4 100644 --- a/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.markdown +++ b/doc/py_tutorials/py_imgproc/py_houghlines/py_houghlines.markdown @@ -103,6 +103,3 @@ Additional Resources -------------------- -# [Hough Transform on Wikipedia](http://en.wikipedia.org/wiki/Hough_transform) - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown b/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown index f52a2ce411..24b504914f 100644 --- a/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown +++ b/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.markdown @@ -152,6 +152,3 @@ Additional Resources -------------------- -# [Morphological Operations](http://homepages.inf.ed.ac.uk/rbf/HIPR2/morops.htm) at HIPR2 - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown b/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown index 0470211fd3..df6ae70ed6 100644 --- a/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown +++ 
b/doc/py_tutorials/py_imgproc/py_pyramids/py_pyramids.markdown @@ -139,6 +139,3 @@ Additional Resources -------------------- -# [Image Blending](http://pages.cs.wisc.edu/~csverma/CS766_09/ImageMosaic/imagemosaic.html) - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown b/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown index 3a59bf6b23..e5eddb0e6b 100644 --- a/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown +++ b/doc/py_tutorials/py_imgproc/py_template_matching/py_template_matching.markdown @@ -132,9 +132,3 @@ cv.imwrite('res.png',img_rgb) Result: ![image](images/res_mario.jpg) - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown b/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown index df12efd45c..5378012534 100644 --- a/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown +++ b/doc/py_tutorials/py_imgproc/py_transforms/py_fourier_transform/py_fourier_transform.markdown @@ -291,6 +291,3 @@ Additional Resources Theory](http://cns-alumni.bu.edu/~slehar/fourier/fourier.html) by Steven Lehar 2. [Fourier Transform](http://homepages.inf.ed.ac.uk/rbf/HIPR2/fourier.htm) at HIPR 3. 
[What does frequency domain denote in case of images?](http://dsp.stackexchange.com/q/1637/818) - -Exercises ---------- diff --git a/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.markdown b/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.markdown index 05a1300a16..4982f72df6 100644 --- a/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.markdown +++ b/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_opencv/py_kmeans_opencv.markdown @@ -186,9 +186,3 @@ cv.destroyAllWindows() See the result below for K=8: ![image](images/oc_color_quantization.jpg) - -Additional Resources --------------------- - -Exercises ---------- diff --git a/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/py_kmeans_understanding.markdown b/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/py_kmeans_understanding.markdown index 988d5b08b8..ebf0007cd2 100644 --- a/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/py_kmeans_understanding.markdown +++ b/doc/py_tutorials/py_ml/py_kmeans/py_kmeans_understanding/py_kmeans_understanding.markdown @@ -80,6 +80,3 @@ Additional Resources -# [Machine Learning Course](https://www.coursera.org/course/ml), Video lectures by Prof. Andrew Ng (Some of the images are taken from this) - -Exercises ---------- diff --git a/doc/py_tutorials/py_photo/py_non_local_means/py_non_local_means.markdown b/doc/py_tutorials/py_photo/py_non_local_means/py_non_local_means.markdown index 94e57d4d6e..36a5a4a782 100644 --- a/doc/py_tutorials/py_photo/py_non_local_means/py_non_local_means.markdown +++ b/doc/py_tutorials/py_photo/py_non_local_means/py_non_local_means.markdown @@ -147,6 +147,3 @@ Additional Resources recommended to visit. Our test image is generated from this link) 2. 
[Online course at coursera](https://www.coursera.org/course/images) (First image taken from here) - -Exercises ---------- diff --git a/doc/py_tutorials/py_setup/py_setup_in_fedora/py_setup_in_fedora.markdown b/doc/py_tutorials/py_setup/py_setup_in_fedora/py_setup_in_fedora.markdown index 4c0433f4ff..fd1e6121c4 100644 --- a/doc/py_tutorials/py_setup/py_setup_in_fedora/py_setup_in_fedora.markdown +++ b/doc/py_tutorials/py_setup/py_setup_in_fedora/py_setup_in_fedora.markdown @@ -240,9 +240,6 @@ make doxygen @endcode Then open opencv/build/doc/doxygen/html/index.html and bookmark it in the browser. -Additional Resources --------------------- - Exercises --------- diff --git a/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown b/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown index 305fc21faf..ed862757a6 100644 --- a/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown +++ b/doc/py_tutorials/py_setup/py_setup_in_windows/py_setup_in_windows.markdown @@ -119,9 +119,6 @@ Building OpenCV from source @note We have installed with no other support like TBB, Eigen, Qt, Documentation etc. It would be difficult to explain it here. A more detailed video will be added soon or you can just hack around. 
-Additional Resources --------------------- - Exercises --------- diff --git a/doc/tutorials/imgproc/basic_geometric_drawing/basic_geometric_drawing.markdown b/doc/tutorials/imgproc/basic_geometric_drawing/basic_geometric_drawing.markdown index 630d427189..b5070f1a8a 100644 --- a/doc/tutorials/imgproc/basic_geometric_drawing/basic_geometric_drawing.markdown +++ b/doc/tutorials/imgproc/basic_geometric_drawing/basic_geometric_drawing.markdown @@ -21,15 +21,22 @@ In this tutorial you will learn how to: - Draw a **circle** by using the OpenCV function **circle()** - Draw a **filled polygon** by using the OpenCV function **fillPoly()** -@add_toggle_cpp OpenCV Theory ------------- +@add_toggle_cpp For this tutorial, we will heavily use two structures: @ref cv::Point and @ref cv::Scalar : +@end_toggle +@add_toggle_java +For this tutorial, we will heavily use two structures: @ref cv::Point and @ref cv::Scalar : +@end_toggle +@add_toggle_python +For this tutorial, we will heavily use tuples in Python instead of @ref cv::Point and @ref cv::Scalar : +@end_toggle ### Point - It represents a 2D point, specified by its image coordinates \f$x\f$ and \f$y\f$. We can define it as: +@add_toggle_cpp @code{.cpp} Point pt; pt.x = 10; @@ -39,28 +46,8 @@ or @code{.cpp} Point pt = Point(10, 8); @endcode -### Scalar - -- Represents a 4-element vector. The type Scalar is widely used in OpenCV for passing pixel - values. -- In this tutorial, we will use it extensively to represent BGR color values (3 parameters). It is - not necessary to define the last argument if it is not going to be used. 
-- Let's see an example, if we are asked for a color argument and we give: - @code{.cpp} - Scalar( a, b, c ) - @endcode - We would be defining a BGR color such as: *Blue = a*, *Green = b* and *Red = c* @end_toggle - @add_toggle_java -OpenCV Theory -------------- - -For this tutorial, we will heavily use two structures: @ref cv::Point and @ref cv::Scalar : - -### Point - -It represents a 2D point, specified by its image coordinates \f$x\f$ and \f$y\f$. We can define it as: @code{.java} Point pt = new Point(); pt.x = 10; @@ -70,6 +57,12 @@ or @code{.java} Point pt = new Point(10, 8); @endcode +@end_toggle +@add_toggle_python +@code{.python} +pt = (10, 0) # x = 10, y = 0 +@endcode +@end_toggle ### Scalar - Represents a 4-element vector. The type Scalar is widely used in OpenCV for passing pixel @@ -77,11 +70,22 @@ Point pt = new Point(10, 8); - In this tutorial, we will use it extensively to represent BGR color values (3 parameters). It is not necessary to define the last argument if it is not going to be used. 
- Let's see an example, if we are asked for a color argument and we give: +@add_toggle_cpp + @code{.cpp} + Scalar( a, b, c ) + @endcode +@end_toggle +@add_toggle_java @code{.java} Scalar( a, b, c ) @endcode - We would be defining a BGR color such as: *Blue = a*, *Green = b* and *Red = c* @end_toggle +@add_toggle_python + @code{.python} + ( a, b, c ) + @endcode +@end_toggle + We would be defining a BGR color such as: *Blue = a*, *Green = b* and *Red = c* Code ---- diff --git a/doc/tutorials/introduction/config_reference/config_reference.markdown b/doc/tutorials/introduction/config_reference/config_reference.markdown index 1206463baa..905d7b95c2 100644 --- a/doc/tutorials/introduction/config_reference/config_reference.markdown +++ b/doc/tutorials/introduction/config_reference/config_reference.markdown @@ -393,7 +393,7 @@ There are multiple less popular frameworks which can be used to read and write v | Option | Default | Description | | ------ | ------- | ----------- | -| `WITH_1394` | _ON_ | [IIDC IEEE1394](https://en.wikipedia.org/wiki/IEEE_1394#IIDC) support using DC1394 library | +| `WITH_1394` | _OFF_ | [IIDC IEEE1394](https://en.wikipedia.org/wiki/IEEE_1394#IIDC) support using DC1394 library | | `WITH_OPENNI` | _OFF_ | [OpenNI](https://en.wikipedia.org/wiki/OpenNI) can be used to capture data from depth-sensing cameras. Deprecated. | | `WITH_OPENNI2` | _OFF_ | [OpenNI2](https://structure.io/openni) can be used to capture data from depth-sensing cameras. | | `WITH_PVAPI` | _OFF_ | [PVAPI](https://www.alliedvision.com/en/support/software-downloads.html) is legacy SDK for Prosilica GigE cameras. Deprecated. | @@ -455,6 +455,8 @@ OpenCV relies on various GUI libraries for window drawing. | `WITH_WIN32UI` | _ON_ | Windows | [WinAPI](https://en.wikipedia.org/wiki/Windows_API) is a standard GUI API in Windows. | | N/A | _ON_ | macOS | [Cocoa](https://en.wikipedia.org/wiki/Cocoa_(API)) is a framework used in macOS. 
| | `WITH_QT` | _OFF_ | Cross-platform | [Qt](https://en.wikipedia.org/wiki/Qt_(software)) is a cross-platform GUI framework. | +| `WITH_FRAMEBUFFER` | _OFF_ | Linux | Experimental backend using [Linux framebuffer](https://en.wikipedia.org/wiki/Linux_framebuffer). Has limited functionality but does not require dependencies. | +| `WITH_FRAMEBUFFER_XVFB` | _OFF_ | Linux | Enables special output mode of the FRAMEBUFFER backend compatible with [xvfb](https://en.wikipedia.org/wiki/Xvfb) tool. Requires some X11 headers. | @note OpenCV compiled with Qt support enables advanced _highgui_ interface, see @ref highgui_qt for details. diff --git a/doc/tutorials/introduction/env_reference/env_reference.markdown b/doc/tutorials/introduction/env_reference/env_reference.markdown index c25ea9e533..afbde7b715 100644 --- a/doc/tutorials/introduction/env_reference/env_reference.markdown +++ b/doc/tutorials/introduction/env_reference/env_reference.markdown @@ -329,6 +329,9 @@ Some external dependencies can be detached into a dynamic library, which will be |------|------|---------|-------------| | OPENCV_LEGACY_WAITKEY | non-null | | switch `waitKey` return result (default behavior: `return code & 0xff` (or -1), legacy behavior: `return code`) | | $XDG_RUNTIME_DIR | | | Wayland backend specific - create shared memory-mapped file for interprocess communication (named `opencv-shared-??????`) | +| OPENCV_HIGHGUI_FB_MODE | string | `FB` | Selects output mode for the framebuffer backend (`FB` - regular framebuffer, `EMU` - emulation, perform internal checks but does nothing, `XVFB` - compatible with _xvfb_ virtual framebuffer) | +| OPENCV_HIGHGUI_FB_DEVICE | file path | | Path to framebuffer device to use (will be checked first) | +| FRAMEBUFFER | file path | `/dev/fb0` | Same as OPENCV_HIGHGUI_FB_DEVICE, commonly used variable for the same purpose (will be checked second) | ## imgproc diff --git a/doc/tutorials/others/stitcher.markdown b/doc/tutorials/others/stitcher.markdown index 
121917fd69..8c9ffa4a4d 100644 --- a/doc/tutorials/others/stitcher.markdown +++ b/doc/tutorials/others/stitcher.markdown @@ -23,18 +23,36 @@ In this tutorial you will learn how to: Code ---- +@add_toggle_cpp +This tutorial's code is shown in the lines below. You can download it from [here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/stitching.cpp). -This tutorial code's is shown lines below. You can also download it from -[here](https://github.com/opencv/opencv/tree/5.x/samples/cpp/stitching.cpp). +Note: The C++ version includes additional options such as image division (--d3) and more detailed error handling, which are not present in the Python example. @include samples/cpp/snippets/stitching.cpp +@end_toggle + +@add_toggle_python +This tutorial's code is shown in the lines below. You can download it from [here](https://github.com/opencv/opencv/blob/5.x/samples/python/stitching.py). + +Note: The C++ version includes additional options such as image division (--d3) and more detailed error handling, which are not present in the Python example. + +@include samples/python/snippets/stitching.py + +@end_toggle + Explanation ----------- The most important code part is: +@add_toggle_cpp @snippet cpp/snippets/stitching.cpp stitching +@end_toggle + +@add_toggle_python +@snippet python/snippets/stitching.py stitching +@end_toggle A new instance of stitcher is created and the @ref cv::Stitcher::stitch will do all the hard work. 
diff --git a/modules/calib/src/calibinit.cpp b/modules/calib/src/calibinit.cpp index d49e8a533c..31e7141f4e 100644 --- a/modules/calib/src/calibinit.cpp +++ b/modules/calib/src/calibinit.cpp @@ -152,7 +152,7 @@ struct ChessBoardQuad int group_idx; // quad group ID int row, col; // row and column of this quad bool ordered; // true if corners/neighbors are ordered counter-clockwise - float edge_len; // quad edge len, in pix^2 + float edge_sqr_len; // quad edge squared length, in pix^2 // neighbors and corners are synced, i.e., neighbor 0 shares corner 0 ChessBoardCorner *corners[4]; // Coordinates of quad corners struct ChessBoardQuad *neighbors[4]; // Pointers of quad neighbors. M.b. sparse. @@ -163,7 +163,7 @@ struct ChessBoardQuad group_idx(group_idx_), row(0), col(0), ordered(0), - edge_len(0) + edge_sqr_len(0) { corners[0] = corners[1] = corners[2] = corners[3] = NULL; neighbors[0] = neighbors[1] = neighbors[2] = neighbors[3] = NULL; @@ -221,7 +221,7 @@ public: int all_quads_count; struct NeighborsFinder { - const float thresh_scale = sqrt(2.f); + const float thresh_sqr_scale = 2.f; ChessBoardDetector& detector; std::vector neighbors_indices; std::vector neighbors_dists; @@ -234,8 +234,8 @@ public: const int quad_idx, const int corner_idx, const cv::Point2f& corner_pt, - float& min_dist, - const float radius, + float& min_sqr_dist, + const float sqr_radius, int& closest_quad_idx, int& closest_corner_idx, cv::Point2f& closest_corner_pt); @@ -531,8 +531,8 @@ bool ChessBoardDetector::NeighborsFinder::findCornerNeighbor( const int quad_idx, const int corner_idx, const cv::Point2f& corner_pt, - float& min_dist, - const float radius, + float& min_sqr_dist, + const float sqr_radius, int& closest_quad_idx, int& closest_corner_idx, cv::Point2f& closest_corner_pt) @@ -546,7 +546,7 @@ bool ChessBoardDetector::NeighborsFinder::findCornerNeighbor( // find the closest corner in all other quadrangles const std::vector query = { corner_pt.x, corner_pt.y }; const 
cvflann::SearchParams search_params(-1); - const int neighbors_count = all_quads_pts_index.radiusSearch(query, neighbors_indices, neighbors_dists, radius, search_params); + const int neighbors_count = all_quads_pts_index.radiusSearch(query, neighbors_indices, neighbors_dists, sqr_radius, search_params); for (int neighbor_idx_idx = 0; neighbor_idx_idx < neighbors_count; neighbor_idx_idx++) { @@ -561,16 +561,16 @@ bool ChessBoardDetector::NeighborsFinder::findCornerNeighbor( continue; const Point2f neighbor_pt = all_quads_pts[neighbor_idx]; - const float dist = normL2Sqr(corner_pt - neighbor_pt); - if (dist <= cur_quad.edge_len * thresh_scale && - dist <= q_k.edge_len * thresh_scale) + const float sqr_dist = normL2Sqr(corner_pt - neighbor_pt); + if (sqr_dist <= cur_quad.edge_sqr_len * thresh_sqr_scale && + sqr_dist <= q_k.edge_sqr_len * thresh_sqr_scale) { // check edge lengths, make sure they're compatible // edges that are different by more than 1:4 are rejected. - // edge_len is squared edge length, so we compare them + // edge_sqr_len is edge squared length, so we compare them // with squared constant 16 = 4^2 - if (q_k.edge_len > 16 * cur_quad.edge_len || - cur_quad.edge_len > 16 * q_k.edge_len) + if (q_k.edge_sqr_len > 16 * cur_quad.edge_sqr_len || + cur_quad.edge_sqr_len > 16 * q_k.edge_sqr_len) { DPRINTF("Incompatible edge lengths"); continue; @@ -590,20 +590,20 @@ bool ChessBoardDetector::NeighborsFinder::findCornerNeighbor( if (!arePointsOnSameSideFromLine(mid_pt1, mid_pt2, corner_pt, neighbor_pt_diagonal)) continue; - if (!arePointsOnSameSideFromLine(mid_pt3, mid_pt4, neighbor_pt, neighbor_pt_diagonal)) + if (!arePointsOnSameSideFromLine(mid_pt3, mid_pt4, corner_pt, neighbor_pt_diagonal)) continue; closest_neighbor_idx = neighbor_idx; closest_quad_idx = k; closest_corner_idx = j; closest_quad = &q_k; - min_dist = dist; + min_sqr_dist = sqr_dist; break; } } // we found a matching corner point? 
- if (closest_neighbor_idx >= 0 && closest_quad_idx >= 0 && closest_corner_idx >= 0 && min_dist < FLT_MAX) + if (closest_neighbor_idx >= 0 && closest_quad_idx >= 0 && closest_corner_idx >= 0 && min_sqr_dist < FLT_MAX) { CV_Assert(closest_quad); @@ -622,7 +622,7 @@ bool ChessBoardDetector::NeighborsFinder::findCornerNeighbor( if (cur_quad.neighbors[j] == closest_quad) break; - if (normL2Sqr(closest_corner_pt - all_quads_pts[(quad_idx << 2) + j]) < min_dist) + if (normL2Sqr(closest_corner_pt - all_quads_pts[(quad_idx << 2) + j]) < min_sqr_dist) break; } if (j < 4) @@ -1169,7 +1169,7 @@ int ChessBoardDetector::addOuterQuad(ChessBoardQuad& quad, std::vector(q.corners[i]->pt - q.corners[(i+1)&3]->pt); - q.edge_len = std::min(q.edge_len, d); + float sqr_d = normL2Sqr(q.corners[i]->pt - q.corners[(i+1)&3]->pt); + q.edge_sqr_len = std::min(q.edge_sqr_len, sqr_d); } const int edge_len_compensation = 2 * dilations; - q.edge_len += 2 * sqrt(q.edge_len) * edge_len_compensation + edge_len_compensation * edge_len_compensation; + q.edge_sqr_len += 2 * sqrt(q.edge_sqr_len) * edge_len_compensation + edge_len_compensation * edge_len_compensation; } all_quads_count = quad_count; diff --git a/modules/calib3d/misc/js/gen_dict.json b/modules/calib3d/misc/js/gen_dict.json new file mode 100644 index 0000000000..407ddf1e48 --- /dev/null +++ b/modules/calib3d/misc/js/gen_dict.json @@ -0,0 +1,21 @@ +{ + "whitelist": + { + "": [ + "findHomography", + "calibrateCameraExtended", + "drawFrameAxes", + "estimateAffine2D", + "getDefaultNewCameraMatrix", + "initUndistortRectifyMap", + "Rodrigues", + "solvePnP", + "solvePnPRansac", + "solvePnPRefineLM", + "projectPoints", + "undistort", + "fisheye_initUndistortRectifyMap", + "fisheye_projectPoints" + ] + } +} diff --git a/modules/core/include/opencv2/core/check.hpp b/modules/core/include/opencv2/core/check.hpp index c9ce97b6ae..aa1a839f70 100644 --- a/modules/core/include/opencv2/core/check.hpp +++ b/modules/core/include/opencv2/core/check.hpp @@ 
-135,6 +135,9 @@ CV_EXPORTS void CV_NORETURN check_failed_MatChannels(const int v, const CheckCon /// Example: depth == CV_32F || depth == CV_64F #define CV_CheckDepth(t, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, MatDepth, t, (test_expr), #t, #test_expr, msg) +/// Example: channel == 1 || channel == 3 +#define CV_CheckChannels(t, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, MatChannels, t, (test_expr), #t, #test_expr, msg) + /// Example: v == A || v == B #define CV_Check(v, test_expr, msg) CV__CHECK_CUSTOM_TEST(_, auto, v, (test_expr), #v, #test_expr, msg) diff --git a/modules/core/include/opencv2/core/types.hpp b/modules/core/include/opencv2/core/types.hpp index 096348cc3c..b8b310fc37 100644 --- a/modules/core/include/opencv2/core/types.hpp +++ b/modules/core/include/opencv2/core/types.hpp @@ -478,7 +478,14 @@ public: template operator Rect_<_Tp2>() const; //! checks whether the rectangle contains the point - bool contains(const Point_<_Tp>& pt) const; + /*! @warning After OpenCV 4.11.0, when calling Rect.contains() with cv::Point2f / cv::Point2d point, point should not convert/round to int. + * ``` + * Rect_ r(0,0,500,500); Point_ pt(250.0f, 499.9f); + * r.contains(pt) returns false.(OpenCV 4.10.0 or before) + * r.contains(pt) returns true. 
(OpenCV 4.11.0 or later) + * ``` + */ + template inline bool contains(const Point_<_Tp2>& pt) const; _Tp x; //!< x coordinate of the top-left corner _Tp y; //!< y coordinate of the top-left corner @@ -1861,12 +1868,29 @@ Rect_<_Tp>::operator Rect_<_Tp2>() const return Rect_<_Tp2>(saturate_cast<_Tp2>(x), saturate_cast<_Tp2>(y), saturate_cast<_Tp2>(width), saturate_cast<_Tp2>(height)); } -template inline -bool Rect_<_Tp>::contains(const Point_<_Tp>& pt) const +template template inline +bool Rect_<_Tp>::contains(const Point_<_Tp2>& pt) const { return x <= pt.x && pt.x < x + width && y <= pt.y && pt.y < y + height; } - +// See https://github.com/opencv/opencv/issues/26016 +template<> template<> inline +bool Rect_::contains(const Point_& pt) const +{ + // std::numeric_limits::digits is 31. + // std::numeric_limits::digits is 53. + // So conversion int->double does not lead to accuracy errors. + const Rect_ _rect(static_cast(x), static_cast(y), static_cast(width), static_cast(height)); + return _rect.contains(pt); +} +template<> template<> inline +bool Rect_::contains(const Point_& _pt) const +{ + // std::numeric_limits::digits is 24. + // std::numeric_limits::digits is 53. + // So conversion float->double does not lead to accuracy errors. 
+ return contains(Point_(static_cast(_pt.x), static_cast(_pt.y))); +} template static inline Rect_<_Tp>& operator += ( Rect_<_Tp>& a, const Point_<_Tp>& b ) diff --git a/modules/core/include/opencv2/core/utils/filesystem.private.hpp b/modules/core/include/opencv2/core/utils/filesystem.private.hpp index c6bd5b316a..80c9a5282c 100644 --- a/modules/core/include/opencv2/core/utils/filesystem.private.hpp +++ b/modules/core/include/opencv2/core/utils/filesystem.private.hpp @@ -13,7 +13,7 @@ /* not supported */ # elif defined __ANDROID__ || defined __linux__ || defined _WIN32 || \ defined __FreeBSD__ || defined __bsdi__ || defined __HAIKU__ || \ - defined __GNU__ + defined __GNU__ || defined __QNX__ # define OPENCV_HAVE_FILESYSTEM_SUPPORT 1 # elif defined(__APPLE__) # include diff --git a/modules/core/misc/js/gen_dict.json b/modules/core/misc/js/gen_dict.json new file mode 100644 index 0000000000..f1e892d86b --- /dev/null +++ b/modules/core/misc/js/gen_dict.json @@ -0,0 +1,15 @@ +{ + "whitelist": + { + "": [ + "absdiff", "add", "addWeighted", "bitwise_and", "bitwise_not", "bitwise_or", "bitwise_xor", "cartToPolar", + "compare", "convertScaleAbs", "copyMakeBorder", "countNonZero", "determinant", "dft", "divide", "eigen", + "exp", "flip", "getOptimalDFTSize","gemm", "hconcat", "inRange", "invert", "kmeans", "log", "magnitude", + "max", "mean", "meanStdDev", "merge", "min", "minMaxLoc", "mixChannels", "multiply", "norm", "normalize", + "perspectiveTransform", "polarToCart", "pow", "randn", "randu", "reduce", "repeat", "rotate", "setIdentity", "setRNGSeed", + "solve", "solvePoly", "split", "sqrt", "subtract", "trace", "transform", "transpose", "vconcat", + "setLogLevel", "getLogLevel", "LUT" + ], + "Algorithm": [] + } +} diff --git a/modules/core/perf/opencl/perf_arithm.cpp b/modules/core/perf/opencl/perf_arithm.cpp index 3319ab1272..4de20ca228 100644 --- a/modules/core/perf/opencl/perf_arithm.cpp +++ b/modules/core/perf/opencl/perf_arithm.cpp @@ -689,6 +689,24 @@ 
OCL_PERF_TEST_P(PowFixture, Pow, ::testing::Combine( SANITY_CHECK(dst, 1.5e-6, ERROR_RELATIVE); } +///////////// iPow //////////////////////// +OCL_PERF_TEST_P(PowFixture, iPow, ::testing::Combine( + OCL_TEST_SIZES, OCL_PERF_ENUM(CV_8UC1, CV_8SC1,CV_16UC1,CV_16SC1,CV_32SC1))) +{ + const Size_MatType_t params = GetParam(); + const Size srcSize = get<0>(params); + const int type = get<1>(params); + + checkDeviceMaxMemoryAllocSize(srcSize, type); + + UMat src(srcSize, type), dst(srcSize, type); + randu(src, 0, 100); + declare.in(src).out(dst); + + OCL_TEST_CYCLE() cv::pow(src, 7.0, dst); + + SANITY_CHECK_NOTHING(); +} ///////////// AddWeighted//////////////////////// typedef Size_MatType AddWeightedFixture; diff --git a/modules/core/src/check.cpp b/modules/core/src/check.cpp index fe2eb681ca..a6588db3b4 100644 --- a/modules/core/src/check.cpp +++ b/modules/core/src/check.cpp @@ -156,7 +156,12 @@ void check_failed_MatType(const int v, const CheckContext& ctx) } void check_failed_MatChannels(const int v, const CheckContext& ctx) { - check_failed_auto_(v, ctx); + std::stringstream ss; + ss << ctx.message << ":" << std::endl + << " '" << ctx.p2_str << "'" << std::endl + << "where" << std::endl + << " '" << ctx.p1_str << "' is " << v; + cv::error(cv::Error::BadNumChannels, ss.str(), ctx.func, ctx.file, ctx.line); } void check_failed_true(const bool v, const CheckContext& ctx) { diff --git a/modules/core/src/mathfuncs.cpp b/modules/core/src/mathfuncs.cpp index 6514468d80..ec99a22a13 100644 --- a/modules/core/src/mathfuncs.cpp +++ b/modules/core/src/mathfuncs.cpp @@ -791,7 +791,7 @@ struct iPow_SIMD #if (CV_SIMD || CV_SIMD_SCALABLE) template <> -struct iPow_SIMD +struct iPow_SIMD { int operator() ( const uchar * src, uchar * dst, int len, int power ) { @@ -871,7 +871,7 @@ struct iPow_SIMD }; template <> -struct iPow_SIMD +struct iPow_SIMD { int operator() ( const ushort * src, ushort * dst, int len, int power) { @@ -1203,16 +1203,6 @@ static bool ocl_pow(InputArray _src, 
double power, OutputArray _dst, _dst.createSameSize(_src, type); if (is_ipower) { - if (ipower == 0) - { - _dst.setTo(Scalar::all(1)); - return true; - } - if (ipower == 1) - { - _src.copyTo(_dst); - return true; - } if( ipower < 0 ) { if( depth == CV_32F || depth == CV_64F ) @@ -1271,11 +1261,7 @@ void pow( InputArray _src, double power, OutputArray _dst ) bool useOpenCL = _dst.isUMat() && _src.dims() <= 2; #endif - if( is_ipower -#ifdef HAVE_OPENCL - && !(useOpenCL && ocl::Device::getDefault().isIntel() && depth != CV_64F) -#endif - ) + if (is_ipower) { switch( ipower ) { @@ -1291,8 +1277,6 @@ void pow( InputArray _src, double power, OutputArray _dst ) return; } } - else - CV_Assert( depth == CV_32F || depth == CV_64F ); CV_OCL_RUN(useOpenCL, ocl_pow(_src, power, _dst, is_ipower, ipower)) diff --git a/modules/core/src/parallel.cpp b/modules/core/src/parallel.cpp index f9041d4921..01522b3b19 100644 --- a/modules/core/src/parallel.cpp +++ b/modules/core/src/parallel.cpp @@ -72,6 +72,10 @@ #endif #endif +#if defined (__QNX__) + #include +#endif + #ifndef OPENCV_DISABLE_THREAD_SUPPORT #include #endif @@ -1011,7 +1015,9 @@ int getNumberOfCPUs_() static unsigned cpu_count_sysconf = (unsigned)sysconf( _SC_NPROCESSORS_ONLN ); ncpus = minNonZero(ncpus, cpu_count_sysconf); - +#elif defined (__QNX__) + static unsigned cpu_count_sysconf = _syspage_ptr->num_cpu; + ncpus = minNonZero(ncpus, cpu_count_sysconf); #endif return ncpus != 0 ? 
ncpus : 1; diff --git a/modules/core/src/system.cpp b/modules/core/src/system.cpp index 893231191c..65da155bd4 100644 --- a/modules/core/src/system.cpp +++ b/modules/core/src/system.cpp @@ -46,6 +46,15 @@ #include #include +#ifdef __QNX__ + #include + #include + #include +#ifdef __aarch64__ + #include +#endif +#endif + #include #include diff --git a/modules/core/src/utils/filesystem.cpp b/modules/core/src/utils/filesystem.cpp index e7c8336b78..0a64bc994f 100644 --- a/modules/core/src/utils/filesystem.cpp +++ b/modules/core/src/utils/filesystem.cpp @@ -34,7 +34,7 @@ #include #include #include -#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __GNU__ || defined __EMSCRIPTEN__ +#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __GNU__ || defined __EMSCRIPTEN__ || defined __QNX__ #include #include #include @@ -194,7 +194,7 @@ cv::String getcwd() sz = GetCurrentDirectoryA((DWORD)buf.size(), buf.data()); return cv::String(buf.data(), (size_t)sz); #endif -#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __EMSCRIPTEN__ +#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __EMSCRIPTEN__ || defined __QNX__ for(;;) { char* p = ::getcwd(buf.data(), buf.size()); @@ -228,7 +228,7 @@ bool createDirectory(const cv::String& path) #else int result = _mkdir(path.c_str()); #endif -#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __EMSCRIPTEN__ +#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __EMSCRIPTEN__ || defined __QNX__ int result = mkdir(path.c_str(), 0777); #else int result = -1; @@ -343,7 +343,7 @@ private: Impl& operator=(const Impl&); // disabled }; -#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __GNU__ || defined 
__EMSCRIPTEN__ +#elif defined __linux__ || defined __APPLE__ || defined __HAIKU__ || defined __FreeBSD__ || defined __GNU__ || defined __EMSCRIPTEN__ || defined __QNX__ struct FileLock::Impl { diff --git a/modules/core/test/test_misc.cpp b/modules/core/test/test_misc.cpp index 827f9185db..2d5e616997 100644 --- a/modules/core/test/test_misc.cpp +++ b/modules/core/test/test_misc.cpp @@ -908,7 +908,22 @@ TYPED_TEST_P(Rect_Test, Overflows) { EXPECT_EQ(R(), R(20, 0, 10, 10) & R(0, num_lowest, 10, 10)); EXPECT_EQ(R(), R(num_lowest, 0, 10, 10) & R(0, num_lowest, 10, 10)); } -REGISTER_TYPED_TEST_CASE_P(Rect_Test, Overflows); + +// See https://github.com/opencv/opencv/issues/26016 +// Rect_.contains(Point_) needs template specialization. +// This is test for a point on the edge and its nearest points. +template T cv_nexttoward(T v, T v2); +template<> int cv_nexttoward(int v, int v2) { CV_UNUSED(v); return v2; } +template<> float cv_nexttoward(float v, float v2) { return std::nextafter(v,v2); } +template<> double cv_nexttoward(double v, double v2) { return std::nexttoward(v,v2); } +TYPED_TEST_P(Rect_Test, OnTheEdge) { + Rect_ rect(0,0,500,500); + TypeParam h = static_cast(rect.height); + ASSERT_TRUE ( rect.contains( Point_(250, cv_nexttoward(h, h - 1)))); + ASSERT_FALSE( rect.contains( Point_(250, cv_nexttoward(h, h )))); + ASSERT_FALSE( rect.contains( Point_(250, cv_nexttoward(h, h + 1)))); +} +REGISTER_TYPED_TEST_CASE_P(Rect_Test, Overflows, OnTheEdge); typedef ::testing::Types RectTypes; INSTANTIATE_TYPED_TEST_CASE_P(Negative_Test, Rect_Test, RectTypes); diff --git a/modules/core/test/test_operations.cpp b/modules/core/test/test_operations.cpp index 0bca7d61f2..a69c772542 100644 --- a/modules/core/test/test_operations.cpp +++ b/modules/core/test/test_operations.cpp @@ -42,7 +42,7 @@ #include "test_precomp.hpp" #include "opencv2/ts/ocl_test.hpp" // T-API like tests -#include "opencv2/core/core_c.h" +#include namespace opencv_test { namespace { @@ -1087,7 +1087,6 @@ bool 
CV_OperationsTest::operations1() Size sz(10, 20); if (sz.area() != 200) throw test_excep(); if (sz.width != 10 || sz.height != 20) throw test_excep(); - if (cvSize(sz).width != 10 || cvSize(sz).height != 20) throw test_excep(); Rect r1(0, 0, 10, 20); Size sz1(5, 10); @@ -1519,7 +1518,7 @@ TEST(Core_sortIdx, regression_8941) ); cv::Mat result; - cv::sortIdx(src.col(0), result, CV_SORT_EVERY_COLUMN | CV_SORT_ASCENDING); + cv::sortIdx(src.col(0), result, cv::SORT_EVERY_COLUMN | cv::SORT_ASCENDING); #if 0 std::cout << src.col(0) << std::endl; std::cout << result << std::endl; @@ -1598,9 +1597,12 @@ TEST_P(Core_Arith_Regression24163, test_for_ties_to_even) const Mat src2(matSize, matType, Scalar(beta, beta, beta, beta)); const Mat result = ( src1 + src2 ) / 2; - // Expected that default is FE_TONEAREST(Ties to Even). + const int rounding = fegetround(); + fesetround(FE_TONEAREST); const int mean = (int)lrint( static_cast(alpha + beta) / 2.0 ); - const Mat expected(matSize, matType, Scalar(mean,mean,mean,mean)); + fesetround(rounding); + + const Mat expected(matSize, matType, Scalar::all(mean)); // Compare result and extected. ASSERT_EQ(expected.size(), result.size()); diff --git a/modules/dnn/include/opencv2/dnn/all_layers.hpp b/modules/dnn/include/opencv2/dnn/all_layers.hpp index 525f955745..c4fd9ef971 100644 --- a/modules/dnn/include/opencv2/dnn/all_layers.hpp +++ b/modules/dnn/include/opencv2/dnn/all_layers.hpp @@ -1203,6 +1203,12 @@ CV__DNN_INLINE_NS_BEGIN static Ptr create(const LayerParams ¶ms); }; + class CV_EXPORTS TopKLayer : public Layer + { + public: + static Ptr create(const LayerParams& params); + }; + //! @} //! 
@} CV__DNN_INLINE_NS_END diff --git a/modules/dnn/misc/js/gen_dict.json b/modules/dnn/misc/js/gen_dict.json new file mode 100644 index 0000000000..4b83a0e102 --- /dev/null +++ b/modules/dnn/misc/js/gen_dict.json @@ -0,0 +1,12 @@ +{ + "whitelist": + { + "dnn_Net": ["setInput", "forward", "setPreferableBackend","getUnconnectedOutLayersNames"], + "": ["readNetFromCaffe", "readNetFromTensorflow", "readNetFromTorch", "readNetFromDarknet", + "readNetFromONNX", "readNetFromTFLite", "readNet", "blobFromImage"] + }, + "namespace_prefix_override": + { + "dnn": "" + } +} diff --git a/modules/dnn/perf/perf_layer.cpp b/modules/dnn/perf/perf_layer.cpp index a3b77b7111..ad16e9c0ab 100644 --- a/modules/dnn/perf/perf_layer.cpp +++ b/modules/dnn/perf/perf_layer.cpp @@ -1041,4 +1041,67 @@ INSTANTIATE_TEST_CASE_P(/**/, Layer_Elementwise, /* withWebnn= */ false, /* withCann= */ false)); +struct Layer_TopK : public TestBaseWithParam> { + void test_layer(const std::vector &input_shape, const int K, const int axis) { + int backend_id = get<0>(GetParam()); + int target_id = get<1>(GetParam()); + + Mat input_data(input_shape, CV_32F); + randn(input_data, -1.f, 1.f); + + Net net; + LayerParams lp; + lp.type = "TopK"; + lp.name = "testLayer"; + lp.set("k", K); + lp.set("axis", axis); + net.addLayerToPrev(lp.name, lp.type, lp); + + // Warmup + { + net.setInput(input_data); + net.setPreferableBackend(backend_id); + net.setPreferableTarget(target_id); + net.forward(); + } + + TEST_CYCLE() { + net.forward(); + } + + SANITY_CHECK_NOTHING(); + } + + std::vector input_shape_2d{1000, 100}; + std::vector input_shape_3d{100, 100, 100}; +}; + +PERF_TEST_P_(Layer_TopK, TopK_2D_Axis0) { + test_layer(input_shape_2d, input_shape_2d[0] / 2, 0); +} +PERF_TEST_P_(Layer_TopK, TopK_2D_Axis0_K5) { + test_layer(input_shape_2d, 5, 0); +} +PERF_TEST_P_(Layer_TopK, TopK_2D_Axis1) { + test_layer(input_shape_2d, input_shape_2d[1] / 2, 1); +} +PERF_TEST_P_(Layer_TopK, TopK_3D_Axis0) { + test_layer(input_shape_3d, 
input_shape_3d[0] / 2, 0); +} +PERF_TEST_P_(Layer_TopK, TopK_3D_Axis1) { + test_layer(input_shape_3d, input_shape_3d[1] / 2, 1); +} +PERF_TEST_P_(Layer_TopK, TopK_3D_Axis2) { + test_layer(input_shape_3d, input_shape_3d[2] / 2, 2); +} +INSTANTIATE_TEST_CASE_P(/**/, Layer_TopK, + dnnBackendsAndTargets(/* withInferenceEngine= */ false, + /* withHalide= */ false, + /* withCpuOCV= */ true, + /* withVkCom= */ false, + /* withCUDA= */ false, + /* withNgraph= */ false, + /* withWebnn= */ false, + /* withCann= */ false)); + } // namespace diff --git a/modules/dnn/src/cuda4dnn/csl/cublas.hpp b/modules/dnn/src/cuda4dnn/csl/cublas.hpp index 96cf70fab9..65e41a1399 100644 --- a/modules/dnn/src/cuda4dnn/csl/cublas.hpp +++ b/modules/dnn/src/cuda4dnn/csl/cublas.hpp @@ -425,8 +425,8 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cu const auto batch_count = static_cast(batchCount); - AutoBuffer buffer(3 * batch_count); - auto A_slices = (half**)(buffer.data()); + AutoBuffer buffer(3 * batch_count); + auto A_slices = buffer.data(); auto B_slices = A_slices + batch_count; auto C_slices = B_slices + batch_count; // collect A, B and C slices @@ -438,18 +438,18 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cu const half **dev_A_slices = 0, **dev_B_slices = 0; half **dev_C_slices = 0; - cudaMalloc((void**)&dev_A_slices, batch_count * sizeof(half*)); - cudaMalloc((void**)&dev_B_slices, batch_count * sizeof(half*)); - cudaMalloc((void**)&dev_C_slices, batch_count * sizeof(half*)); - cudaMemcpy(dev_A_slices, A_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice); - cudaMemcpy(dev_B_slices, B_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice); - cudaMemcpy(dev_C_slices, C_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice); + CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_A_slices, batch_count * sizeof(half*))); + CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_B_slices, batch_count * sizeof(half*))); + 
CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_C_slices, batch_count * sizeof(half*))); + CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_A_slices, A_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice)); + CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_B_slices, B_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice)); + CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_C_slices, C_slices, batch_count * sizeof(half*), cudaMemcpyHostToDevice)); CUDA4DNN_CHECK_CUBLAS(cublasHgemmBatched(handle.get(), opa, opb, iM, iN, iK, &alpha, dev_A_slices, ilda, dev_B_slices, ildb, &beta, dev_C_slices, ildc, batch_count)); - cudaFree(dev_A_slices); - cudaFree(dev_B_slices); - cudaFree(dev_C_slices); + CUDA4DNN_CHECK_CUDA(cudaFree(dev_A_slices)); + CUDA4DNN_CHECK_CUDA(cudaFree(dev_B_slices)); + CUDA4DNN_CHECK_CUDA(cudaFree(dev_C_slices)); } template <> inline @@ -475,8 +475,8 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cu const auto batch_count = static_cast(batchCount); - AutoBuffer buffer(3 * batch_count); - auto A_slices = (float**)(buffer.data()); + AutoBuffer buffer(3 * batch_count); + auto A_slices = buffer.data(); auto B_slices = A_slices + batch_count; auto C_slices = B_slices + batch_count; // collect A, B and C slices @@ -488,19 +488,19 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cu const float **dev_A_slices = 0, **dev_B_slices = 0; float **dev_C_slices = 0; - cudaMalloc((void**)&dev_A_slices, batch_count * sizeof(float*)); - cudaMalloc((void**)&dev_B_slices, batch_count * sizeof(float*)); - cudaMalloc((void**)&dev_C_slices, batch_count * sizeof(float*)); - cudaMemcpy(dev_A_slices, A_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice); - cudaMemcpy(dev_B_slices, B_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice); - cudaMemcpy(dev_C_slices, C_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice); + CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_A_slices, batch_count * sizeof(float*))); + 
CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_B_slices, batch_count * sizeof(float*))); + CUDA4DNN_CHECK_CUDA(cudaMalloc((void**)&dev_C_slices, batch_count * sizeof(float*))); + CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_A_slices, A_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice)); + CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_B_slices, B_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice)); + CUDA4DNN_CHECK_CUDA(cudaMemcpy(dev_C_slices, C_slices, batch_count * sizeof(float*), cudaMemcpyHostToDevice)); // cuBLAS is column-major CUDA4DNN_CHECK_CUBLAS(cublasSgemmBatched(handle.get(), opa, opb, iM, iN, iK, &alpha, dev_A_slices, ilda, dev_B_slices, ildb, &beta, dev_C_slices, ildc, batch_count)); - cudaFree(dev_A_slices); - cudaFree(dev_B_slices); - cudaFree(dev_C_slices); + CUDA4DNN_CHECK_CUDA(cudaFree(dev_A_slices)); + CUDA4DNN_CHECK_CUDA(cudaFree(dev_B_slices)); + CUDA4DNN_CHECK_CUDA(cudaFree(dev_C_slices)); } }}}}} /* namespace cv::dnn::cuda4dnn::csl::cublas */ diff --git a/modules/dnn/src/init.cpp b/modules/dnn/src/init.cpp index b2946fc9ee..e99f62284e 100644 --- a/modules/dnn/src/init.cpp +++ b/modules/dnn/src/init.cpp @@ -200,6 +200,7 @@ void initializeLayerFactory() CV_DNN_REGISTER_LAYER_CLASS(Scatter, ScatterLayer); CV_DNN_REGISTER_LAYER_CLASS(ScatterND, ScatterNDLayer); CV_DNN_REGISTER_LAYER_CLASS(Tile, TileLayer); + CV_DNN_REGISTER_LAYER_CLASS(TopK, TopKLayer); CV_DNN_REGISTER_LAYER_CLASS(Quantize, QuantizeLayer); CV_DNN_REGISTER_LAYER_CLASS(Dequantize, DequantizeLayer); diff --git a/modules/dnn/src/layers/einsum_layer.cpp b/modules/dnn/src/layers/einsum_layer.cpp index f48eb329ab..771b4a097e 100644 --- a/modules/dnn/src/layers/einsum_layer.cpp +++ b/modules/dnn/src/layers/einsum_layer.cpp @@ -459,6 +459,7 @@ public: { CV_TRACE_FUNCTION(); CV_TRACE_ARG_VALUE(name, "name", name.c_str()); + CV_CheckEQ((size_t)inputs_arr.total(), (size_t)numInputs, "Number of inputs in forward and inputs during graph constructions do not match"); if (inputs_arr.depth() == 
CV_16F) { @@ -541,7 +542,7 @@ public: // Use either the preprocessed inputs (if it is available) or the corresponding raw inputs result = pairwiseOperandProcess(!result.empty() ? result : rawInputs[0], !result.empty() ? tmpResult : homogenizedInputDims[0], - (!preProcessedInputs.empty() && !preProcessedInputs[input].empty()) ? preProcessedInputs[input] : rawInputs[input], + (!preProcessedInputs[input].empty()) ? preProcessedInputs[input] : rawInputs[input], homogenizedInputDims[input], reducedDims, isFinalPair); @@ -605,8 +606,8 @@ void LayerEinsumImpl::preProcessInputs(InputArrayOfArrays& inputs_arr) std::vector inputs; inputs_arr.getMatVector(inputs); - preProcessedInputs.reserve(inputs.size()); - homogenizedInputDims.reserve(inputs.size()); + preProcessedInputs.resize(inputs.size()); + homogenizedInputDims.resize(inputs.size()); int inputIter = 0; for(const Mat& input : inputs) @@ -616,7 +617,7 @@ void LayerEinsumImpl::preProcessInputs(InputArrayOfArrays& inputs_arr) // variable to hold processed version of the original input MatShape input_dims = shape(input); if (input_dims.empty()){ - homogenizedInputDims.emplace_back(MatShape(numLetterIndices, 1)); + homogenizedInputDims[inputIter] = MatShape(numLetterIndices, 1); ++inputIter; continue; } @@ -672,9 +673,9 @@ void LayerEinsumImpl::preProcessInputs(InputArrayOfArrays& inputs_arr) { preprocessed = preprocessed.reshape(1, homogenizedInputDims_.size(), homogenizedInputDims_.data()); } + preProcessedInputs[inputIter] = preprocessed; + homogenizedInputDims[inputIter] = homogenizedInputDims_; - preProcessedInputs.emplace_back(preprocessed); - homogenizedInputDims.emplace_back(homogenizedInputDims_); ++inputIter; } } diff --git a/modules/dnn/src/layers/elementwise_layers.cpp b/modules/dnn/src/layers/elementwise_layers.cpp index ca1662d7c0..ab224b68a9 100644 --- a/modules/dnn/src/layers/elementwise_layers.cpp +++ b/modules/dnn/src/layers/elementwise_layers.cpp @@ -1520,10 +1520,10 @@ struct RoundFunctor : public 
BaseDefaultFunctor inline float calculate(float x) const { // Rounds to even numbers in halfway cases, so 2.5 -> 2, -2.5 -> -2 - int old_rounding_direction = std::fegetround(); - std::fesetround(FE_TONEAREST); + int old_rounding_direction = fegetround(); + fesetround(FE_TONEAREST); float y = std::nearbyint(x); - std::fesetround(old_rounding_direction); + fesetround(old_rounding_direction); return y; } diff --git a/modules/dnn/src/layers/topk_layer.cpp b/modules/dnn/src/layers/topk_layer.cpp new file mode 100644 index 0000000000..06b3ebdc37 --- /dev/null +++ b/modules/dnn/src/layers/topk_layer.cpp @@ -0,0 +1,228 @@ +// This file is part of OpenCV project. +// It is subject to the license terms in the LICENSE file found in the top-level directory +// of this distribution and at http://opencv.org/license.html. + +#include "../precomp.hpp" +#include "layers_common.hpp" + +#include + +namespace cv { namespace dnn { + +namespace { + +template +class ComparatorGreater { +public: + ComparatorGreater(const T* data, size_t step) + : data_(data), step_(step) {} + + void addOffset(size_t offset) { + data_ += offset; + } + + void minusOffset(size_t offset) { + data_ -= offset; + } + + bool operator()(const size_t lhs_idx, const size_t rhs_idx) { + T lhs = *(data_ + lhs_idx * step_), + rhs = *(data_ + rhs_idx * step_); + return (lhs > rhs || (lhs == rhs && lhs_idx < rhs_idx)); + } + +private: + const T* data_; + size_t step_; +}; + +template +class ComparatorLess { +public: + ComparatorLess(const T* data, size_t step) + : data_(data), step_(step) {} + + void addOffset(size_t offset) { + data_ += offset; + } + + void minusOffset(size_t offset) { + data_ -= offset; + } + + bool operator()(const size_t lhs_idx, const size_t rhs_idx) { + T lhs = *(data_ + lhs_idx * step_), + rhs = *(data_ + rhs_idx * step_); + return (lhs < rhs || (lhs == rhs && lhs_idx < rhs_idx)); + } + +private: + const T* data_; + size_t step_; +}; +} + +class TopKLayerImpl CV_FINAL : public TopKLayer +{ 
+public: + TopKLayerImpl(const LayerParams& params) + { + setParamsFrom(params); + + axis = params.get("axis", -1); + largest = params.get("largest", 1) == 1; + sorted = params.get("sorted", 1) == 1; + CV_CheckTrue(sorted, "TopK: sorted == false is not supported"); // TODO: support sorted + + CV_CheckTrue(params.has("k"), "TopK: parameter k is required but missing"); + K = params.get("k"); + } + + virtual bool supportBackend(int backendId) CV_OVERRIDE + { + return backendId == DNN_BACKEND_OPENCV; + } + + virtual bool getMemoryShapes(const std::vector &inputs, + const int requiredOutputs, + std::vector &outputs, + std::vector &internals) const CV_OVERRIDE + { + const auto &input_shape = inputs.front(); + int input_dims = input_shape.size(); + + // Check if axis is valid + CV_CheckGE(axis, -input_dims, "TopK: axis is out of range"); + CV_CheckLT(axis, input_dims, "TopK: axis is out of range"); + // Normalize axis + int axis_normalized = normalize_axis(axis, input_shape.size()); + + // Check if K is in range (0, input_shape[axis]) + CV_CheckGT(K, 0, "TopK: K needs to be a positive integer"); + CV_CheckLT(K, input_shape[axis_normalized], "TopK: K is out of range"); + + // Assign output shape + auto output_shape = input_shape; + output_shape[axis_normalized] = K; + outputs.assign(1, output_shape); + outputs.assign(2, output_shape); // TODO: support indices of type CV_32S on 5.x + + return false; + } + + virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE { + std::vector inputs; + inputs_arr.getMatVector(inputs); + + // Normalize axis + auto input_shape = shape(inputs.front()); + axis = normalize_axis(axis, input_shape.size()); + } + + template + void FindTopK(const Mat &input, Mat &output_value, Mat &output_index) { + const auto input_shape = shape(input); + size_t loops = std::accumulate(input_shape.begin(), input_shape.begin() + axis, 1, std::multiplies()); + size_t step = std::accumulate(input_shape.begin() + axis + 1, 
input_shape.end(), 1, std::multiplies()); + int dim_axis = input_shape[axis]; + if (loops == 1) { + auto worker = [&](const Range &r) { + const auto *input_ptr = input.ptr(); // TODO: support other input type + auto *output_value_ptr = output_value.ptr(); + auto *output_index_ptr = output_index.ptr(); // TODO: use CV_32S on 5.x + + Comparator cmp(input_ptr, step); + + AutoBuffer buffer_index(dim_axis); + auto *buffer_index_ptr = buffer_index.data(); + for (int offset = r.start; offset < r.end; offset++) { + const auto *input_offset_ptr = input_ptr + offset; + cmp.addOffset(offset); + + std::iota(buffer_index_ptr, buffer_index_ptr + dim_axis, 0); + std::stable_sort(buffer_index_ptr, buffer_index_ptr + dim_axis, cmp); + + auto *output_value_offset_ptr = output_value_ptr + offset; + auto *output_index_offset_ptr = output_index_ptr + offset; + for (int i = 0; i < K; i++) { + int source_index = buffer_index_ptr[i]; + output_value_offset_ptr[i * step] = *(input_offset_ptr + source_index * step); + output_index_offset_ptr[i * step] = source_index; + } + cmp.minusOffset(offset); + } + }; + parallel_for_(Range(0, step), worker); + } else { + auto worker = [&](const Range &r) { + const auto *input_ptr = input.ptr(); + auto *output_value_ptr = output_value.ptr(); + auto *output_index_ptr = output_index.ptr(); + + Comparator cmp(input_ptr, step); + + AutoBuffer buffer_index(dim_axis); + auto *buffer_index_ptr = buffer_index.data(); + for (int batch_index = r.start; batch_index < r.end; batch_index++) { + for (size_t offset = 0; offset < step; offset++) { + const auto *input_offset_ptr = input_ptr + batch_index * dim_axis * step + offset; + cmp.addOffset(batch_index * dim_axis * step + offset); + + std::iota(buffer_index_ptr, buffer_index_ptr + dim_axis, 0); + std::stable_sort(buffer_index_ptr, buffer_index_ptr + dim_axis, cmp); + + auto *output_value_offset_ptr = output_value_ptr + batch_index * K * step + offset; + auto *output_index_offset_ptr = output_index_ptr + 
batch_index * K * step + offset; + for (int i = 0; i < K; i++) { + int source_index = buffer_index_ptr[i]; + output_value_offset_ptr[i * step] = *(input_offset_ptr + source_index * step); + output_index_offset_ptr[i * step] = source_index; + } + cmp.minusOffset(batch_index * dim_axis * step + offset); + } + } + }; + parallel_for_(Range(0, loops), worker); + } + } + + void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE + { + CV_TRACE_FUNCTION(); + CV_TRACE_ARG_VALUE(name, "name", name.c_str()); + + if (inputs_arr.depth() == CV_16F) + { + forward_fallback(inputs_arr, outputs_arr, internals_arr); + return; + } + + std::vector inputs, outputs; + inputs_arr.getMatVector(inputs); + outputs_arr.getMatVector(outputs); + + const auto &input = inputs.front(); + auto &output_value = outputs.front(); + auto &output_index = outputs.back(); + + if (largest) { + FindTopK>(input, output_value, output_index); + } else { + FindTopK>(input, output_value, output_index); + } + } + +private: + int axis; + bool largest; + bool sorted; + + int K; // FIXIT: make it layer input once dynamic shape is supported +}; + +Ptr TopKLayer::create(const LayerParams& params) +{ + return makePtr(params); +} + +}} // namespace cv::dnn diff --git a/modules/dnn/src/onnx/onnx_importer.cpp b/modules/dnn/src/onnx/onnx_importer.cpp index c5f7dfacbe..87761298df 100644 --- a/modules/dnn/src/onnx/onnx_importer.cpp +++ b/modules/dnn/src/onnx/onnx_importer.cpp @@ -195,6 +195,7 @@ private: void parseScatter (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto); void parseTile (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto); void parseLayerNorm (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto); + void parseTopK (LayerParams& LayerParams, const opencv_onnx::NodeProto& node_proto); void parseSimpleLayers (LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto); void parseEinsum 
(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto); @@ -3162,6 +3163,21 @@ void ONNXImporter::parseLayerNorm(LayerParams& layerParams, const opencv_onnx::N } } +void ONNXImporter::parseTopK(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto) +{ + // K needs to be constant in case of being input (since opset 10) + if (node_proto.input_size() == 2) { + bool K_const = constBlobs.find(node_proto.input(1)) != constBlobs.end(); + CV_CheckTrue(K_const, "OnnxImporter/TopK: K being non-constant is not supported"); + + Mat input_K = getBlob(node_proto, 1); + int K = input_K.at(0); + layerParams.set("k", K); + } + + addLayer(layerParams, node_proto); +} + void ONNXImporter::parseSimpleLayers(LayerParams& layerParams, const opencv_onnx::NodeProto& node_proto) { bool is_all_input_const = true; @@ -3972,6 +3988,7 @@ void ONNXImporter::buildDispatchMap_ONNX_AI(int opset_version) dispatch["Tile"] = &ONNXImporter::parseTile; dispatch["LayerNormalization"] = &ONNXImporter::parseLayerNorm; dispatch["GroupNormalization"] = &ONNXImporter::parseInstanceNormalization; + dispatch["TopK"] = &ONNXImporter::parseTopK; dispatch["Equal"] = dispatch["Greater"] = dispatch["Less"] = dispatch["Pow"] = dispatch["Add"] = dispatch["Sub"] = dispatch["Mul"] = dispatch["Div"] = dispatch["GreaterOrEqual"] = diff --git a/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp16_denylist.inl.hpp b/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp16_denylist.inl.hpp index 8dc970fe1e..ca2ffe42f1 100644 --- a/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp16_denylist.inl.hpp +++ b/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp16_denylist.inl.hpp @@ -2,20 +2,8 @@ "test_dequantizelinear", "test_dequantizelinear_axis", "test_dequantizelinear_blocked", -"test_dropout_default_ratio", -"test_globalmaxpool", -"test_globalmaxpool_precomputed", "test_logsoftmax_large_number", "test_logsoftmax_large_number_expanded", 
-"test_maxpool_1d_default", -"test_maxpool_2d_ceil", -"test_maxpool_2d_default", -"test_maxpool_2d_pads", -"test_maxpool_2d_precomputed_pads", -"test_maxpool_2d_precomputed_same_upper", -"test_maxpool_2d_precomputed_strides", -"test_maxpool_2d_same_upper", -"test_maxpool_2d_strides", "test_maxpool_3d_default", "test_pow", "test_quantizelinear", @@ -23,12 +11,7 @@ "test_quantizelinear_blocked", "test_softmax_large_number", "test_softmax_large_number_expanded", -"test_split_equal_parts_1d", -"test_split_equal_parts_2d", -"test_split_equal_parts_default_axis", "test_tan", -"test_reduce_l2_default_axes_keepdims_example", // Expected: (normL1) <= (l1), actual: 0.00490189 vs 0.004 -"test_reduce_log_sum_exp_default_axes_keepdims_example", // Expected: (normL1) <= (l1), actual: 0.00671387 vs 0.004 "test_reduce_prod_default_axes_keepdims_example", // Expected: (normL1) <= (l1), actual: inf vs 0.004 "test_reduce_prod_default_axes_keepdims_random", // Expected: (normL1) <= (l1), actual: 18.6621 vs 0.004, Expected: (normInf) <= (lInf), actual: 18.6621 vs 0.02 "test_reduce_prod_do_not_keepdims_random", // Expected: (normL1) <= (l1), actual: 0.00436729 vs 0.004, Expected: (normInf) <= (lInf), actual: 0.0201836 vs 0.02 @@ -38,16 +21,3 @@ "test_reduce_sum_square_do_not_keepdims_random", // Expected: (normL1) <= (l1), actual: 0.010789 vs 0.004, Expected: (normInf) <= (lInf), actual: 0.0290298 vs 0.02 "test_reduce_sum_square_keepdims_random", // Expected: (normL1) <= (l1), actual: 0.010789 vs 0.004, Expected: (normInf) <= (lInf), actual: 0.0290298 vs 0.02 "test_reduce_sum_square_negative_axes_keepdims_random", // Expected: (normL1) <= (l1), actual: 0.010789 vs 0.004, Expected: (normInf) <= (lInf), actual: 0.0290298 vs 0.02 -"test_scatter_elements_with_axis", -"test_scatter_elements_with_duplicate_indices", -"test_scatter_elements_with_negative_indices", -"test_scatter_elements_with_reduction_max", -"test_scatter_elements_with_reduction_min", -"test_scatter_elements_without_axis", 
-"test_scatter_with_axis", -"test_scatter_without_axis", -"test_scatternd", -"test_scatternd_add", -"test_scatternd_max", -"test_scatternd_min", -"test_scatternd_multiply", diff --git a/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp32_denylist.inl.hpp b/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp32_denylist.inl.hpp index 2453e2ad9f..99eb89409a 100644 --- a/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp32_denylist.inl.hpp +++ b/modules/dnn/test/test_onnx_conformance_layer_filter_opencv_ocl_fp32_denylist.inl.hpp @@ -6,16 +6,3 @@ "test_quantizelinear", "test_quantizelinear_axis", "test_quantizelinear_blocked", -"test_scatter_elements_with_axis", -"test_scatter_elements_with_duplicate_indices", -"test_scatter_elements_with_negative_indices", -"test_scatter_elements_with_reduction_max", -"test_scatter_elements_with_reduction_min", -"test_scatter_elements_without_axis", -"test_scatter_with_axis", -"test_scatter_without_axis", -"test_scatternd", -"test_scatternd_add", -"test_scatternd_max", -"test_scatternd_min", -"test_scatternd_multiply", diff --git a/modules/dnn/test/test_onnx_importer.cpp b/modules/dnn/test/test_onnx_importer.cpp index 743c19a5e6..85d18a4e71 100644 --- a/modules/dnn/test/test_onnx_importer.cpp +++ b/modules/dnn/test/test_onnx_importer.cpp @@ -1019,6 +1019,10 @@ TEST_P(Test_ONNX_layers, MatMul_init_bcast) testONNXModels("matmul_init_bcast"); } +TEST_P(Test_ONNX_layers, MatMul_bcast_3dx2d) { + testONNXModels("matmul_bcast"); +} + TEST_P(Test_ONNX_layers, MatMulAdd) { #if defined(INF_ENGINE_RELEASE) && INF_ENGINE_VER_MAJOR_EQ(2022010000) @@ -3274,6 +3278,40 @@ TEST_P(Test_ONNX_layers, ClipDivSharedConstant) { testONNXModels("clip_div_shared_constant"); } +// Bug: https://github.com/opencv/opencv/issues/26076 +TEST_P(Test_ONNX_layers, DISABLED_TopK) { + auto test = [&](const std::string &basename, double l1 = 0, double lInf = 0) { + std::string onnxmodel = _tf("models/" + basename + ".onnx", 
true); + Mat input = readTensorFromONNX(_tf("data/input_" + basename + ".pb")); + Mat output_ref_val = readTensorFromONNX(_tf("data/output_" + basename + "_0.pb")), + output_ref_ind = readTensorFromONNX(_tf("data/output_" + basename + "_1.pb")); + + checkBackend(&input, &output_ref_val); + checkBackend(&input, &output_ref_ind); + Net net = readNetFromONNX(onnxmodel); + net.setPreferableBackend(backend); + net.setPreferableTarget(target); + + net.setInput(input); + std::vector outputs; + net.forward(outputs, std::vector{"values", "indices"}); + + Mat output_res_val = outputs.front(), + output_res_ind = outputs.back(); + + output_ref_ind.convertTo(output_ref_ind, CV_32F); // TODO: revise this conversion in 5.x + + normAssert(output_ref_val, output_res_val, (basename + " values").c_str(), l1 ? l1 : default_l1, lInf ? lInf : default_lInf); + normAssert(output_ref_ind, output_res_ind, (basename + " indices").c_str(), l1 ? l1 : default_l1, lInf ? lInf : default_lInf); + + expectNoFallbacksFromIE(net); + }; + + test("top_k"); + test("top_k_negative_axis"); + test("top_k_smallest"); +} + INSTANTIATE_TEST_CASE_P(/**/, Test_ONNX_nets, dnnBackendsAndTargets()); }} // namespace diff --git a/modules/features2d/misc/js/gen_dict.json b/modules/features2d/misc/js/gen_dict.json new file mode 100644 index 0000000000..6d79e42ac8 --- /dev/null +++ b/modules/features2d/misc/js/gen_dict.json @@ -0,0 +1,19 @@ +{ + "whitelist": + { + "Feature2D": ["detect", "compute", "detectAndCompute", "descriptorSize", "descriptorType", "defaultNorm", "empty", "getDefaultName"], + "BRISK": ["create", "getDefaultName"], + "ORB": ["create", "setMaxFeatures", "setScaleFactor", "setNLevels", "setEdgeThreshold", "setFastThreshold", "setFirstLevel", "setWTA_K", "setScoreType", "setPatchSize", "getFastThreshold", "getDefaultName"], + "MSER": ["create", "detectRegions", "setDelta", "getDelta", "setMinArea", "getMinArea", "setMaxArea", "getMaxArea", "setPass2Only", "getPass2Only", "getDefaultName"], + 
"FastFeatureDetector": ["create", "setThreshold", "getThreshold", "setNonmaxSuppression", "getNonmaxSuppression", "setType", "getType", "getDefaultName"], + "AgastFeatureDetector": ["create", "setThreshold", "getThreshold", "setNonmaxSuppression", "getNonmaxSuppression", "setType", "getType", "getDefaultName"], + "GFTTDetector": ["create", "setMaxFeatures", "getMaxFeatures", "setQualityLevel", "getQualityLevel", "setMinDistance", "getMinDistance", "setBlockSize", "getBlockSize", "setHarrisDetector", "getHarrisDetector", "setK", "getK", "getDefaultName"], + "SimpleBlobDetector": ["create", "setParams", "getParams", "getDefaultName"], + "SimpleBlobDetector_Params": [], + "KAZE": ["create", "setExtended", "getExtended", "setUpright", "getUpright", "setThreshold", "getThreshold", "setNOctaves", "getNOctaves", "setNOctaveLayers", "getNOctaveLayers", "setDiffusivity", "getDiffusivity", "getDefaultName"], + "AKAZE": ["create", "setDescriptorType", "getDescriptorType", "setDescriptorSize", "getDescriptorSize", "setDescriptorChannels", "getDescriptorChannels", "setThreshold", "getThreshold", "setNOctaves", "getNOctaves", "setNOctaveLayers", "getNOctaveLayers", "setDiffusivity", "getDiffusivity", "getDefaultName"], + "DescriptorMatcher": ["add", "clear", "empty", "isMaskSupported", "train", "match", "knnMatch", "radiusMatch", "clone", "create"], + "BFMatcher": ["isMaskSupported", "create"], + "": ["drawKeypoints", "drawMatches", "drawMatchesKnn"] + } +} diff --git a/modules/features2d/src/keypoint.cpp b/modules/features2d/src/keypoint.cpp index 4d2007f6d7..aad9fe36e3 100644 --- a/modules/features2d/src/keypoint.cpp +++ b/modules/features2d/src/keypoint.cpp @@ -96,7 +96,9 @@ struct RoiPredicate bool operator()( const KeyPoint& keyPt ) const { - return !r.contains( keyPt.pt ); + // workaround for https://github.com/opencv/opencv/issues/26016 + // To keep its behaviour, keyPt.pt casts to Point_. 
+ return !r.contains( Point_(keyPt.pt) ); } Rect r; diff --git a/modules/gapi/src/backends/ie/giebackend.cpp b/modules/gapi/src/backends/ie/giebackend.cpp index 4fc4fe9a8d..119539db42 100644 --- a/modules/gapi/src/backends/ie/giebackend.cpp +++ b/modules/gapi/src/backends/ie/giebackend.cpp @@ -1232,8 +1232,8 @@ void cv::gimpl::ie::GIEExecutable::run(cv::gimpl::GIslandExecutable::IInput &in // General algorithm: // 1. Collect island inputs/outputs. // 2. Create kernel context. (Every kernel has his own context). - // 3. If the EndOfStream message is recieved, wait until all passed task are done. - // 4. If the Exception message is revieved, propagate it further. + // 3. If the EndOfStream message is received, wait until all passed task are done. + // 4. If the Exception message is received, propagate it further. // 5. // 5.1 Run the kernel. // 5.2 Kernel wait for all nececcary infer requests and start asynchronous execution. diff --git a/modules/gapi/src/backends/ov/govbackend.cpp b/modules/gapi/src/backends/ov/govbackend.cpp index dde4da2bb7..ed2b6fd94e 100644 --- a/modules/gapi/src/backends/ov/govbackend.cpp +++ b/modules/gapi/src/backends/ov/govbackend.cpp @@ -622,7 +622,7 @@ static void PostOutputs(::ov::InferRequest &infer_request, ctx->eptr = std::move(eptr); for (auto i : ade::util::iota(ctx->uu.params.num_out)) { - // NB: Copy data back only if execution finished sucessfuly + // NB: Copy data back only if execution finished successfully // and inference only mode is disabled. // Otherwise just post outputs to maintain streaming executor contract. 
if (!ctx->eptr && !ctx->getOptions().inference_only) { diff --git a/modules/gapi/test/streaming/gapi_streaming_tests.cpp b/modules/gapi/test/streaming/gapi_streaming_tests.cpp index e50e11d5c8..10f8df820f 100644 --- a/modules/gapi/test/streaming/gapi_streaming_tests.cpp +++ b/modules/gapi/test/streaming/gapi_streaming_tests.cpp @@ -316,7 +316,7 @@ public: static std::string exception_msg() { - return "InvalidSource sucessfuly failed!"; + return "InvalidSource successfully failed!"; } bool pull(cv::gapi::wip::Data& d) override { @@ -355,7 +355,7 @@ GAPI_OCV_KERNEL(GThrowExceptionKernel, GThrowExceptionOp) { static std::string exception_msg() { - return "GThrowExceptionKernel sucessfuly failed"; + return "GThrowExceptionKernel successfully failed"; } static void run(const cv::Mat&, cv::Mat&) diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index a39c36de92..b9f3cb10de 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -236,7 +236,7 @@ enum MorphShapes { MORPH_CROSS = 1, //!< a cross-shaped structuring element: //!< \f[E_{ij} = \begin{cases} 1 & \texttt{if } {i=\texttt{anchor.y } {or } {j=\texttt{anchor.x}}} \\0 & \texttt{otherwise} \end{cases}\f] MORPH_ELLIPSE = 2 //!< an elliptic structuring element, that is, a filled ellipse inscribed - //!< into the rectangle Rect(0, 0, esize.width, 0.esize.height) + //!< into the rectangle Rect(0, 0, esize.width, esize.height) }; //! 
@} imgproc_filter diff --git a/modules/imgproc/misc/js/gen_dict.json b/modules/imgproc/misc/js/gen_dict.json new file mode 100644 index 0000000000..7b8c959fdd --- /dev/null +++ b/modules/imgproc/misc/js/gen_dict.json @@ -0,0 +1,95 @@ +{ + "whitelist": + { + "": [ + "Canny", + "GaussianBlur", + "Laplacian", + "HoughLines", + "HoughLinesP", + "HoughCircles", + "Scharr", + "Sobel", + "adaptiveThreshold", + "approxPolyDP", + "arcLength", + "bilateralFilter", + "blur", + "boundingRect", + "boxFilter", + "calcBackProject", + "calcHist", + "circle", + "compareHist", + "connectedComponents", + "connectedComponentsWithStats", + "contourArea", + "convexHull", + "convexityDefects", + "cornerHarris", + "cornerMinEigenVal", + "createCLAHE", + "createLineSegmentDetector", + "cvtColor", + "demosaicing", + "dilate", + "distanceTransform", + "distanceTransformWithLabels", + "drawContours", + "ellipse", + "ellipse2Poly", + "equalizeHist", + "erode", + "filter2D", + "findContours", + "fitEllipse", + "fitLine", + "floodFill", + "getAffineTransform", + "getPerspectiveTransform", + "getRotationMatrix2D", + "getStructuringElement", + "goodFeaturesToTrack", + "grabCut", + "integral", + "integral2", + "isContourConvex", + "line", + "matchShapes", + "matchTemplate", + "medianBlur", + "minAreaRect", + "minEnclosingCircle", + "moments", + "morphologyEx", + "pointPolygonTest", + "putText", + "pyrDown", + "pyrUp", + "rectangle", + "remap", + "resize", + "sepFilter2D", + "threshold", + "warpAffine", + "warpPerspective", + "warpPolar", + "watershed", + "fillPoly", + "fillConvexPoly", + "polylines" + ], + "CLAHE": ["apply", "collectGarbage", "getClipLimit", "getTilesGridSize", "setClipLimit", "setTilesGridSize"], + "segmentation_IntelligentScissorsMB": [ + "IntelligentScissorsMB", + "setWeights", + "setGradientMagnitudeMaxLimit", + "setEdgeFeatureZeroCrossingParameters", + "setEdgeFeatureCannyParameters", + "applyImage", + "applyImageFeatures", + "buildMap", + "getContour" + ] + } +} diff --git 
a/modules/python/test/test_imgproc.py b/modules/imgproc/misc/python/test/test_imgproc.py similarity index 63% rename from modules/python/test/test_imgproc.py rename to modules/imgproc/misc/python/test/test_imgproc.py index ede1eb157b..db5a34f708 100644 --- a/modules/python/test/test_imgproc.py +++ b/modules/imgproc/misc/python/test/test_imgproc.py @@ -1,19 +1,25 @@ #!/usr/bin/env python -''' -Test for disctrete fourier transform (dft) -''' - -# Python 2/3 compatibility from __future__ import print_function -import cv2 as cv import numpy as np -import sys +import cv2 as cv from tests_common import NewOpenCVTests -class imgproc_test(NewOpenCVTests): +class Imgproc_Tests(NewOpenCVTests): + + def test_python_986(self): + cntls = [] + img = np.zeros((100,100,3), dtype=np.uint8) + color = (0,0,0) + cnts = np.array(cntls, dtype=np.int32).reshape((1, -1, 2)) + try: + cv.fillPoly(img, cnts, color) + assert False + except: + assert True + def test_filter2d(self): img = self.get_sample('samples/data/lena.jpg', 1) eps = 0.001 @@ -22,6 +28,3 @@ class imgproc_test(NewOpenCVTests): img_blur0 = cv.filter2D(img, cv.CV_32F, kernel*(1./9)) img_blur1 = cv.filter2Dp(img, kernel, ddepth=cv.CV_32F, scale=1./9) self.assertLess(cv.norm(img_blur0 - img_blur1, cv.NORM_INF), eps) - -if __name__ == '__main__': - NewOpenCVTests.bootstrap() diff --git a/modules/imgproc/perf/perf_warp.cpp b/modules/imgproc/perf/perf_warp.cpp index 3716e663f9..688d449a55 100644 --- a/modules/imgproc/perf/perf_warp.cpp +++ b/modules/imgproc/perf/perf_warp.cpp @@ -13,7 +13,7 @@ CV_ENUM(InterTypeExtended, INTER_NEAREST, INTER_LINEAR, WARP_RELATIVE_MAP) CV_ENUM(RemapMode, HALF_SIZE, UPSIDE_DOWN, REFLECTION_X, REFLECTION_BOTH) typedef TestBaseWithParam< tuple > TestWarpAffine; -typedef TestBaseWithParam< tuple > TestWarpPerspective; +typedef TestBaseWithParam< tuple > TestWarpPerspective; typedef TestBaseWithParam< tuple > TestWarpPerspectiveNear_t; typedef TestBaseWithParam< tuple > TestRemap; @@ -42,11 +42,7 @@ 
PERF_TEST_P( TestWarpAffine, WarpAffine, TEST_CYCLE() warpAffine( src, dst, warpMat, sz, interType, borderMode, borderColor ); -#ifdef __ANDROID__ - SANITY_CHECK(dst, interType==INTER_LINEAR? 5 : 10); -#else SANITY_CHECK(dst, 1); -#endif } PERF_TEST_P(TestWarpAffine, DISABLED_WarpAffine_ovx, @@ -72,29 +68,28 @@ PERF_TEST_P(TestWarpAffine, DISABLED_WarpAffine_ovx, TEST_CYCLE() warpAffine(src, dst, warpMat, sz, interType, borderMode, borderColor); -#ifdef __ANDROID__ - SANITY_CHECK(dst, interType == INTER_LINEAR ? 5 : 10); -#else SANITY_CHECK(dst, 1); -#endif } PERF_TEST_P( TestWarpPerspective, WarpPerspective, Combine( Values( szVGA, sz720p, sz1080p ), InterType::all(), - BorderMode::all() + BorderMode::all(), + Values(1, 3, 4) ) ) { Size sz, szSrc(512, 512); - int borderMode, interType; + int borderMode, interType, channels; sz = get<0>(GetParam()); interType = get<1>(GetParam()); borderMode = get<2>(GetParam()); + channels = get<3>(GetParam()); + Scalar borderColor = Scalar::all(150); - Mat src(szSrc,CV_8UC4), dst(sz, CV_8UC4); + Mat src(szSrc, CV_8UC(channels)), dst(sz, CV_8UC(channels)); cvtest::fillGradient(src); if(borderMode == BORDER_CONSTANT) cvtest::smoothBorder(src, borderColor, 1); Mat rotMat = getRotationMatrix2D(Point2f(src.cols/2.f, src.rows/2.f), 30., 2.2); @@ -110,29 +105,27 @@ PERF_TEST_P( TestWarpPerspective, WarpPerspective, TEST_CYCLE() warpPerspective( src, dst, warpMat, sz, interType, borderMode, borderColor ); -#ifdef __ANDROID__ - SANITY_CHECK(dst, interType==INTER_LINEAR? 
5 : 10); -#else SANITY_CHECK(dst, 1); -#endif } PERF_TEST_P(TestWarpPerspective, DISABLED_WarpPerspective_ovx, Combine( Values(szVGA, sz720p, sz1080p), InterType::all(), - BorderMode::all() + BorderMode::all(), + Values(1) ) ) { Size sz, szSrc(512, 512); - int borderMode, interType; + int borderMode, interType, channels; sz = get<0>(GetParam()); interType = get<1>(GetParam()); borderMode = get<2>(GetParam()); + channels = get<3>(GetParam()); Scalar borderColor = Scalar::all(150); - Mat src(szSrc, CV_8UC1), dst(sz, CV_8UC1); + Mat src(szSrc, CV_8UC(channels)), dst(sz, CV_8UC(channels)); cvtest::fillGradient(src); if (borderMode == BORDER_CONSTANT) cvtest::smoothBorder(src, borderColor, 1); Mat rotMat = getRotationMatrix2D(Point2f(src.cols / 2.f, src.rows / 2.f), 30., 2.2); @@ -148,11 +141,7 @@ PERF_TEST_P(TestWarpPerspective, DISABLED_WarpPerspective_ovx, TEST_CYCLE() warpPerspective(src, dst, warpMat, sz, interType, borderMode, borderColor); -#ifdef __ANDROID__ - SANITY_CHECK(dst, interType == INTER_LINEAR ? 5 : 10); -#else SANITY_CHECK(dst, 1); -#endif } PERF_TEST_P( TestWarpPerspectiveNear_t, WarpPerspectiveNear, @@ -194,11 +183,7 @@ PERF_TEST_P( TestWarpPerspectiveNear_t, WarpPerspectiveNear, warpPerspective( src, dst, warpMat, size, interType, borderMode, borderColor ); } -#ifdef __ANDROID__ - SANITY_CHECK(dst, interType==INTER_LINEAR? 
5 : 10); -#else SANITY_CHECK(dst, 1); -#endif } PERF_TEST_P( TestRemap, remap, diff --git a/modules/imgproc/src/color.hpp b/modules/imgproc/src/color.hpp index 1dd8f65ae4..e6130b6887 100644 --- a/modules/imgproc/src/color.hpp +++ b/modules/imgproc/src/color.hpp @@ -202,8 +202,8 @@ struct CvtHelper int stype = _src.type(); scn = CV_MAT_CN(stype), depth = CV_MAT_DEPTH(stype); - CV_Check(scn, VScn::contains(scn), "Invalid number of channels in input image"); - CV_Check(dcn, VDcn::contains(dcn), "Invalid number of channels in output image"); + CV_CheckChannels(scn, VScn::contains(scn), "Invalid number of channels in input image"); + CV_CheckChannels(dcn, VDcn::contains(dcn), "Invalid number of channels in output image"); CV_CheckDepth(depth, VDepth::contains(depth), "Unsupported depth of input image"); if (_src.getObj() == _dst.getObj()) // inplace processing (#6653) @@ -247,8 +247,8 @@ struct OclHelper int scn = src.channels(); int depth = src.depth(); - CV_Check(scn, VScn::contains(scn), "Invalid number of channels in input image"); - CV_Check(dcn, VDcn::contains(dcn), "Invalid number of channels in output image"); + CV_CheckChannels(scn, VScn::contains(scn), "Invalid number of channels in input image"); + CV_CheckChannels(dcn, VDcn::contains(dcn), "Invalid number of channels in output image"); CV_CheckDepth(depth, VDepth::contains(depth), "Unsupported depth of input image"); switch (sizePolicy) diff --git a/modules/imgproc/src/color.simd_helpers.hpp b/modules/imgproc/src/color.simd_helpers.hpp index 06b9ba3d06..ef675d129a 100644 --- a/modules/imgproc/src/color.simd_helpers.hpp +++ b/modules/imgproc/src/color.simd_helpers.hpp @@ -89,8 +89,8 @@ struct CvtHelper int stype = _src.type(); scn = CV_MAT_CN(stype), depth = CV_MAT_DEPTH(stype); - CV_Check(scn, VScn::contains(scn), "Invalid number of channels in input image"); - CV_Check(dcn, VDcn::contains(dcn), "Invalid number of channels in output image"); + CV_CheckChannels(scn, VScn::contains(scn), "Invalid number of 
channels in input image"); + CV_CheckChannels(dcn, VDcn::contains(dcn), "Invalid number of channels in output image"); CV_CheckDepth(depth, VDepth::contains(depth), "Unsupported depth of input image"); if (_src.getObj() == _dst.getObj()) // inplace processing (#6653) diff --git a/modules/imgproc/src/demosaicing.cpp b/modules/imgproc/src/demosaicing.cpp index dd285cab51..24baf16362 100644 --- a/modules/imgproc/src/demosaicing.cpp +++ b/modules/imgproc/src/demosaicing.cpp @@ -177,41 +177,102 @@ public: vst1_u8(dst + 8, p.val[1]); } #else - v_uint16x8 _b2y = v_setall_u16((ushort)(rcoeff*2)); - v_uint16x8 _g2y = v_setall_u16((ushort)(gcoeff*2)); - v_uint16x8 _r2y = v_setall_u16((ushort)(bcoeff*2)); + v_uint16x8 v255 = v_setall_u16(255); + v_int16x8 v_descale = v_setall_s16(static_cast(1 << 14)); + v_int16x8 dummy; + v_int16x8 cxrb; + v_int16x8 cxg2; + v_zip(v_setall_s16(static_cast(rcoeff)), + v_setall_s16(static_cast(bcoeff)), + cxrb, + dummy); + v_zip(v_setall_s16(static_cast(gcoeff)), + v_setall_s16(static_cast(2)), + cxg2, + dummy); + const uchar* bayer_end = bayer + width; - for( ; bayer <= bayer_end - 18; bayer += 14, dst += 14 ) + for (; bayer < bayer_end - 14; bayer += 14, dst += 14) { - v_uint16x8 r0 = v_reinterpret_as_u16(v_load(bayer)); - v_uint16x8 r1 = v_reinterpret_as_u16(v_load(bayer+bayer_step)); - v_uint16x8 r2 = v_reinterpret_as_u16(v_load(bayer+bayer_step*2)); + v_uint16x8 first_line = v_reinterpret_as_u16(v_load(bayer)); + v_uint16x8 second_line = v_reinterpret_as_u16(v_load(bayer + bayer_step)); + v_uint16x8 third_line = v_reinterpret_as_u16(v_load(bayer + bayer_step * 2)); - v_uint16x8 b1 = v_add(v_shr<7>(v_shl<8>(r0)), v_shr<7>(v_shl<8>(r2))); - v_uint16x8 b0 = v_add(v_rotate_right<1>(b1), b1); - b1 = v_shl<1>(v_rotate_right<1>(b1)); + // bayer[0] + v_uint16x8 first_line0 = v_and(first_line, v255); + // bayer[bayer_step*2] + v_uint16x8 third_line0 = v_and(third_line, v255); + // bayer[0] + bayer[bayer_step*2] + v_uint16x8 first_third_line0 = 
v_add(first_line0, third_line0); + // bayer[2] + bayer[bayer_step*2+2] + v_uint16x8 first_third_line2 = v_rotate_right<1>(first_third_line0); + // bayer[0] + bayer[2] + bayer[bayer_step*2] + bayer[bayer_step*2+2] + v_int16x8 r0 = v_reinterpret_as_s16(v_add(first_third_line0, first_third_line2)); + // (bayer[2] + bayer[bayer_step*2+2]) * 2 + v_int16x8 r1 = v_reinterpret_as_s16(v_shl<1>(first_third_line2)); - v_uint16x8 g0 = v_add(v_shr<7>(r0), v_shr<7>(r2)); - v_uint16x8 g1 = v_shr<7>(v_shl<8>(r1)); - g0 = v_add(g0, v_add(v_rotate_right<1>(g1), g1)); - g1 = v_shl<2>(v_rotate_right<1>(g1)); + // bayer[bayer_step+1] + v_uint16x8 second_line1 = v_shr<8>(second_line); + // bayer[bayer_step+1] * 4 + v_int16x8 b0 = v_reinterpret_as_s16(v_shl<2>(second_line1)); + // bayer[bayer_step+3] + v_uint16x8 second_line3 = v_rotate_right<1>(second_line1); + // bayer[bayer_step+1] + bayer[bayer_step+3] + v_uint16x8 second_line13 = v_add(second_line1, second_line3); + // (bayer[bayer_step+1] + bayer[bayer_step+3]) * 2 + v_int16x8 b1 = v_reinterpret_as_s16(v_shl(second_line13, 1)); - r0 = v_shr<8>(r1); - r1 = v_shl<2>(v_add(v_rotate_right<1>(r0), r0)); - r0 = v_shl<3>(r0); + // bayer[1] + v_uint16x8 first_line1 = v_shr<8>(first_line); + // bayer[bayer_step] + v_uint16x8 second_line0 = v_and(second_line, v255); + // bayer[bayer_step+2] + v_uint16x8 second_line2 = v_rotate_right<1>(second_line0); + // bayer[bayer_step] + bayer[bayer_step+2] + v_uint16x8 second_line02 = v_add(second_line0, second_line2); + // bayer[bayer_step*2+1] + v_uint16x8 third_line1 = v_shr<8>(third_line); + // bayer[1] + bayer[bayer_step*2+1] + v_uint16x8 first_third_line1 = v_add(first_line1, third_line1); + // bayer[1] + bayer[bayer_step] + bayer[bayer_step+2] + bayer[bayer_step*2+1] + v_int16x8 g0 = v_reinterpret_as_s16(v_add(first_third_line1, second_line02)); + // bayer[bayer_step+2] * 4 + v_int16x8 g1 = v_reinterpret_as_s16(v_shl<2>(second_line2)); - g0 = v_shr<2>(v_add(v_add(v_mul_hi(b0, _b2y), v_mul_hi(g0, 
_g2y)), v_mul_hi(r0, _r2y))); - g1 = v_shr<2>(v_add(v_add(v_mul_hi(b1, _b2y), v_mul_hi(g1, _g2y)), v_mul_hi(r1, _r2y))); - v_uint8x16 pack_lo, pack_hi; - v_zip(v_pack_u(v_reinterpret_as_s16(g0), v_reinterpret_as_s16(g0)), - v_pack_u(v_reinterpret_as_s16(g1), v_reinterpret_as_s16(g1)), - pack_lo, pack_hi); - v_store(dst, pack_lo); + v_int16x8 rb0; + v_int16x8 rb1; + v_int16x8 rb2; + v_int16x8 rb3; + v_zip(r0, b0, rb0, rb1); + v_zip(r1, b1, rb2, rb3); + + v_int16x8 gd0; + v_int16x8 gd1; + v_int16x8 gd2; + v_int16x8 gd3; + v_zip(g0, v_descale, gd0, gd1); + v_zip(g1, v_descale, gd2, gd3); + + v_int32x4 gray_even0 = v_shr<16>(v_add(v_dotprod(rb0, cxrb), v_dotprod(gd0, cxg2))); + v_int32x4 gray_even1 = v_shr<16>(v_add(v_dotprod(rb1, cxrb), v_dotprod(gd1, cxg2))); + v_int32x4 gray_odd0 = v_shr<16>(v_add(v_dotprod(rb2, cxrb), v_dotprod(gd2, cxg2))); + v_int32x4 gray_odd1 = v_shr<16>(v_add(v_dotprod(rb3, cxrb), v_dotprod(gd3, cxg2))); + + v_int16x8 gray_even = v_pack(gray_even0, gray_even1); + v_int16x8 gray_odd = v_pack(gray_odd0, gray_odd1); + + v_int16x8 gray_d0; + v_int16x8 gray_d1; + v_zip(gray_even, gray_odd, gray_d0, gray_d1); + + v_uint8x16 gray = v_pack(v_reinterpret_as_u16(gray_d0), v_reinterpret_as_u16(gray_d1)); + + v_store(dst, gray); } #endif - return (int)(bayer - (bayer_end - width)); + return static_cast(bayer - (bayer_end - width)); } int bayer2RGB(const uchar* bayer, int bayer_step, uchar* dst, int width, int blue) const diff --git a/modules/imgproc/src/drawing.cpp b/modules/imgproc/src/drawing.cpp index fb99f8b1bb..8d20a57a87 100644 --- a/modules/imgproc/src/drawing.cpp +++ b/modules/imgproc/src/drawing.cpp @@ -2044,8 +2044,11 @@ void fillPoly( InputOutputArray _img, const Point** pts, const int* npts, int nc edges.reserve( total + 1 ); for (i = 0; i < ncontours; i++) { - std::vector _pts(pts[i], pts[i] + npts[i]); - CollectPolyEdges(img, _pts.data(), npts[i], edges, buf, line_type, shift, offset); + if (npts[i] > 0 && pts[i]) + { + std::vector 
_pts(pts[i], pts[i] + npts[i]); + CollectPolyEdges(img, _pts.data(), npts[i], edges, buf, line_type, shift, offset); + } } FillEdgeCollection(img, edges, buf, line_type); @@ -2105,7 +2108,7 @@ void cv::fillPoly(InputOutputArray img, InputArrayOfArrays pts, for( i = 0; i < ncontours; i++ ) { Mat p = pts.getMat(manyContours ? i : -1); - CV_Assert(p.checkVector(2, CV_32S) >= 0); + CV_Assert(p.checkVector(2, CV_32S) > 0); ptsptr[i] = p.ptr(); npts[i] = p.rows*p.cols*p.channels()/2; } diff --git a/modules/imgproc/src/hal_replacement.hpp b/modules/imgproc/src/hal_replacement.hpp index 1409dda991..fe6019e3a7 100644 --- a/modules/imgproc/src/hal_replacement.hpp +++ b/modules/imgproc/src/hal_replacement.hpp @@ -1247,6 +1247,33 @@ inline int hal_ni_pyrdown(const uchar* src_data, size_t src_step, int src_width, #define cv_hal_pyrdown hal_ni_pyrdown //! @endcond +/** + @brief Perform Gaussian Blur and downsampling for input tile with optional margins for submatrix + @param src_data Source image data + @param src_step Source image step + @param src_width Source image width + @param src_height Source image height + @param dst_data Destination image data + @param dst_step Destination image step + @param dst_width Destination image width + @param dst_height Destination image height + @param depth Depths of source and destination image + @param cn Number of channels + @param margin_left Left margins for source image + @param margin_top Top margins for source image + @param margin_right Right margins for source image + @param margin_bottom Bottom margins for source image + @param border_type Border type +*/ +inline int hal_ni_pyrdown_offset(const uchar* src_data, size_t src_step, int src_width, int src_height, + uchar* dst_data, size_t dst_step, int dst_width, int dst_height, + int depth, int cn, int margin_left, int margin_top, int margin_right, int margin_bottom, int border_type) +{ return CV_HAL_ERROR_NOT_IMPLEMENTED; } + +//! 
@cond IGNORED +#define cv_hal_pyrdown_offset hal_ni_pyrdown_offset +//! @endcond + /** @brief Canny edge detector @param src_data Source image data diff --git a/modules/imgproc/src/median_blur.simd.hpp b/modules/imgproc/src/median_blur.simd.hpp index 1069d8abab..7cc0aa693c 100644 --- a/modules/imgproc/src/median_blur.simd.hpp +++ b/modules/imgproc/src/median_blur.simd.hpp @@ -845,7 +845,7 @@ void medianBlur(const Mat& src0, /*const*/ Mat& dst, int ksize) CV_INSTRUMENT_REGION(); bool useSortNet = ksize == 3 || (ksize == 5 -#if !(CV_SIMD) +#if !((CV_SIMD || CV_SIMD_SCALABLE)) && ( src0.depth() > CV_8U || src0.channels() == 2 || src0.channels() > 4 ) #endif ); @@ -881,7 +881,7 @@ void medianBlur(const Mat& src0, /*const*/ Mat& dst, int ksize) double img_size_mp = (double)(src0.total())/(1 << 20); if( ksize <= 3 + (img_size_mp < 1 ? 12 : img_size_mp < 4 ? 6 : 2)* - (CV_SIMD ? 1 : 3)) + ((CV_SIMD || CV_SIMD_SCALABLE) ? 1 : 3)) medianBlur_8u_Om( src, dst, ksize ); else medianBlur_8u_O1( src, dst, ksize ); diff --git a/modules/imgproc/src/pyramids.cpp b/modules/imgproc/src/pyramids.cpp index 1978638102..a1c9e7e14c 100644 --- a/modules/imgproc/src/pyramids.cpp +++ b/modules/imgproc/src/pyramids.cpp @@ -1265,69 +1265,6 @@ static bool ocl_pyrUp( InputArray _src, OutputArray _dst, const Size& _dsz, int } -#if defined(HAVE_IPP) -namespace cv -{ -static bool ipp_pyrdown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType ) -{ - CV_INSTRUMENT_REGION_IPP(); - -#if IPP_VERSION_X100 >= 810 && !IPP_DISABLE_PYRAMIDS_DOWN - Size dsz = _dsz.empty() ? 
Size((_src.cols() + 1)/2, (_src.rows() + 1)/2) : _dsz; - bool isolated = (borderType & BORDER_ISOLATED) != 0; - int borderTypeNI = borderType & ~BORDER_ISOLATED; - - Mat src = _src.getMat(); - _dst.create( dsz, src.type() ); - Mat dst = _dst.getMat(); - int depth = src.depth(); - - - { - bool isolated = (borderType & BORDER_ISOLATED) != 0; - int borderTypeNI = borderType & ~BORDER_ISOLATED; - if (borderTypeNI == BORDER_DEFAULT && (!src.isSubmatrix() || isolated) && dsz == Size(src.cols*2, src.rows*2)) - { - typedef IppStatus (CV_STDCALL * ippiPyrUp)(const void* pSrc, int srcStep, void* pDst, int dstStep, IppiSize srcRoi, Ipp8u* buffer); - int type = src.type(); - CV_SUPPRESS_DEPRECATED_START - ippiPyrUp pyrUpFunc = type == CV_8UC1 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_8u_C1R : - type == CV_8UC3 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_8u_C3R : - type == CV_32FC1 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_32f_C1R : - type == CV_32FC3 ? (ippiPyrUp) ippiPyrUp_Gauss5x5_32f_C3R : 0; - CV_SUPPRESS_DEPRECATED_END - - if (pyrUpFunc) - { - int bufferSize; - IppiSize srcRoi = { src.cols, src.rows }; - IppDataType dataType = depth == CV_8U ? 
ipp8u : ipp32f; - CV_SUPPRESS_DEPRECATED_START - IppStatus ok = ippiPyrUpGetBufSize_Gauss5x5(srcRoi.width, dataType, src.channels(), &bufferSize); - CV_SUPPRESS_DEPRECATED_END - if (ok >= 0) - { - Ipp8u* buffer = ippsMalloc_8u_L(bufferSize); - ok = pyrUpFunc(src.data, (int) src.step, dst.data, (int) dst.step, srcRoi, buffer); - ippsFree(buffer); - - if (ok >= 0) - { - CV_IMPL_ADD(CV_IMPL_IPP); - return true; - } - } - } - } - } -#else - CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(_dsz); CV_UNUSED(borderType); -#endif - return false; -} -} -#endif - void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borderType ) { CV_INSTRUMENT_REGION(); @@ -1343,15 +1280,19 @@ void cv::pyrDown( InputArray _src, OutputArray _dst, const Size& _dsz, int borde Mat dst = _dst.getMat(); int depth = src.depth(); - CALL_HAL(pyrDown, cv_hal_pyrdown, src.data, src.step, src.cols, src.rows, dst.data, dst.step, dst.cols, dst.rows, depth, src.channels(), borderType); - -#ifdef HAVE_IPP - bool isolated = (borderType & BORDER_ISOLATED) != 0; - int borderTypeNI = borderType & ~BORDER_ISOLATED; -#endif - CV_IPP_RUN(borderTypeNI == BORDER_DEFAULT && (!_src.isSubmatrix() || isolated) && dsz == Size((_src.cols() + 1)/2, (_src.rows() + 1)/2), - ipp_pyrdown( _src, _dst, _dsz, borderType)); - + if(src.isSubmatrix() && !(borderType & BORDER_ISOLATED)) + { + Point ofs; + Size wsz(src.cols, src.rows); + src.locateROI( wsz, ofs ); + CALL_HAL(pyrDown, cv_hal_pyrdown_offset, src.data, src.step, src.cols, src.rows, + dst.data, dst.step, dst.cols, dst.rows, depth, src.channels(), + ofs.x, ofs.y, wsz.width - src.cols - ofs.x, wsz.height - src.rows - ofs.y, borderType & (~BORDER_ISOLATED)); + } + else + { + CALL_HAL(pyrDown, cv_hal_pyrdown, src.data, src.step, src.cols, src.rows, dst.data, dst.step, dst.cols, dst.rows, depth, src.channels(), borderType); + } PyrFunc func = 0; if( depth == CV_8U ) diff --git a/modules/imgproc/test/test_color.cpp b/modules/imgproc/test/test_color.cpp index 
1229a468eb..3bebb563de 100644 --- a/modules/imgproc/test/test_color.cpp +++ b/modules/imgproc/test/test_color.cpp @@ -1863,6 +1863,26 @@ TEST(Imgproc_ColorBayer, regression) EXPECT_EQ(0, countNonZero(diff.reshape(1) > 1)); } +TEST(Imgproc_ColorBayer2Gray, regression_25823) +{ + const int n = 100; + Mat src(n, n, CV_8UC1); + Mat dst; + + for (int i = 0; i < src.rows; ++i) + { + for (int j = 0; j < src.cols; ++j) + { + src.at(i, j) = (i + j) % 2; + } + } + + cvtColor(src, dst, COLOR_BayerBG2GRAY); + + Mat gold(n, n, CV_8UC1, Scalar(1)); + EXPECT_EQ(0, cv::norm(dst, gold, NORM_INF)); +} + TEST(Imgproc_ColorBayerVNG, regression) { cvtest::TS* ts = cvtest::TS::ptr(); @@ -3203,4 +3223,20 @@ TEST(ImgProc_RGB2Lab, NaN_21111) #endif } +// See https://github.com/opencv/opencv/issues/25971 +// If num of channels is not suitable for selected cv::ColorConversionCodes, +// e.code must be cv::Error::BadNumChannels. +TEST(ImgProc_cvtColor_InvalidNumOfChannels, regression_25971) +{ + try { + cv::Mat src = cv::Mat::zeros(100, 100, CV_8UC1); + cv::Mat dst; + EXPECT_THROW(cv::cvtColor(src, dst, COLOR_RGB2GRAY), cv::Exception); + }catch(const cv::Exception& e) { + EXPECT_EQ(e.code, cv::Error::BadNumChannels); + }catch(...) 
{ + FAIL() << "Unexpected exception is happened."; + } +} + }} // namespace diff --git a/modules/imgproc/test/test_cornersubpix.cpp b/modules/imgproc/test/test_cornersubpix.cpp index 86484d2482..5a2e633397 100644 --- a/modules/imgproc/test/test_cornersubpix.cpp +++ b/modules/imgproc/test/test_cornersubpix.cpp @@ -65,4 +65,31 @@ TEST(Imgproc_CornerSubPix, out_of_image_corners) ASSERT_TRUE(Rect(0, 0, image.cols, image.rows).contains(corners.front())); } +// See https://github.com/opencv/opencv/issues/26016 +TEST(Imgproc_CornerSubPix, corners_on_the_edge) +{ + cv::Mat image(500, 500, CV_8UC1); + cv::Size win(1, 1); + cv::Size zeroZone(-1, -1); + cv::TermCriteria criteria; + + std::vector cornersOK1 = { cv::Point2f(250, std::nextafter(499.5f, 499.5f - 1.0f)) }; + EXPECT_NO_THROW( cv::cornerSubPix(image, cornersOK1, win, zeroZone, criteria) ) << cornersOK1; + + std::vector cornersOK2 = { cv::Point2f(250, 499.5f) }; + EXPECT_NO_THROW( cv::cornerSubPix(image, cornersOK2, win, zeroZone, criteria) ) << cornersOK2; + + std::vector cornersOK3 = { cv::Point2f(250, std::nextafter(499.5f, 499.5f + 1.0f)) }; + EXPECT_NO_THROW( cv::cornerSubPix(image, cornersOK3, win, zeroZone, criteria) ) << cornersOK3; + + std::vector cornersOK4 = { cv::Point2f(250, std::nextafter(500.0f, 500.0f - 1.0f)) }; + EXPECT_NO_THROW( cv::cornerSubPix(image, cornersOK4, win, zeroZone, criteria) ) << cornersOK4; + + std::vector cornersNG1 = { cv::Point2f(250, 500.0f) }; + EXPECT_ANY_THROW( cv::cornerSubPix(image, cornersNG1, win, zeroZone, criteria) ) << cornersNG1; + + std::vector cornersNG2 = { cv::Point2f(250, std::nextafter(500.0f, 500.0f + 1.0f)) }; + EXPECT_ANY_THROW( cv::cornerSubPix(image, cornersNG2, win, zeroZone, criteria) ) << cornersNG2; +} + }} // namespace diff --git a/modules/java/android_sdk/CMakeLists.txt b/modules/java/android_sdk/CMakeLists.txt index b5fbc3d93d..a9e6ff52af 100644 --- a/modules/java/android_sdk/CMakeLists.txt +++ b/modules/java/android_sdk/CMakeLists.txt @@ -15,6 +15,59 
@@ file(MAKE_DIRECTORY "${java_src_dir}") ocv_copyfiles_append_dir(JAVA_SRC_COPY "${OPENCV_JAVA_BINDINGS_DIR}/gen/java" "${java_src_dir}") +set(SOURSE_SETS_JNI_LIBS_SRC_DIRS "'native/libs'") +set(SOURSE_SETS_JAVA_SRC_DIRS "'java/src'") +set(SOURSE_SETS_RES_SRC_DIRS "'java/res'") +set(SOURSE_SETS_MANIFEST_SRC_FILE "'java/AndroidManifest.xml'") +set(BUILD_GRADLE_COMPILE_OPTIONS " + compileOptions { + sourceCompatibility JavaVersion.VERSION_${ANDROID_GRADLE_JAVA_VERSION_INIT} + targetCompatibility JavaVersion.VERSION_${ANDROID_GRADLE_JAVA_VERSION_INIT} + } +") +set(MAVEN_PUBLISH_PLUGIN_DECLARATION "apply plugin: 'maven-publish'") +set(BUILD_GRADLE_ANDROID_PUBLISHING_CONFIG " + buildFeatures { + prefabPublishing true + buildConfig true + } + + prefab { + opencv_jni_shared { + headers 'native/jni/include' + } + } + + publishing { + singleVariant('release') { + withSourcesJar() + withJavadocJar() + } + } +") + +set(BUILD_GRADLE_PUBLISHING_CONFIG " +publishing { + publications { + release(MavenPublication) { + groupId = 'org.opencv' + artifactId = 'opencv' + version = '${OPENCV_VERSION_PLAIN}' + + afterEvaluate { + from components.release + } + } + } + repositories { + maven { + name = 'myrepo' + url = \"\${project.buildDir}/repo\" + } + } +} +") + if(ANDROID_EXECUTABLE) ocv_assert(ANDROID_TOOLS_Pkg_Revision GREATER 13) @@ -108,6 +161,7 @@ if(ANDROID_NATIVE_API_LEVEL GREATER 21) else() ocv_update(ANDROID_TARGET_SDK_VERSION "21") endif() + configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build.gradle.in" "${CMAKE_CURRENT_BINARY_DIR}/build.gradle" @ONLY) install(FILES "${CMAKE_CURRENT_BINARY_DIR}/build.gradle" DESTINATION ${JAVA_INSTALL_ROOT}/.. 
COMPONENT java) @@ -117,12 +171,23 @@ else() # gradle build # Android Gradle-based project # +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build.gradle.in" "${ANDROID_TMP_INSTALL_BASE_DIR}/opencv/build.gradle" @ONLY) + #TODO: INSTALL ONLY ocv_copyfiles_append_dir(JAVA_SRC_COPY "${OPENCV_JAVA_BINDINGS_DIR}/gen/android/java" "${java_src_dir}") ocv_copyfiles_append_dir(JAVA_SRC_COPY "${OPENCV_JAVA_BINDINGS_DIR}/gen/android-21/java" "${java_src_dir}") ocv_copyfiles_append_dir(JAVA_SRC_COPY "${OPENCV_JAVA_BINDINGS_DIR}/gen/android-24/java" "${java_src_dir}") # copy boilerplate +set(SOURSE_SETS_JNI_LIBS_SRC_DIRS "'../../jni'") +set(SOURSE_SETS_JAVA_SRC_DIRS "'src'") +set(SOURSE_SETS_RES_SRC_DIRS "'${OpenCV_SOURCE_DIR}/modules/java/android_sdk/android_gradle_lib/res'") +set(SOURSE_SETS_MANIFEST_SRC_FILE "'AndroidManifest.xml'") +set(BUILD_GRADLE_COMPILE_OPTIONS "") +set(MAVEN_PUBLISH_PLUGIN_DECLARATION "") +set(BUILD_GRADLE_ANDROID_PUBLISHING_CONFIG "") +set(BUILD_GRADLE_PUBLISHING_CONFIG "") + set(__base_dir "${CMAKE_CURRENT_SOURCE_DIR}/android_gradle_lib/") file(GLOB_RECURSE seed_project_files_rel RELATIVE "${__base_dir}/" "${__base_dir}/*") list(REMOVE_ITEM seed_project_files_rel "${ANDROID_MANIFEST_FILE}") @@ -134,6 +199,7 @@ foreach(file ${seed_project_files_rel}) install(FILES "${OPENCV_JAVA_DIR}/${file}" DESTINATION "${JAVA_INSTALL_ROOT}/${install_subdir}" COMPONENT java) endif() endforeach() +configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build.gradle.in" "${OPENCV_JAVA_DIR}/build.gradle" @ONLY) # copy libcxx_helper set(__base_dir "${CMAKE_CURRENT_SOURCE_DIR}/") @@ -165,7 +231,6 @@ file(REMOVE "${OPENCV_DEPHELPER}/${the_module}_android") # force rebuild after add_custom_target(${the_module}_android ALL DEPENDS "${OPENCV_DEPHELPER}/${the_module}_android" SOURCES "${__base_dir}/${ANDROID_MANIFEST_FILE}") -configure_file("${CMAKE_CURRENT_SOURCE_DIR}/build.gradle.in" "${ANDROID_TMP_INSTALL_BASE_DIR}/opencv/build.gradle" @ONLY) install(FILES 
"${ANDROID_TMP_INSTALL_BASE_DIR}/opencv/build.gradle" DESTINATION ${JAVA_INSTALL_ROOT}/.. COMPONENT java) install(DIRECTORY "${java_src_dir}" DESTINATION "${JAVA_INSTALL_ROOT}" COMPONENT java) diff --git a/modules/java/android_sdk/android_gradle_lib/build.gradle b/modules/java/android_sdk/android_gradle_lib/build.gradle deleted file mode 100644 index 4394bd9a4e..0000000000 --- a/modules/java/android_sdk/android_gradle_lib/build.gradle +++ /dev/null @@ -1,58 +0,0 @@ -apply plugin: 'com.android.library' -@KOTLIN_PLUGIN_DECLARATION@ - -def openCVersionName = "@OPENCV_VERSION@" -def openCVersionCode = ((@OPENCV_VERSION_MAJOR@ * 100 + @OPENCV_VERSION_MINOR@) * 100 + @OPENCV_VERSION_PATCH@) * 10 + 0 - -android { - @OPENCV_ANDROID_NAMESPACE_DECLARATION@ - compileSdkVersion @ANDROID_COMPILE_SDK_VERSION@ - - defaultConfig { - minSdkVersion @ANDROID_MIN_SDK_VERSION@ - targetSdkVersion @ANDROID_TARGET_SDK_VERSION@ - - versionCode openCVersionCode - versionName openCVersionName - - externalNativeBuild { - cmake { - arguments "-DANDROID_STL=@ANDROID_STL@" - targets "opencv_jni_shared" - } - } - } - - buildTypes { - debug { - packagingOptions { - doNotStrip '**/*.so' // controlled by OpenCV CMake scripts - } - } - release { - packagingOptions { - doNotStrip '**/*.so' // controlled by OpenCV CMake scripts - } - minifyEnabled false - proguardFiles getDefaultProguardFile('proguard-android.txt'), 'proguard-rules.txt' - } - } - - sourceSets { - main { - jniLibs.srcDirs = ['../../jni'] - java.srcDirs = ['src'] // TODO Use original files instead of copied into build directory - res.srcDirs = ['@OpenCV_SOURCE_DIR@/modules/java/android_sdk/android_gradle_lib/res'] - manifest.srcFile 'AndroidManifest.xml' - } - } - - externalNativeBuild { - cmake { - path (project.projectDir.toString() + '/libcxx_helper/CMakeLists.txt') - } - } -} - -dependencies { -} diff --git a/modules/java/android_sdk/build.gradle.in b/modules/java/android_sdk/build.gradle.in index e61ad66708..0ecacd245a 100644 --- 
a/modules/java/android_sdk/build.gradle.in +++ b/modules/java/android_sdk/build.gradle.in @@ -89,7 +89,7 @@ // apply plugin: 'com.android.library' -apply plugin: 'maven-publish' +@MAVEN_PUBLISH_PLUGIN_DECLARATION@ try { @KOTLIN_PLUGIN_DECLARATION@ println "Configure OpenCV with Kotlin" @@ -120,12 +120,7 @@ android { } } } - - compileOptions { - sourceCompatibility JavaVersion.VERSION_@ANDROID_GRADLE_JAVA_VERSION_INIT@ - targetCompatibility JavaVersion.VERSION_@ANDROID_GRADLE_JAVA_VERSION_INIT@ - } - + @BUILD_GRADLE_COMPILE_OPTIONS@ buildTypes { debug { packagingOptions { @@ -141,29 +136,12 @@ android { } } - buildFeatures { - prefabPublishing true - buildConfig true - } - prefab { - opencv_jni_shared { - headers "native/jni/include" - } - } - sourceSets { main { - jniLibs.srcDirs = ['native/libs'] - java.srcDirs = ['java/src'] - res.srcDirs = ['java/res'] - manifest.srcFile 'java/AndroidManifest.xml' - } - } - - publishing { - singleVariant('release') { - withSourcesJar() - withJavadocJar() + jniLibs.srcDirs = [@SOURSE_SETS_JNI_LIBS_SRC_DIRS@] + java.srcDirs = [@SOURSE_SETS_JAVA_SRC_DIRS@] + res.srcDirs = [@SOURSE_SETS_RES_SRC_DIRS@] + manifest.srcFile @SOURSE_SETS_MANIFEST_SRC_FILE@ } } @@ -172,27 +150,8 @@ android { path (project.projectDir.toString() + '/libcxx_helper/CMakeLists.txt') } } + @BUILD_GRADLE_ANDROID_PUBLISHING_CONFIG@ } - -publishing { - publications { - release(MavenPublication) { - groupId = 'org.opencv' - artifactId = 'opencv' - version = '@OPENCV_VERSION_PLAIN@' - - afterEvaluate { - from components.release - } - } - } - repositories { - maven { - name = 'myrepo' - url = "${project.buildDir}/repo" - } - } -} - +@BUILD_GRADLE_PUBLISHING_CONFIG@ dependencies { } diff --git a/modules/js/generator/CMakeLists.txt b/modules/js/generator/CMakeLists.txt index 3d66df154f..c66608e917 100644 --- a/modules/js/generator/CMakeLists.txt +++ b/modules/js/generator/CMakeLists.txt @@ -38,8 +38,21 @@ set(scripts_hdr_parser 
"${JS_SOURCE_DIR}/../python/src2/hdr_parser.py") if(DEFINED ENV{OPENCV_JS_WHITELIST}) set(OPENCV_JS_WHITELIST_FILE "$ENV{OPENCV_JS_WHITELIST}") + message(STATUS "Use white list from environment ${OPENCV_JS_WHITELIST_FILE}") else() - set(OPENCV_JS_WHITELIST_FILE "${OpenCV_SOURCE_DIR}/platforms/js/opencv_js.config.py") + #generate white list from modules//misc/js/whitelist.json + set(OPENCV_JS_WHITELIST_FILE "${CMAKE_CURRENT_BINARY_DIR}/whitelist.json") + foreach(m in ${OPENCV_JS_MODULES}) + set(js_whitelist "${OPENCV_MODULE_${m}_LOCATION}/misc/js/gen_dict.json") + if (EXISTS "${js_whitelist}") + file(READ "${js_whitelist}" whitelist_content) + list(APPEND OPENCV_JS_WHITELIST_CONTENT "\"${m}\": ${whitelist_content}") + endif() + endforeach(m) + string(REPLACE ";" ", \n" OPENCV_JS_WHITELIST_CONTENT_STRING "${OPENCV_JS_WHITELIST_CONTENT}") + set(OPENCV_JS_WHITELIST_CONTENT_STRING "{\n${OPENCV_JS_WHITELIST_CONTENT_STRING}}\n") + ocv_update_file("${OPENCV_JS_WHITELIST_FILE}" "${OPENCV_JS_WHITELIST_CONTENT_STRING}") + message(STATUS "Use autogenerated whitelist ${OPENCV_JS_WHITELIST_FILE}") endif() add_custom_command( diff --git a/modules/js/generator/embindgen.py b/modules/js/generator/embindgen.py index 005ac9f175..00d2406046 100644 --- a/modules/js/generator/embindgen.py +++ b/modules/js/generator/embindgen.py @@ -76,6 +76,7 @@ if sys.version_info[0] >= 3: else: from cStringIO import StringIO +import json func_table = {} @@ -103,11 +104,32 @@ def makeWhiteList(module_list): wl[k] = m[k] return wl +def makeWhiteListJson(module_list): + wl = {} + for n, gen_dict in module_list.items(): + m = gen_dict["whitelist"] + for k in m.keys(): + if k in wl: + wl[k] += m[k] + else: + wl[k] = m[k] + return wl + +def makeNamespacePrefixOverride(module_list): + wl = {} + for n, gen_dict in module_list.items(): + if "namespace_prefix_override" in gen_dict: + m = gen_dict["namespace_prefix_override"] + for k in m.keys(): + if k in wl: + wl[k] += m[k] + else: + wl[k] = m[k] + return wl + 
+ white_list = None -namespace_prefix_override = { - 'dnn' : '', - 'aruco' : '', -} +namespace_prefix_override = None # Features to be exported export_enums = False @@ -834,6 +856,7 @@ class JSWrapperGenerator(object): if method.cname in ignore_list: continue if not method.name in white_list[method.class_name]: + #print('Not in whitelist: "{}"'.format(method.name)) continue if method.is_constructor: for variant in method.variants: @@ -938,9 +961,9 @@ if __name__ == "__main__": if len(sys.argv) < 5: print("Usage:\n", \ os.path.basename(sys.argv[0]), \ - " ") + " ") print("Current args are: ", ", ".join(["'"+a+"'" for a in sys.argv])) - exit(0) + exit(1) dstdir = "." hdr_parser_path = os.path.abspath(sys.argv[1]) @@ -953,8 +976,23 @@ if __name__ == "__main__": headers = open(sys.argv[3], 'r').read().split(';') coreBindings = sys.argv[4] whiteListFile = sys.argv[5] - exec(open(whiteListFile).read()) - assert(white_list) + + if whiteListFile.endswith(".json") or whiteListFile.endswith(".JSON"): + with open(whiteListFile) as f: + gen_dict = json.load(f) + f.close() + white_list = makeWhiteListJson(gen_dict) + namespace_prefix_override = makeNamespacePrefixOverride(gen_dict) + elif whiteListFile.endswith(".py") or whiteListFile.endswith(".PY"): + exec(open(whiteListFile).read()) + assert(white_list) + namespace_prefix_override = { + 'dnn' : '', + 'aruco' : '', + } + else: + print("Unexpected format of OpenCV config file", whiteListFile) + exit(1) generator = JSWrapperGenerator() generator.gen(bindingsCpp, headers, coreBindings) diff --git a/modules/objdetect/misc/js/gen_dict.json b/modules/objdetect/misc/js/gen_dict.json new file mode 100644 index 0000000000..dee3e4c93a --- /dev/null +++ b/modules/objdetect/misc/js/gen_dict.json @@ -0,0 +1,28 @@ +{ + "whitelist": + { + "": ["groupRectangles", "getPredefinedDictionary", "extendDictionary", "drawDetectedMarkers", "generateImageMarker", "drawDetectedCornersCharuco", "drawDetectedDiamonds"], + "HOGDescriptor": ["load", 
"HOGDescriptor", "getDefaultPeopleDetector", "getDaimlerPeopleDetector", "setSVMDetector", "detectMultiScale"], + "CascadeClassifier": ["load", "detectMultiScale2", "CascadeClassifier", "detectMultiScale3", "empty", "detectMultiScale"], + "GraphicalCodeDetector": ["decode", "detect", "detectAndDecode", "detectMulti", "decodeMulti", "detectAndDecodeMulti"], + "QRCodeDetector": ["QRCodeDetector", "decode", "detect", "detectAndDecode", "detectMulti", "decodeMulti", "detectAndDecodeMulti", "decodeCurved", "detectAndDecodeCurved", "setEpsX", "setEpsY"], + "aruco_PredefinedDictionaryType": [], + "aruco_Dictionary": ["Dictionary", "getDistanceToId", "generateImageMarker", "getByteListFromBits", "getBitsFromByteList"], + "aruco_Board": ["Board", "matchImagePoints", "generateImage"], + "aruco_GridBoard": ["GridBoard", "generateImage", "getGridSize", "getMarkerLength", "getMarkerSeparation", "matchImagePoints"], + "aruco_CharucoParameters": ["CharucoParameters"], + "aruco_CharucoBoard": ["CharucoBoard", "generateImage", "getChessboardCorners", "getNearestMarkerCorners", "checkCharucoCornersCollinear", "matchImagePoints", "getLegacyPattern", "setLegacyPattern"], + "aruco_DetectorParameters": ["DetectorParameters"], + "aruco_RefineParameters": ["RefineParameters"], + "aruco_ArucoDetector": ["ArucoDetector", "detectMarkers", "refineDetectedMarkers", "setDictionary", "setDetectorParameters", "setRefineParameters"], + "aruco_CharucoDetector": ["CharucoDetector", "setBoard", "setCharucoParameters", "setDetectorParameters", "setRefineParameters", "detectBoard", "detectDiamonds"], + "QRCodeDetectorAruco_Params": ["Params"], + "QRCodeDetectorAruco": ["QRCodeDetectorAruco", "decode", "detect", "detectAndDecode", "detectMulti", "decodeMulti", "detectAndDecodeMulti", "setDetectorParameters", "setArucoParameters"], + "barcode_BarcodeDetector": ["BarcodeDetector", "decode", "detect", "detectAndDecode", "detectMulti", "decodeMulti", "detectAndDecodeMulti", "decodeWithType", 
"detectAndDecodeWithType"], + "FaceDetectorYN": ["setInputSize", "getInputSize", "setScoreThreshold", "getScoreThreshold", "setNMSThreshold", "getNMSThreshold", "setTopK", "getTopK", "detect", "create"] + }, + "namespace_prefix_override": + { + "aruco": "" + } +} diff --git a/modules/photo/misc/js/gen_dict.json b/modules/photo/misc/js/gen_dict.json new file mode 100644 index 0000000000..d8a6db100a --- /dev/null +++ b/modules/photo/misc/js/gen_dict.json @@ -0,0 +1,25 @@ +{ + "whitelist": + { + "": [ + "createAlignMTB", "createCalibrateDebevec", "createCalibrateRobertson", + "createMergeDebevec", "createMergeMertens", "createMergeRobertson", + "createTonemapDrago", "createTonemapMantiuk", "createTonemapReinhard", "inpaint"], + "CalibrateCRF": ["process"], + "AlignMTB" : ["calculateShift", "shiftMat", "computeBitmaps", "getMaxBits", "setMaxBits", + "getExcludeRange", "setExcludeRange", "getCut", "setCut"], + "CalibrateDebevec" : ["getLambda", "setLambda", "getSamples", "setSamples", "getRandom", "setRandom"], + "CalibrateRobertson" : ["getMaxIter", "setMaxIter", "getThreshold", "setThreshold", "getRadiance"], + "MergeExposures" : ["process"], + "MergeDebevec" : ["process"], + "MergeMertens" : ["process", "getContrastWeight", "setContrastWeight", "getSaturationWeight", + "setSaturationWeight", "getExposureWeight", "setExposureWeight"], + "MergeRobertson" : ["process"], + "Tonemap" : ["process" , "getGamma", "setGamma"], + "TonemapDrago" : ["getSaturation", "setSaturation", "getBias", "setBias", + "getSigmaColor", "setSigmaColor", "getSigmaSpace","setSigmaSpace"], + "TonemapMantiuk" : ["getScale", "setScale", "getSaturation", "setSaturation"], + "TonemapReinhard" : ["getIntensity", "setIntensity", "getLightAdaptation", "setLightAdaptation", + "getColorAdaptation", "setColorAdaptation"] + } +} diff --git a/modules/stitching/test/test_matchers.cpp b/modules/stitching/test/test_matchers.cpp index 2deb676fb3..16d254ef6d 100644 --- a/modules/stitching/test/test_matchers.cpp 
+++ b/modules/stitching/test/test_matchers.cpp @@ -67,9 +67,11 @@ TEST(SurfFeaturesFinder, CanFindInROIs) int tl_rect_count = 0, br_rect_count = 0, bad_count = 0; for (const auto &keypoint : roi_features.keypoints) { - if (rois[0].contains(keypoint.pt)) + // Workaround for https://github.com/opencv/opencv/issues/26016 + // To keep its behaviour, keypoint.pt casts to Point_. + if (rois[0].contains(Point_(keypoint.pt))) tl_rect_count++; - else if (rois[1].contains(keypoint.pt)) + else if (rois[1].contains(Point_(keypoint.pt))) br_rect_count++; else bad_count++; diff --git a/modules/ts/src/ts.cpp b/modules/ts/src/ts.cpp index 2c6d3d1260..5d42637dec 100644 --- a/modules/ts/src/ts.cpp +++ b/modules/ts/src/ts.cpp @@ -1035,7 +1035,8 @@ static std::string findData(const std::string& relative_path, bool required, boo } } #ifdef OPENCV_TEST_DATA_INSTALL_PATH - datapath = path_join("./", OPENCV_TEST_DATA_INSTALL_PATH); + datapath = OPENCV_TEST_DATA_INSTALL_PATH; + if (isDirectory(datapath)) { for(size_t i = search_subdir.size(); i > 0; i--) diff --git a/modules/video/misc/js/gen_dict.json b/modules/video/misc/js/gen_dict.json new file mode 100644 index 0000000000..f04aae740f --- /dev/null +++ b/modules/video/misc/js/gen_dict.json @@ -0,0 +1,17 @@ +{ + "whitelist": + { + "": [ + "CamShift", + "calcOpticalFlowFarneback", + "calcOpticalFlowPyrLK", + "createBackgroundSubtractorMOG2", + "findTransformECC", + "meanShift" + ], + "BackgroundSubtractorMOG2": ["BackgroundSubtractorMOG2", "apply"], + "BackgroundSubtractor": ["apply", "getBackgroundImage"], + "TrackerMIL": ["create"], + "TrackerMIL_Params": [] + } +} diff --git a/modules/videoio/src/cap_dshow.cpp b/modules/videoio/src/cap_dshow.cpp index 21af06a147..72754e1759 100644 --- a/modules/videoio/src/cap_dshow.cpp +++ b/modules/videoio/src/cap_dshow.cpp @@ -1382,14 +1382,11 @@ int videoInput::listDevices(bool silent){ // Find the description or friendly name. 
VARIANT varName; VariantInit(&varName); - hr = pPropBag->Read(L"Description", &varName, 0); + hr = pPropBag->Read(L"FriendlyName", &varName, 0); - if (FAILED(hr)) hr = pPropBag->Read(L"FriendlyName", &varName, 0); + if (FAILED(hr)) hr = pPropBag->Read(L"Description", &varName, 0); if (SUCCEEDED(hr)){ - - hr = pPropBag->Read(L"FriendlyName", &varName, 0); - int count = 0; int maxLen = sizeof(deviceNames[0])/sizeof(deviceNames[0][0]) - 2; while( varName.bstrVal[count] != 0x00 && count < maxLen) { @@ -1401,6 +1398,8 @@ int videoInput::listDevices(bool silent){ if(!silent) DebugPrintOut("SETUP: %i) %s\n",deviceCounter, deviceNames[deviceCounter]); } + VariantClear(&varName); + pPropBag->Release(); pPropBag = NULL; diff --git a/platforms/android/aar-template/README.md b/platforms/android/aar-template/README.md index a851364dd4..47bd441bf8 100644 --- a/platforms/android/aar-template/README.md +++ b/platforms/android/aar-template/README.md @@ -1,7 +1,7 @@ ## Scripts for creating an AAR package and a local Maven repository with OpenCV libraries for Android ### How to run the scripts -1. Set JAVA_HOME and ANDROID_HOME enviroment variables. For example: +1. Set JAVA_HOME and ANDROID_HOME environment variables. 
For example: ``` export JAVA_HOME=~/Android Studio/jbr export ANDROID_HOME=~/Android/SDK diff --git a/platforms/js/build_js.py b/platforms/js/build_js.py index d45f852544..7af9f97230 100755 --- a/platforms/js/build_js.py +++ b/platforms/js/build_js.py @@ -254,8 +254,7 @@ if __name__ == "__main__": parser.add_argument('--build_flags', help="Append Emscripten build options") parser.add_argument('--build_wasm_intrin_test', default=False, action="store_true", help="Build WASM intrin tests") # Write a path to modify file like argument of this flag - parser.add_argument('--config', default=os.path.join(os.path.dirname(os.path.abspath(__file__)), 'opencv_js.config.py'), - help="Specify configuration file with own list of exported into JS functions") + parser.add_argument('--config', help="Specify configuration file with own list of exported into JS functions") parser.add_argument('--webnn', action="store_true", help="Enable WebNN Backend") transformed_args = ["--cmake_option=%s".format(arg) if arg[:2] == "-D" else arg for arg in sys.argv[1:]] @@ -263,7 +262,8 @@ if __name__ == "__main__": log.debug("Args: %s", args) - os.environ["OPENCV_JS_WHITELIST"] = os.path.abspath(args.config) + if args.config is not None: + os.environ["OPENCV_JS_WHITELIST"] = os.path.abspath(args.config) if 'EMMAKEN_JUST_CONFIGURE' in os.environ: del os.environ['EMMAKEN_JUST_CONFIGURE'] # avoid linker errors with NODERAWFS message then using 'emcmake' launcher diff --git a/samples/python/snippets/stitching.py b/samples/python/snippets/stitching.py index 18a160ec5b..b0c7d05f8a 100644 --- a/samples/python/snippets/stitching.py +++ b/samples/python/snippets/stitching.py @@ -43,12 +43,14 @@ def main(): sys.exit(-1) imgs.append(img) + #![stitching] stitcher = cv.Stitcher.create(args.mode) status, pano = stitcher.stitch(imgs) if status != cv.Stitcher_OK: print("Can't stitch images, error code = %d" % status) sys.exit(-1) + #![stitching] cv.imwrite(args.output, pano) print("stitching completed 
successfully. %s saved!" % args.output) diff --git a/samples/python/tracker.py b/samples/python/tracker.py index 860f3fc5a7..9e6f939275 100644 --- a/samples/python/tracker.py +++ b/samples/python/tracker.py @@ -13,13 +13,15 @@ For NanoTrack: For VitTrack: vitTracker: https://github.com/opencv/opencv_zoo/raw/fef72f8fa7c52eaf116d3df358d24e6e959ada0e/models/object_tracking_vittrack/object_tracking_vittrack_2023sep.onnx USAGE: - tracker.py [-h] [--input INPUT] [--tracker_algo TRACKER_ALGO] + tracker.py [-h] [--input INPUT_VIDEO] + [--tracker_algo TRACKER_ALGO mil, dasiamrpn, nanotrack, vittrack] [--dasiamrpn_net DASIAMRPN_NET] [--dasiamrpn_kernel_r1 DASIAMRPN_KERNEL_R1] [--dasiamrpn_kernel_cls1 DASIAMRPN_KERNEL_CLS1] [--dasiamrpn_backend DASIAMRPN_BACKEND] [--dasiamrpn_target DASIAMRPN_TARGET] - [--nanotrack_backbone NANOTRACK_BACKEND] [--nanotrack_headneck NANOTRACK_TARGET] + [--nanotrack_backbone NANOTRACK_BACKBONE] + [--nanotrack_headneck NANOTRACK_TARGET] [--vittrack_net VITTRACK_MODEL] '''