From 288a0634c2cb0f539563e0fef6c44d1e7de428c7 Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Sat, 6 Apr 2013 18:16:51 +0400 Subject: [PATCH 1/6] Make imgproc.hpp independent from C API --- apps/traincascade/imagestorage.cpp | 3 +- include/opencv/cv.h | 1 - include/opencv/cv.hpp | 2 + include/opencv/cvaux.h | 1 - include/opencv/highgui.h | 1 - modules/gpu/perf/perf_imgproc.cpp | 13 +- modules/imgproc/include/opencv2/imgproc.hpp | 1954 +++++++++-------- .../imgproc/include/opencv2/imgproc/types_c.h | 22 + modules/imgproc/perf/perf_cvt_color.cpp | 284 +-- modules/imgproc/perf/perf_matchTemplate.cpp | 14 +- modules/imgproc/src/corner.cpp | 2 +- modules/imgproc/src/histogram.cpp | 4 - modules/imgproc/src/moments.cpp | 19 - modules/imgproc/src/morph.cpp | 3 - modules/legacy/include/opencv2/legacy.hpp | 1 - .../legacy/include/opencv2/legacy/legacy.hpp | 2 +- modules/nonfree/test/test_detectors.cpp | 4 +- modules/objdetect/test/test_cascadeandhog.cpp | 4 +- .../ocl/include/opencv2/ocl/private/util.hpp | 4 +- modules/ocl/perf/perf_color.cpp | 10 +- modules/ocl/perf/perf_match_template.cpp | 20 +- modules/ocl/test/test_color.cpp | 7 +- modules/ocl/test/test_haar.cpp | 2 +- modules/ocl/test/test_hog.cpp | 10 +- modules/ocl/test/utility.cpp | 2 +- modules/photo/src/denoising.cpp | 8 +- modules/photo/src/inpaint.cpp | 6 + modules/photo/test/test_inpaint.cpp | 2 +- modules/stitching/src/blenders.cpp | 2 +- modules/stitching/src/matchers.cpp | 8 +- modules/stitching/src/seam_finders.cpp | 12 +- modules/ts/src/gpu_test.cpp | 2 +- .../video/include/opencv2/video/tracking.hpp | 4 +- samples/c/delaunay.c | 4 +- samples/c/facedetect.cpp | 2 +- samples/c/polar_transforms.c | 4 +- samples/c/pyramid_segmentation.c | 5 +- samples/c/smiledetect.cpp | 2 +- samples/cpp/3calibration.cpp | 4 +- samples/cpp/build3dmodel.cpp | 2 +- samples/cpp/calibration.cpp | 2 +- samples/cpp/chamfer.cpp | 2 +- .../cpp/detection_based_tracker_sample.cpp | 2 +- samples/cpp/distrans.cpp | 42 +- samples/cpp/edge.cpp | 2 +- samples/cpp/ffilldemo.cpp | 10 +- samples/cpp/fitellipse.cpp | 2 +- samples/cpp/houghcircles.cpp | 4 +- samples/cpp/houghlines.cpp | 2 +- samples/cpp/image.cpp | 4 +- samples/cpp/laplace.cpp | 10 +- samples/cpp/morphology2.cpp | 4 +- samples/cpp/phase_corr.cpp | 2 +- samples/cpp/rgbdodometry.cpp | 4 +- samples/cpp/segment_objects.cpp | 2 +- samples/cpp/squares.cpp | 2 +- samples/cpp/stereo_calib.cpp | 8 +- .../Histograms_Matching/EqualizeHist_Demo.cpp | 2 +- .../MatchTemplate_Demo.cpp | 2 +- .../calcBackProject_Demo1.cpp | 2 +- .../calcBackProject_Demo2.cpp | 2 +- .../Histograms_Matching/compareHist_Demo.cpp | 6 +- .../cpp/tutorial_code/ImgProc/Threshold.cpp | 2 +- .../ImgTrans/CannyDetector_Demo.cpp | 2 +- .../ImgTrans/HoughCircle_Demo.cpp | 4 +- .../ImgTrans/HoughLines_Demo.cpp | 6 +- .../tutorial_code/ImgTrans/Laplace_Demo.cpp | 2 +- .../cpp/tutorial_code/ImgTrans/Remap_Demo.cpp | 2 +- .../cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp | 2 +- .../ShapeDescriptors/findContours_demo.cpp | 4 +- .../generalContours_demo1.cpp | 4 +- .../generalContours_demo2.cpp | 4 +- .../ShapeDescriptors/hull_demo.cpp | 4 +- .../ShapeDescriptors/moments_demo.cpp | 4 +- .../TrackingMotion/cornerDetector_Demo.cpp | 2 +- .../TrackingMotion/cornerHarris_Demo.cpp | 2 +- .../TrackingMotion/cornerSubPix_Demo.cpp | 2 +- .../goodFeaturesToTrack_Demo.cpp | 2 +- .../camera_calibration/camera_calibration.cpp | 2 +- .../interoperability_with_OpenCV_1.cpp | 4 +- .../objectDetection/objectDetection.cpp | 2 +- .../objectDetection/objectDetection2.cpp | 
2 +- samples/cpp/video_dmtx.cpp | 2 +- samples/cpp/video_homography.cpp | 2 +- samples/cpp/watershed.cpp | 6 +- samples/gpu/cascadeclassifier.cpp | 4 +- samples/gpu/generalized_hough.cpp | 6 +- samples/gpu/hog.cpp | 8 +- samples/gpu/houghlines.cpp | 2 +- samples/gpu/morphology.cpp | 6 +- samples/gpu/stereo_match.cpp | 12 +- samples/ocl/facedetect.cpp | 2 +- samples/ocl/hog.cpp | 8 +- samples/ocl/squares.cpp | 2 +- samples/ocl/surf_matcher.cpp | 8 +- 95 files changed, 1400 insertions(+), 1300 deletions(-) diff --git a/apps/traincascade/imagestorage.cpp b/apps/traincascade/imagestorage.cpp index 53fddcc0da..a8426e074d 100644 --- a/apps/traincascade/imagestorage.cpp +++ b/apps/traincascade/imagestorage.cpp @@ -1,6 +1,7 @@ #include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/highgui.hpp" -#include "cv.h" #include "imagestorage.h" #include #include diff --git a/include/opencv/cv.h b/include/opencv/cv.h index d239643b84..ce0d4e20f8 100644 --- a/include/opencv/cv.h +++ b/include/opencv/cv.h @@ -62,7 +62,6 @@ #include "opencv2/core/core_c.h" #include "opencv2/imgproc/imgproc_c.h" -#include "opencv2/imgproc.hpp" #include "opencv2/video.hpp" #include "opencv2/features2d.hpp" #include "opencv2/flann.hpp" diff --git a/include/opencv/cv.hpp b/include/opencv/cv.hpp index 6654dea562..f1c5a1d275 100644 --- a/include/opencv/cv.hpp +++ b/include/opencv/cv.hpp @@ -49,5 +49,7 @@ #include "cv.h" #include "opencv2/core.hpp" +#include "opencv2/imgproc.hpp" +#include "opencv2/highgui.hpp" #endif diff --git a/include/opencv/cvaux.h b/include/opencv/cvaux.h index 62c91dab4b..6f8077b949 100644 --- a/include/opencv/cvaux.h +++ b/include/opencv/cvaux.h @@ -48,7 +48,6 @@ #include "opencv2/core/core_c.h" #include "opencv2/imgproc/imgproc_c.h" -#include "opencv2/imgproc.hpp" #include "opencv2/video.hpp" #include "opencv2/features2d.hpp" #include "opencv2/calib3d.hpp" diff --git a/include/opencv/highgui.h b/include/opencv/highgui.h index 229436a75a..0261029c09 100644 --- a/include/opencv/highgui.h +++ b/include/opencv/highgui.h @@ -44,6 +44,5 @@ #include "opencv2/core/core_c.h" #include "opencv2/highgui/highgui_c.h" -#include "opencv2/highgui.hpp" #endif diff --git a/modules/gpu/perf/perf_imgproc.cpp b/modules/gpu/perf/perf_imgproc.cpp index eff2bfcf2e..8a8e604771 100644 --- a/modules/gpu/perf/perf_imgproc.cpp +++ b/modules/gpu/perf/perf_imgproc.cpp @@ -1815,12 +1815,17 @@ PERF_TEST_P(Sz_Dp_MinDist, ImgProc_HoughCircles, ////////////////////////////////////////////////////////////////////// // GeneralizedHough -CV_FLAGS(GHMethod, cv::GHT_POSITION, cv::GHT_SCALE, cv::GHT_ROTATION); +enum { GHT_POSITION = cv::GeneralizedHough::GHT_POSITION, + GHT_SCALE = cv::GeneralizedHough::GHT_SCALE, + GHT_ROTATION = cv::GeneralizedHough::GHT_ROTATION + }; + +CV_FLAGS(GHMethod, GHT_POSITION, GHT_SCALE, GHT_ROTATION); DEF_PARAM_TEST(Method_Sz, GHMethod, cv::Size); PERF_TEST_P(Method_Sz, ImgProc_GeneralizedHough, - Combine(Values(GHMethod(cv::GHT_POSITION), GHMethod(cv::GHT_POSITION | cv::GHT_SCALE), GHMethod(cv::GHT_POSITION | cv::GHT_ROTATION), GHMethod(cv::GHT_POSITION | cv::GHT_SCALE | cv::GHT_ROTATION)), + Combine(Values(GHMethod(GHT_POSITION), GHMethod(GHT_POSITION | GHT_SCALE), GHMethod(GHT_POSITION | GHT_ROTATION), GHMethod(GHT_POSITION | GHT_SCALE | GHT_ROTATION)), GPU_TYPICAL_MAT_SIZES)) { declare.time(10); @@ -1870,7 +1875,7 @@ PERF_TEST_P(Method_Sz, ImgProc_GeneralizedHough, cv::gpu::GpuMat posAndVotes; cv::Ptr d_hough = cv::gpu::GeneralizedHough_GPU::create(method); - if (method & cv::GHT_ROTATION) + 
if (method & GHT_ROTATION) { d_hough->set("maxAngle", 90.0); d_hough->set("angleStep", 2.0); @@ -1888,7 +1893,7 @@ PERF_TEST_P(Method_Sz, ImgProc_GeneralizedHough, cv::Mat positions; cv::Ptr hough = cv::GeneralizedHough::create(method); - if (method & cv::GHT_ROTATION) + if (method & GHT_ROTATION) { hough->set("maxAngle", 90.0); hough->set("angleStep", 2.0); diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 8747a30116..16827201f8 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -43,10 +43,6 @@ #ifndef __OPENCV_IMGPROC_HPP__ #define __OPENCV_IMGPROC_HPP__ -#include "opencv2/imgproc/types_c.h" - -#ifdef __cplusplus - #include "opencv2/core.hpp" /*! \namespace cv @@ -55,15 +51,426 @@ namespace cv { -//! various border interpolation methods -enum { BORDER_REPLICATE=IPL_BORDER_REPLICATE, BORDER_CONSTANT=IPL_BORDER_CONSTANT, - BORDER_REFLECT=IPL_BORDER_REFLECT, BORDER_WRAP=IPL_BORDER_WRAP, - BORDER_REFLECT_101=IPL_BORDER_REFLECT_101, BORDER_REFLECT101=BORDER_REFLECT_101, - BORDER_TRANSPARENT=IPL_BORDER_TRANSPARENT, - BORDER_DEFAULT=BORDER_REFLECT_101, BORDER_ISOLATED=16 }; +//! Various border types, image boundaries are denoted with '|' +enum { + BORDER_CONSTANT = 0, // iiiiii|abcdefgh|iiiiiii with some specified 'i' + BORDER_REPLICATE = 1, // aaaaaa|abcdefgh|hhhhhhh + BORDER_REFLECT = 2, // fedcba|abcdefgh|hgfedcb + BORDER_WRAP = 3, // cdefgh|abcdefgh|abcdefg + BORDER_REFLECT_101 = 4, // gfedcb|abcdefgh|gfedcba + BORDER_TRANSPARENT = 5, // uvwxyz|absdefgh|ijklmno + + BORDER_REFLECT101 = BORDER_REFLECT_101, + BORDER_DEFAULT = BORDER_REFLECT_101, + BORDER_ISOLATED = 16 // do not look outside of ROI + }; + +//! type of the kernel +enum { KERNEL_GENERAL = 0, // the kernel is generic. No any type of symmetry or other properties. + KERNEL_SYMMETRICAL = 1, // kernel[i] == kernel[ksize-i-1] , and the anchor is at the center + KERNEL_ASYMMETRICAL = 2, // kernel[i] == -kernel[ksize-i-1] , and the anchor is at the center + KERNEL_SMOOTH = 4, // all the kernel elements are non-negative and summed to 1 + KERNEL_INTEGER = 8 // all the kernel coefficients are integer numbers + }; + +//! type of morphological operation +enum { MORPH_ERODE = 0, + MORPH_DILATE = 1, + MORPH_OPEN = 2, + MORPH_CLOSE = 3, + MORPH_GRADIENT = 4, + MORPH_TOPHAT = 5, + MORPH_BLACKHAT = 6 + }; + +//! shape of the structuring element +enum { MORPH_RECT = 0, + MORPH_CROSS = 1, + MORPH_ELLIPSE = 2 + }; + +//! interpolation algorithm +enum { INTER_NEAREST = 0, //!< nearest neighbor interpolation + INTER_LINEAR = 1, //!< bilinear interpolation + INTER_CUBIC = 2, //!< bicubic interpolation + INTER_AREA = 3, //!< area-based (or super) interpolation + INTER_LANCZOS4 = 4, //!< Lanczos interpolation over 8x8 neighborhood + + INTER_MAX = 7, //!< mask for interpolation codes + WARP_INVERSE_MAP = 16 + }; + +enum { INTER_BITS = 5, + INTER_BITS2 = INTER_BITS * 2, + INTER_TAB_SIZE = 1 << INTER_BITS, + INTER_TAB_SIZE2 = INTER_TAB_SIZE * INTER_TAB_SIZE + }; + +//! 
Distance types for Distance Transform and M-estimators +enum { DIST_USER = -1, // User defined distance + DIST_L1 = 1, // distance = |x1-x2| + |y1-y2| + DIST_L2 = 2, // the simple euclidean distance + DIST_C = 3, // distance = max(|x1-x2|,|y1-y2|) + DIST_L12 = 4, // L1-L2 metric: distance = 2(sqrt(1+x*x/2) - 1)) + DIST_FAIR = 5, // distance = c^2(|x|/c-log(1+|x|/c)), c = 1.3998 + DIST_WELSCH = 6, // distance = c^2/2(1-exp(-(x/c)^2)), c = 2.9846 + DIST_HUBER = 7 // distance = |x| threshold ? max_value : 0 + THRESH_BINARY_INV = 1, // value = value > threshold ? 0 : max_value + THRESH_TRUNC = 2, // value = value > threshold ? threshold : value + THRESH_TOZERO = 3, // value = value > threshold ? value : 0 + THRESH_TOZERO_INV = 4, // value = value > threshold ? 0 : value + THRESH_MASK = 7, + THRESH_OTSU = 8 // use Otsu algorithm to choose the optimal threshold value + }; + +//! adaptive threshold algorithm +enum { ADAPTIVE_THRESH_MEAN_C = 0, + ADAPTIVE_THRESH_GAUSSIAN_C = 1 + }; + +enum { PROJ_SPHERICAL_ORTHO = 0, + PROJ_SPHERICAL_EQRECT = 1 + }; + +//! class of the pixel in GrabCut algorithm +enum { GC_BGD = 0, //!< background + GC_FGD = 1, //!< foreground + GC_PR_BGD = 2, //!< most probably background + GC_PR_FGD = 3 //!< most probably foreground + }; + +//! GrabCut algorithm flags +enum { GC_INIT_WITH_RECT = 0, + GC_INIT_WITH_MASK = 1, + GC_EVAL = 2 +}; + +//! distanceTransform algorithm flags +enum { DIST_LABEL_CCOMP = 0, + DIST_LABEL_PIXEL = 1 + }; + +//! floodfill algorithm flags +enum { FLOODFILL_FIXED_RANGE = 1 << 16, + FLOODFILL_MASK_ONLY = 1 << 17 + }; + +//! type of the template matching operation +enum { TM_SQDIFF = 0, + TM_SQDIFF_NORMED = 1, + TM_CCORR = 2, + TM_CCORR_NORMED = 3, + TM_CCOEFF = 4, + TM_CCOEFF_NORMED = 5 + }; + +//! connected components algorithm output formats +enum { CC_STAT_LEFT = 0, + CC_STAT_TOP = 1, + CC_STAT_WIDTH = 2, + CC_STAT_HEIGHT = 3, + CC_STAT_AREA = 4, + CC_STAT_MAX = 5 + }; + +//! mode of the contour retrieval algorithm +enum { RETR_EXTERNAL = 0, //!< retrieve only the most external (top-level) contours + RETR_LIST = 1, //!< retrieve all the contours without any hierarchical information + RETR_CCOMP = 2, //!< retrieve the connected components (that can possibly be nested) + RETR_TREE = 3, //!< retrieve all the contours and the whole hierarchy + RETR_FLOODFILL = 4 + }; + +//! the contour approximation algorithm +enum { CHAIN_APPROX_NONE = 1, + CHAIN_APPROX_SIMPLE = 2, + CHAIN_APPROX_TC89_L1 = 3, + CHAIN_APPROX_TC89_KCOS = 4 + }; + +//! Variants of a Hough transform +enum { HOUGH_STANDARD = 0, + HOUGH_PROBABILISTIC = 1, + HOUGH_MULTI_SCALE = 2, + HOUGH_GRADIENT = 3 + }; + +//! Histogram comparison methods +enum { HISTCMP_CORREL = 0, + HISTCMP_CHISQR = 1, + HISTCMP_INTERSECT = 2, + HISTCMP_BHATTACHARYYA = 3, + HISTCMP_HELLINGER = HISTCMP_BHATTACHARYYA + }; + +//! 
the color conversion code +enum { COLOR_BGR2BGRA = 0, + COLOR_RGB2RGBA = COLOR_BGR2BGRA, + + COLOR_BGRA2BGR = 1, + COLOR_RGBA2RGB = COLOR_BGRA2BGR, + + COLOR_BGR2RGBA = 2, + COLOR_RGB2BGRA = COLOR_BGR2RGBA, + + COLOR_RGBA2BGR = 3, + COLOR_BGRA2RGB = COLOR_RGBA2BGR, + + COLOR_BGR2RGB = 4, + COLOR_RGB2BGR = COLOR_BGR2RGB, + + COLOR_BGRA2RGBA = 5, + COLOR_RGBA2BGRA = COLOR_BGRA2RGBA, + + COLOR_BGR2GRAY = 6, + COLOR_RGB2GRAY = 7, + COLOR_GRAY2BGR = 8, + COLOR_GRAY2RGB = COLOR_GRAY2BGR, + COLOR_GRAY2BGRA = 9, + COLOR_GRAY2RGBA = COLOR_GRAY2BGRA, + COLOR_BGRA2GRAY = 10, + COLOR_RGBA2GRAY = 11, + + COLOR_BGR2BGR565 = 12, + COLOR_RGB2BGR565 = 13, + COLOR_BGR5652BGR = 14, + COLOR_BGR5652RGB = 15, + COLOR_BGRA2BGR565 = 16, + COLOR_RGBA2BGR565 = 17, + COLOR_BGR5652BGRA = 18, + COLOR_BGR5652RGBA = 19, + + COLOR_GRAY2BGR565 = 20, + COLOR_BGR5652GRAY = 21, + + COLOR_BGR2BGR555 = 22, + COLOR_RGB2BGR555 = 23, + COLOR_BGR5552BGR = 24, + COLOR_BGR5552RGB = 25, + COLOR_BGRA2BGR555 = 26, + COLOR_RGBA2BGR555 = 27, + COLOR_BGR5552BGRA = 28, + COLOR_BGR5552RGBA = 29, + + COLOR_GRAY2BGR555 = 30, + COLOR_BGR5552GRAY = 31, + + COLOR_BGR2XYZ = 32, + COLOR_RGB2XYZ = 33, + COLOR_XYZ2BGR = 34, + COLOR_XYZ2RGB = 35, + + COLOR_BGR2YCrCb = 36, + COLOR_RGB2YCrCb = 37, + COLOR_YCrCb2BGR = 38, + COLOR_YCrCb2RGB = 39, + + COLOR_BGR2HSV = 40, + COLOR_RGB2HSV = 41, + + COLOR_BGR2Lab = 44, + COLOR_RGB2Lab = 45, + + COLOR_BGR2Luv = 50, + COLOR_RGB2Luv = 51, + COLOR_BGR2HLS = 52, + COLOR_RGB2HLS = 53, + + COLOR_HSV2BGR = 54, + COLOR_HSV2RGB = 55, + + COLOR_Lab2BGR = 56, + COLOR_Lab2RGB = 57, + COLOR_Luv2BGR = 58, + COLOR_Luv2RGB = 59, + COLOR_HLS2BGR = 60, + COLOR_HLS2RGB = 61, + + COLOR_BGR2HSV_FULL = 66, + COLOR_RGB2HSV_FULL = 67, + COLOR_BGR2HLS_FULL = 68, + COLOR_RGB2HLS_FULL = 69, + + COLOR_HSV2BGR_FULL = 70, + COLOR_HSV2RGB_FULL = 71, + COLOR_HLS2BGR_FULL = 72, + COLOR_HLS2RGB_FULL = 73, + + COLOR_LBGR2Lab = 74, + COLOR_LRGB2Lab = 75, + COLOR_LBGR2Luv = 76, + COLOR_LRGB2Luv = 77, + + COLOR_Lab2LBGR = 78, + COLOR_Lab2LRGB = 79, + COLOR_Luv2LBGR = 80, + COLOR_Luv2LRGB = 81, + + COLOR_BGR2YUV = 82, + COLOR_RGB2YUV = 83, + COLOR_YUV2BGR = 84, + COLOR_YUV2RGB = 85, + + // YUV 4:2:0 family to RGB + COLOR_YUV2RGB_NV12 = 90, + COLOR_YUV2BGR_NV12 = 91, + COLOR_YUV2RGB_NV21 = 92, + COLOR_YUV2BGR_NV21 = 93, + COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21, + COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21, + + COLOR_YUV2RGBA_NV12 = 94, + COLOR_YUV2BGRA_NV12 = 95, + COLOR_YUV2RGBA_NV21 = 96, + COLOR_YUV2BGRA_NV21 = 97, + COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21, + COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21, + + COLOR_YUV2RGB_YV12 = 98, + COLOR_YUV2BGR_YV12 = 99, + COLOR_YUV2RGB_IYUV = 100, + COLOR_YUV2BGR_IYUV = 101, + COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV, + COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV, + COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12, + COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12, + + COLOR_YUV2RGBA_YV12 = 102, + COLOR_YUV2BGRA_YV12 = 103, + COLOR_YUV2RGBA_IYUV = 104, + COLOR_YUV2BGRA_IYUV = 105, + COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV, + COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV, + COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12, + COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12, + + COLOR_YUV2GRAY_420 = 106, + COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420, + COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420, + COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420, + COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420, + + // YUV 4:2:2 family to RGB + 
COLOR_YUV2RGB_UYVY = 107, + COLOR_YUV2BGR_UYVY = 108, + //COLOR_YUV2RGB_VYUY = 109, + //COLOR_YUV2BGR_VYUY = 110, + COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY, + COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY, + COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY, + COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY, + + COLOR_YUV2RGBA_UYVY = 111, + COLOR_YUV2BGRA_UYVY = 112, + //COLOR_YUV2RGBA_VYUY = 113, + //COLOR_YUV2BGRA_VYUY = 114, + COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY, + COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY, + COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY, + COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY, + + COLOR_YUV2RGB_YUY2 = 115, + COLOR_YUV2BGR_YUY2 = 116, + COLOR_YUV2RGB_YVYU = 117, + COLOR_YUV2BGR_YVYU = 118, + COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2, + COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2, + COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2, + COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2, + + COLOR_YUV2RGBA_YUY2 = 119, + COLOR_YUV2BGRA_YUY2 = 120, + COLOR_YUV2RGBA_YVYU = 121, + COLOR_YUV2BGRA_YVYU = 122, + COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2, + COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2, + COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2, + COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2, + + COLOR_YUV2GRAY_UYVY = 123, + COLOR_YUV2GRAY_YUY2 = 124, + //CV_YUV2GRAY_VYUY = CV_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY, + COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2, + COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2, + COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2, + + // alpha premultiplication + COLOR_RGBA2mRGBA = 125, + COLOR_mRGBA2RGBA = 126, + + // RGB to YUV 4:2:0 family + COLOR_RGB2YUV_I420 = 127, + COLOR_BGR2YUV_I420 = 128, + COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420, + COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420, + + COLOR_RGBA2YUV_I420 = 129, + COLOR_BGRA2YUV_I420 = 130, + COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420, + COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420, + COLOR_RGB2YUV_YV12 = 131, + COLOR_BGR2YUV_YV12 = 132, + COLOR_RGBA2YUV_YV12 = 133, + COLOR_BGRA2YUV_YV12 = 134, + + // Demosaicing + COLOR_BayerBG2BGR = 46, + COLOR_BayerGB2BGR = 47, + COLOR_BayerRG2BGR = 48, + COLOR_BayerGR2BGR = 49, + + COLOR_BayerBG2RGB = COLOR_BayerRG2BGR, + COLOR_BayerGB2RGB = COLOR_BayerGR2BGR, + COLOR_BayerRG2RGB = COLOR_BayerBG2BGR, + COLOR_BayerGR2RGB = COLOR_BayerGB2BGR, + + COLOR_BayerBG2GRAY = 86, + COLOR_BayerGB2GRAY = 87, + COLOR_BayerRG2GRAY = 88, + COLOR_BayerGR2GRAY = 89, + + // Demosaicing using Variable Number of Gradients + COLOR_BayerBG2BGR_VNG = 62, + COLOR_BayerGB2BGR_VNG = 63, + COLOR_BayerRG2BGR_VNG = 64, + COLOR_BayerGR2BGR_VNG = 65, + + COLOR_BayerBG2RGB_VNG = COLOR_BayerRG2BGR_VNG, + COLOR_BayerGB2RGB_VNG = COLOR_BayerGR2BGR_VNG, + COLOR_BayerRG2RGB_VNG = COLOR_BayerBG2BGR_VNG, + COLOR_BayerGR2RGB_VNG = COLOR_BayerGB2BGR_VNG, + + // Edge-Aware Demosaicing + COLOR_BayerBG2BGR_EA = 135, + COLOR_BayerGB2BGR_EA = 136, + COLOR_BayerRG2BGR_EA = 137, + COLOR_BayerGR2BGR_EA = 138, + + COLOR_BayerBG2RGB_EA = COLOR_BayerRG2BGR_EA, + COLOR_BayerGB2RGB_EA = COLOR_BayerGR2BGR_EA, + COLOR_BayerRG2RGB_EA = COLOR_BayerBG2BGR_EA, + COLOR_BayerGR2RGB_EA = COLOR_BayerGB2BGR_EA, + + + COLOR_COLORCVT_MAX = 139 +}; + -//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p. -CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType ); /*! The Base Class for 1D or Row-wise Filters @@ -82,9 +489,10 @@ public: //! the destructor virtual ~BaseRowFilter(); //! the filtering operator. Must be overrided in the derived classes. 
The horizontal border interpolation is done outside of the class. - virtual void operator()(const uchar* src, uchar* dst, - int width, int cn) = 0; - int ksize, anchor; + virtual void operator()(const uchar* src, uchar* dst, int width, int cn) = 0; + + int ksize; + int anchor; }; @@ -109,13 +517,15 @@ public: //! the destructor virtual ~BaseColumnFilter(); //! the filtering operator. Must be overrided in the derived classes. The vertical border interpolation is done outside of the class. - virtual void operator()(const uchar** src, uchar* dst, int dststep, - int dstcount, int width) = 0; + virtual void operator()(const uchar** src, uchar* dst, int dststep, int dstcount, int width) = 0; //! resets the internal buffers, if any virtual void reset(); - int ksize, anchor; + + int ksize; + int anchor; }; + /*! The Base Class for Non-Separable 2D Filters. @@ -135,14 +545,15 @@ public: //! the destructor virtual ~BaseFilter(); //! the filtering operator. The horizontal and the vertical border interpolation is done outside of the class. - virtual void operator()(const uchar** src, uchar* dst, int dststep, - int dstcount, int width, int cn) = 0; + virtual void operator()(const uchar** src, uchar* dst, int dststep, int dstcount, int width, int cn) = 0; //! resets the internal buffers, if any virtual void reset(); + Size ksize; Point anchor; }; + /*! The Main Class for Image Filtering. @@ -226,9 +637,9 @@ public: const Ptr& _rowFilter, const Ptr& _columnFilter, int srcType, int dstType, int bufType, - int _rowBorderType=BORDER_REPLICATE, - int _columnBorderType=-1, - const Scalar& _borderValue=Scalar()); + int _rowBorderType = BORDER_REPLICATE, + int _columnBorderType = -1, + const Scalar& _borderValue = Scalar()); //! the destructor virtual ~FilterEngine(); //! reinitializes the engine. The previously assigned filters are released. @@ -236,42 +647,52 @@ public: const Ptr& _rowFilter, const Ptr& _columnFilter, int srcType, int dstType, int bufType, - int _rowBorderType=BORDER_REPLICATE, int _columnBorderType=-1, - const Scalar& _borderValue=Scalar()); + int _rowBorderType = BORDER_REPLICATE, + int _columnBorderType = -1, + const Scalar& _borderValue = Scalar()); //! starts filtering of the specified ROI of an image of size wholeSize. - virtual int start(Size wholeSize, Rect roi, int maxBufRows=-1); + virtual int start(Size wholeSize, Rect roi, int maxBufRows = -1); //! starts filtering of the specified ROI of the specified image. - virtual int start(const Mat& src, const Rect& srcRoi=Rect(0,0,-1,-1), - bool isolated=false, int maxBufRows=-1); + virtual int start(const Mat& src, const Rect& srcRoi = Rect(0,0,-1,-1), + bool isolated = false, int maxBufRows = -1); //! processes the next srcCount rows of the image. virtual int proceed(const uchar* src, int srcStep, int srcCount, uchar* dst, int dstStep); //! applies filter to the specified ROI of the image. if srcRoi=(0,0,-1,-1), the whole image is filtered. virtual void apply( const Mat& src, Mat& dst, - const Rect& srcRoi=Rect(0,0,-1,-1), - Point dstOfs=Point(0,0), - bool isolated=false); + const Rect& srcRoi = Rect(0,0,-1,-1), + Point dstOfs = Point(0,0), + bool isolated = false); //! returns true if the filter is separable bool isSeparable() const { return (const BaseFilter*)filter2D == 0; } //! 
returns the number int remainingInputRows() const; int remainingOutputRows() const; - int srcType, dstType, bufType; + int srcType; + int dstType; + int bufType; Size ksize; Point anchor; int maxWidth; Size wholeSize; Rect roi; - int dx1, dx2; - int rowBorderType, columnBorderType; + int dx1; + int dx2; + int rowBorderType; + int columnBorderType; std::vector borderTab; int borderElemSize; std::vector ringBuf; std::vector srcRow; std::vector constBorderValue; std::vector constBorderRow; - int bufStep, startY, startY0, endY, rowCount, dstY; + int bufStep; + int startY; + int startY0; + int endY; + int rowCount; + int dstY; std::vector rows; Ptr filter2D; @@ -279,219 +700,6 @@ public: Ptr columnFilter; }; -//! type of the kernel -enum { KERNEL_GENERAL=0, KERNEL_SYMMETRICAL=1, KERNEL_ASYMMETRICAL=2, - KERNEL_SMOOTH=4, KERNEL_INTEGER=8 }; - -//! returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients. -CV_EXPORTS int getKernelType(InputArray kernel, Point anchor); - -//! returns the primitive row filter with the specified kernel -CV_EXPORTS Ptr getLinearRowFilter(int srcType, int bufType, - InputArray kernel, int anchor, - int symmetryType); - -//! returns the primitive column filter with the specified kernel -CV_EXPORTS Ptr getLinearColumnFilter(int bufType, int dstType, - InputArray kernel, int anchor, - int symmetryType, double delta=0, - int bits=0); - -//! returns 2D filter with the specified kernel -CV_EXPORTS Ptr getLinearFilter(int srcType, int dstType, - InputArray kernel, - Point anchor=Point(-1,-1), - double delta=0, int bits=0); - -//! returns the separable linear filter engine -CV_EXPORTS Ptr createSeparableLinearFilter(int srcType, int dstType, - InputArray rowKernel, InputArray columnKernel, - Point anchor=Point(-1,-1), double delta=0, - int rowBorderType=BORDER_DEFAULT, - int columnBorderType=-1, - const Scalar& borderValue=Scalar()); - -//! returns the non-separable linear filter engine -CV_EXPORTS Ptr createLinearFilter(int srcType, int dstType, - InputArray kernel, Point _anchor=Point(-1,-1), - double delta=0, int rowBorderType=BORDER_DEFAULT, - int columnBorderType=-1, const Scalar& borderValue=Scalar()); - -//! returns the Gaussian kernel with the specified parameters -CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype=CV_64F ); - -//! returns the Gaussian filter engine -CV_EXPORTS Ptr createGaussianFilter( int type, Size ksize, - double sigma1, double sigma2=0, - int borderType=BORDER_DEFAULT); -//! initializes kernels of the generalized Sobel operator -CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky, - int dx, int dy, int ksize, - bool normalize=false, int ktype=CV_32F ); -//! returns filter engine for the generalized Sobel operator -CV_EXPORTS Ptr createDerivFilter( int srcType, int dstType, - int dx, int dy, int ksize, - int borderType=BORDER_DEFAULT ); -//! returns horizontal 1D box filter -CV_EXPORTS Ptr getRowSumFilter(int srcType, int sumType, - int ksize, int anchor=-1); -//! returns vertical 1D box filter -CV_EXPORTS Ptr getColumnSumFilter( int sumType, int dstType, - int ksize, int anchor=-1, - double scale=1); -//! returns box filter engine -CV_EXPORTS Ptr createBoxFilter( int srcType, int dstType, Size ksize, - Point anchor=Point(-1,-1), - bool normalize=true, - int borderType=BORDER_DEFAULT); - -//! 
returns the Gabor kernel with the specified parameters -CV_EXPORTS_W Mat getGaborKernel( Size ksize, double sigma, double theta, double lambd, - double gamma, double psi=CV_PI*0.5, int ktype=CV_64F ); - -//! type of morphological operation -enum { MORPH_ERODE=CV_MOP_ERODE, MORPH_DILATE=CV_MOP_DILATE, - MORPH_OPEN=CV_MOP_OPEN, MORPH_CLOSE=CV_MOP_CLOSE, - MORPH_GRADIENT=CV_MOP_GRADIENT, MORPH_TOPHAT=CV_MOP_TOPHAT, - MORPH_BLACKHAT=CV_MOP_BLACKHAT }; - -//! returns horizontal 1D morphological filter -CV_EXPORTS Ptr getMorphologyRowFilter(int op, int type, int ksize, int anchor=-1); -//! returns vertical 1D morphological filter -CV_EXPORTS Ptr getMorphologyColumnFilter(int op, int type, int ksize, int anchor=-1); -//! returns 2D morphological filter -CV_EXPORTS Ptr getMorphologyFilter(int op, int type, InputArray kernel, - Point anchor=Point(-1,-1)); - -//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation. -static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); } - -//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported. -CV_EXPORTS Ptr createMorphologyFilter(int op, int type, InputArray kernel, - Point anchor=Point(-1,-1), int rowBorderType=BORDER_CONSTANT, - int columnBorderType=-1, - const Scalar& borderValue=morphologyDefaultBorderValue()); - -//! shape of the structuring element -enum { MORPH_RECT=0, MORPH_CROSS=1, MORPH_ELLIPSE=2 }; -//! returns structuring element of the specified shape and size -CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor=Point(-1,-1)); - -template<> CV_EXPORTS void Ptr::delete_obj(); - -//! copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode -CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst, - int top, int bottom, int left, int right, - int borderType, const Scalar& value=Scalar() ); - -//! smooths the image using median filter. -CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize ); -//! smooths the image using Gaussian filter. -CV_EXPORTS_W void GaussianBlur( InputArray src, - OutputArray dst, Size ksize, - double sigmaX, double sigmaY=0, - int borderType=BORDER_DEFAULT ); -//! smooths the image using bilateral filter -CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d, - double sigmaColor, double sigmaSpace, - int borderType=BORDER_DEFAULT ); -//! smooths the image using the box filter. Each pixel is processed in O(1) time -CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth, - Size ksize, Point anchor=Point(-1,-1), - bool normalize=true, - int borderType=BORDER_DEFAULT ); -//! a synonym for normalized box filter -CV_EXPORTS_W void blur( InputArray src, OutputArray dst, - Size ksize, Point anchor=Point(-1,-1), - int borderType=BORDER_DEFAULT ); - -//! applies non-separable 2D linear filter to the image -CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth, - InputArray kernel, Point anchor=Point(-1,-1), - double delta=0, int borderType=BORDER_DEFAULT ); - -//! applies separable 2D linear filter to the image -CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth, - InputArray kernelX, InputArray kernelY, - Point anchor=Point(-1,-1), - double delta=0, int borderType=BORDER_DEFAULT ); - -//! 
applies generalized Sobel operator to the image -CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth, - int dx, int dy, int ksize=3, - double scale=1, double delta=0, - int borderType=BORDER_DEFAULT ); - -//! applies the vertical or horizontal Scharr operator to the image -CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth, - int dx, int dy, double scale=1, double delta=0, - int borderType=BORDER_DEFAULT ); - -//! applies Laplacian operator to the image -CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth, - int ksize=1, double scale=1, double delta=0, - int borderType=BORDER_DEFAULT ); - -//! applies Canny edge detector and produces the edge map. -CV_EXPORTS_W void Canny( InputArray image, OutputArray edges, - double threshold1, double threshold2, - int apertureSize=3, bool L2gradient=false ); - -//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria -CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst, - int blockSize, int ksize=3, - int borderType=BORDER_DEFAULT ); - -//! computes Harris cornerness criteria at each image pixel -CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize, - int ksize, double k, - int borderType=BORDER_DEFAULT ); - -// low-level function for computing eigenvalues and eigenvectors of 2x2 matrices -CV_EXPORTS void eigen2x2( const float* a, float* e, int n ); - -//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix. -CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst, - int blockSize, int ksize, - int borderType=BORDER_DEFAULT ); - -//! computes another complex cornerness criteria at each pixel -CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize, - int borderType=BORDER_DEFAULT ); - -//! adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria -CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners, - Size winSize, Size zeroZone, - TermCriteria criteria ); - -//! finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima -CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners, - int maxCorners, double qualityLevel, double minDistance, - InputArray mask=noArray(), int blockSize=3, - bool useHarrisDetector=false, double k=0.04 ); - -//! finds lines in the black-n-white image using the standard or pyramid Hough transform -CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines, - double rho, double theta, int threshold, - double srn=0, double stn=0 ); - -//! finds line segments in the black-n-white image using probabalistic Hough transform -CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines, - double rho, double theta, int threshold, - double minLineLength=0, double maxLineGap=0 ); - -//! finds circles in the grayscale image using 2+1 gradient Hough transform -CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles, - int method, double dp, double minDist, - double param1=100, double param2=100, - int minRadius=0, int maxRadius=0 ); - -enum -{ - GHT_POSITION = 0, - GHT_SCALE = 1, - GHT_ROTATION = 2 -}; //! finds arbitrary template in the grayscale image using Generalized Hough Transform //! Ballard, D.H. (1981). Generalizing the Hough transform to detect arbitrary shapes. Pattern Recognition 13 (2): 111-122. 
@@ -499,6 +707,11 @@ enum class CV_EXPORTS GeneralizedHough : public Algorithm { public: + enum { GHT_POSITION = 0, + GHT_SCALE = 1, + GHT_ROTATION = 2 + }; + static Ptr create(int method); virtual ~GeneralizedHough(); @@ -519,240 +732,11 @@ protected: virtual void releaseImpl() = 0; private: - Mat edges_, dx_, dy_; + Mat edges_; + Mat dx_; + Mat dy_; }; -//! erodes the image (applies the local minimum operator) -CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel, - Point anchor=Point(-1,-1), int iterations=1, - int borderType=BORDER_CONSTANT, - const Scalar& borderValue=morphologyDefaultBorderValue() ); - -//! dilates the image (applies the local maximum operator) -CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel, - Point anchor=Point(-1,-1), int iterations=1, - int borderType=BORDER_CONSTANT, - const Scalar& borderValue=morphologyDefaultBorderValue() ); - -//! applies an advanced morphological operation to the image -CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst, - int op, InputArray kernel, - Point anchor=Point(-1,-1), int iterations=1, - int borderType=BORDER_CONSTANT, - const Scalar& borderValue=morphologyDefaultBorderValue() ); - -//! interpolation algorithm -enum -{ - INTER_NEAREST=CV_INTER_NN, //!< nearest neighbor interpolation - INTER_LINEAR=CV_INTER_LINEAR, //!< bilinear interpolation - INTER_CUBIC=CV_INTER_CUBIC, //!< bicubic interpolation - INTER_AREA=CV_INTER_AREA, //!< area-based (or super) interpolation - INTER_LANCZOS4=CV_INTER_LANCZOS4, //!< Lanczos interpolation over 8x8 neighborhood - INTER_MAX=7, - WARP_INVERSE_MAP=CV_WARP_INVERSE_MAP -}; - -//! resizes the image -CV_EXPORTS_W void resize( InputArray src, OutputArray dst, - Size dsize, double fx=0, double fy=0, - int interpolation=INTER_LINEAR ); - -//! warps the image using affine transformation -CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst, - InputArray M, Size dsize, - int flags=INTER_LINEAR, - int borderMode=BORDER_CONSTANT, - const Scalar& borderValue=Scalar()); - -//! warps the image using perspective transformation -CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst, - InputArray M, Size dsize, - int flags=INTER_LINEAR, - int borderMode=BORDER_CONSTANT, - const Scalar& borderValue=Scalar()); - -enum -{ - INTER_BITS=5, INTER_BITS2=INTER_BITS*2, - INTER_TAB_SIZE=(1< CV_EXPORTS void Ptr::delete_obj(); - -//! computes the joint dense histogram for a set of images. -CV_EXPORTS void calcHist( const Mat* images, int nimages, - const int* channels, InputArray mask, - OutputArray hist, int dims, const int* histSize, - const float** ranges, bool uniform=true, bool accumulate=false ); - -//! computes the joint sparse histogram for a set of images. -CV_EXPORTS void calcHist( const Mat* images, int nimages, - const int* channels, InputArray mask, - SparseMat& hist, int dims, - const int* histSize, const float** ranges, - bool uniform=true, bool accumulate=false ); - -CV_EXPORTS_W void calcHist( InputArrayOfArrays images, - const std::vector& channels, - InputArray mask, OutputArray hist, - const std::vector& histSize, - const std::vector& ranges, - bool accumulate=false ); - -//! computes back projection for the set of images -CV_EXPORTS void calcBackProject( const Mat* images, int nimages, - const int* channels, InputArray hist, - OutputArray backProject, const float** ranges, - double scale=1, bool uniform=true ); - -//! 
computes back projection for the set of images -CV_EXPORTS void calcBackProject( const Mat* images, int nimages, - const int* channels, const SparseMat& hist, - OutputArray backProject, const float** ranges, - double scale=1, bool uniform=true ); - -CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const std::vector& channels, - InputArray hist, OutputArray dst, - const std::vector& ranges, - double scale ); - -/*CV_EXPORTS void calcBackProjectPatch( const Mat* images, int nimages, const int* channels, - InputArray hist, OutputArray dst, Size patchSize, - int method, double factor=1 ); - -CV_EXPORTS_W void calcBackProjectPatch( InputArrayOfArrays images, const std::vector& channels, - InputArray hist, OutputArray dst, Size patchSize, - int method, double factor=1 );*/ - -//! compares two histograms stored in dense arrays -CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method ); - -//! compares two histograms stored in sparse arrays -CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method ); - -//! normalizes the grayscale image brightness and contrast by normalizing its histogram -CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst ); class CV_EXPORTS CLAHE : public Algorithm { @@ -767,330 +751,7 @@ public: virtual void collectGarbage() = 0; }; -CV_EXPORTS Ptr createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)); -CV_EXPORTS float EMD( InputArray signature1, InputArray signature2, - int distType, InputArray cost=noArray(), - float* lowerBound=0, OutputArray flow=noArray() ); - -//! segments the image using watershed algorithm -CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers ); - -//! filters image using meanshift algorithm -CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst, - double sp, double sr, int maxLevel=1, - TermCriteria termcrit=TermCriteria( - TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) ); - -//! class of the pixel in GrabCut algorithm -enum -{ - GC_BGD = 0, //!< background - GC_FGD = 1, //!< foreground - GC_PR_BGD = 2, //!< most probably background - GC_PR_FGD = 3 //!< most probably foreground -}; - -//! GrabCut algorithm flags -enum -{ - GC_INIT_WITH_RECT = 0, - GC_INIT_WITH_MASK = 1, - GC_EVAL = 2 -}; - -//! segments the image using GrabCut algorithm -CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect, - InputOutputArray bgdModel, InputOutputArray fgdModel, - int iterCount, int mode = GC_EVAL ); - -enum -{ - DIST_LABEL_CCOMP = 0, - DIST_LABEL_PIXEL = 1 -}; - -//! builds the discrete Voronoi diagram -CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst, - OutputArray labels, int distanceType, int maskSize, - int labelType=DIST_LABEL_CCOMP ); - -//! computes the distance transform map -CV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst, - int distanceType, int maskSize ); - -enum { FLOODFILL_FIXED_RANGE = 1 << 16, FLOODFILL_MASK_ONLY = 1 << 17 }; - -//! fills the semi-uniform image region starting from the specified seed point -CV_EXPORTS int floodFill( InputOutputArray image, - Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, - Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), - int flags=4 ); - -//! 
fills the semi-uniform image region and/or the mask starting from the specified seed point -CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask, - Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, - Scalar loDiff=Scalar(), Scalar upDiff=Scalar(), - int flags=4 ); - - -enum -{ - COLOR_BGR2BGRA =0, - COLOR_RGB2RGBA =COLOR_BGR2BGRA, - - COLOR_BGRA2BGR =1, - COLOR_RGBA2RGB =COLOR_BGRA2BGR, - - COLOR_BGR2RGBA =2, - COLOR_RGB2BGRA =COLOR_BGR2RGBA, - - COLOR_RGBA2BGR =3, - COLOR_BGRA2RGB =COLOR_RGBA2BGR, - - COLOR_BGR2RGB =4, - COLOR_RGB2BGR =COLOR_BGR2RGB, - - COLOR_BGRA2RGBA =5, - COLOR_RGBA2BGRA =COLOR_BGRA2RGBA, - - COLOR_BGR2GRAY =6, - COLOR_RGB2GRAY =7, - COLOR_GRAY2BGR =8, - COLOR_GRAY2RGB =COLOR_GRAY2BGR, - COLOR_GRAY2BGRA =9, - COLOR_GRAY2RGBA =COLOR_GRAY2BGRA, - COLOR_BGRA2GRAY =10, - COLOR_RGBA2GRAY =11, - - COLOR_BGR2BGR565 =12, - COLOR_RGB2BGR565 =13, - COLOR_BGR5652BGR =14, - COLOR_BGR5652RGB =15, - COLOR_BGRA2BGR565 =16, - COLOR_RGBA2BGR565 =17, - COLOR_BGR5652BGRA =18, - COLOR_BGR5652RGBA =19, - - COLOR_GRAY2BGR565 =20, - COLOR_BGR5652GRAY =21, - - COLOR_BGR2BGR555 =22, - COLOR_RGB2BGR555 =23, - COLOR_BGR5552BGR =24, - COLOR_BGR5552RGB =25, - COLOR_BGRA2BGR555 =26, - COLOR_RGBA2BGR555 =27, - COLOR_BGR5552BGRA =28, - COLOR_BGR5552RGBA =29, - - COLOR_GRAY2BGR555 =30, - COLOR_BGR5552GRAY =31, - - COLOR_BGR2XYZ =32, - COLOR_RGB2XYZ =33, - COLOR_XYZ2BGR =34, - COLOR_XYZ2RGB =35, - - COLOR_BGR2YCrCb =36, - COLOR_RGB2YCrCb =37, - COLOR_YCrCb2BGR =38, - COLOR_YCrCb2RGB =39, - - COLOR_BGR2HSV =40, - COLOR_RGB2HSV =41, - - COLOR_BGR2Lab =44, - COLOR_RGB2Lab =45, - - COLOR_BayerBG2BGR =46, - COLOR_BayerGB2BGR =47, - COLOR_BayerRG2BGR =48, - COLOR_BayerGR2BGR =49, - - COLOR_BayerBG2RGB =COLOR_BayerRG2BGR, - COLOR_BayerGB2RGB =COLOR_BayerGR2BGR, - COLOR_BayerRG2RGB =COLOR_BayerBG2BGR, - COLOR_BayerGR2RGB =COLOR_BayerGB2BGR, - - COLOR_BGR2Luv =50, - COLOR_RGB2Luv =51, - COLOR_BGR2HLS =52, - COLOR_RGB2HLS =53, - - COLOR_HSV2BGR =54, - COLOR_HSV2RGB =55, - - COLOR_Lab2BGR =56, - COLOR_Lab2RGB =57, - COLOR_Luv2BGR =58, - COLOR_Luv2RGB =59, - COLOR_HLS2BGR =60, - COLOR_HLS2RGB =61, - - COLOR_BayerBG2BGR_VNG =62, - COLOR_BayerGB2BGR_VNG =63, - COLOR_BayerRG2BGR_VNG =64, - COLOR_BayerGR2BGR_VNG =65, - - COLOR_BayerBG2RGB_VNG =COLOR_BayerRG2BGR_VNG, - COLOR_BayerGB2RGB_VNG =COLOR_BayerGR2BGR_VNG, - COLOR_BayerRG2RGB_VNG =COLOR_BayerBG2BGR_VNG, - COLOR_BayerGR2RGB_VNG =COLOR_BayerGB2BGR_VNG, - - COLOR_BGR2HSV_FULL = 66, - COLOR_RGB2HSV_FULL = 67, - COLOR_BGR2HLS_FULL = 68, - COLOR_RGB2HLS_FULL = 69, - - COLOR_HSV2BGR_FULL = 70, - COLOR_HSV2RGB_FULL = 71, - COLOR_HLS2BGR_FULL = 72, - COLOR_HLS2RGB_FULL = 73, - - COLOR_LBGR2Lab = 74, - COLOR_LRGB2Lab = 75, - COLOR_LBGR2Luv = 76, - COLOR_LRGB2Luv = 77, - - COLOR_Lab2LBGR = 78, - COLOR_Lab2LRGB = 79, - COLOR_Luv2LBGR = 80, - COLOR_Luv2LRGB = 81, - - COLOR_BGR2YUV = 82, - COLOR_RGB2YUV = 83, - COLOR_YUV2BGR = 84, - COLOR_YUV2RGB = 85, - - COLOR_BayerBG2GRAY = 86, - COLOR_BayerGB2GRAY = 87, - COLOR_BayerRG2GRAY = 88, - COLOR_BayerGR2GRAY = 89, - - //YUV 4:2:0 formats family - COLOR_YUV2RGB_NV12 = 90, - COLOR_YUV2BGR_NV12 = 91, - COLOR_YUV2RGB_NV21 = 92, - COLOR_YUV2BGR_NV21 = 93, - COLOR_YUV420sp2RGB = COLOR_YUV2RGB_NV21, - COLOR_YUV420sp2BGR = COLOR_YUV2BGR_NV21, - - COLOR_YUV2RGBA_NV12 = 94, - COLOR_YUV2BGRA_NV12 = 95, - COLOR_YUV2RGBA_NV21 = 96, - COLOR_YUV2BGRA_NV21 = 97, - COLOR_YUV420sp2RGBA = COLOR_YUV2RGBA_NV21, - COLOR_YUV420sp2BGRA = COLOR_YUV2BGRA_NV21, - - COLOR_YUV2RGB_YV12 = 98, - COLOR_YUV2BGR_YV12 = 99, - 
COLOR_YUV2RGB_IYUV = 100, - COLOR_YUV2BGR_IYUV = 101, - COLOR_YUV2RGB_I420 = COLOR_YUV2RGB_IYUV, - COLOR_YUV2BGR_I420 = COLOR_YUV2BGR_IYUV, - COLOR_YUV420p2RGB = COLOR_YUV2RGB_YV12, - COLOR_YUV420p2BGR = COLOR_YUV2BGR_YV12, - - COLOR_YUV2RGBA_YV12 = 102, - COLOR_YUV2BGRA_YV12 = 103, - COLOR_YUV2RGBA_IYUV = 104, - COLOR_YUV2BGRA_IYUV = 105, - COLOR_YUV2RGBA_I420 = COLOR_YUV2RGBA_IYUV, - COLOR_YUV2BGRA_I420 = COLOR_YUV2BGRA_IYUV, - COLOR_YUV420p2RGBA = COLOR_YUV2RGBA_YV12, - COLOR_YUV420p2BGRA = COLOR_YUV2BGRA_YV12, - - COLOR_YUV2GRAY_420 = 106, - COLOR_YUV2GRAY_NV21 = COLOR_YUV2GRAY_420, - COLOR_YUV2GRAY_NV12 = COLOR_YUV2GRAY_420, - COLOR_YUV2GRAY_YV12 = COLOR_YUV2GRAY_420, - COLOR_YUV2GRAY_IYUV = COLOR_YUV2GRAY_420, - COLOR_YUV2GRAY_I420 = COLOR_YUV2GRAY_420, - COLOR_YUV420sp2GRAY = COLOR_YUV2GRAY_420, - COLOR_YUV420p2GRAY = COLOR_YUV2GRAY_420, - - //YUV 4:2:2 formats family - COLOR_YUV2RGB_UYVY = 107, - COLOR_YUV2BGR_UYVY = 108, - //COLOR_YUV2RGB_VYUY = 109, - //COLOR_YUV2BGR_VYUY = 110, - COLOR_YUV2RGB_Y422 = COLOR_YUV2RGB_UYVY, - COLOR_YUV2BGR_Y422 = COLOR_YUV2BGR_UYVY, - COLOR_YUV2RGB_UYNV = COLOR_YUV2RGB_UYVY, - COLOR_YUV2BGR_UYNV = COLOR_YUV2BGR_UYVY, - - COLOR_YUV2RGBA_UYVY = 111, - COLOR_YUV2BGRA_UYVY = 112, - //COLOR_YUV2RGBA_VYUY = 113, - //COLOR_YUV2BGRA_VYUY = 114, - COLOR_YUV2RGBA_Y422 = COLOR_YUV2RGBA_UYVY, - COLOR_YUV2BGRA_Y422 = COLOR_YUV2BGRA_UYVY, - COLOR_YUV2RGBA_UYNV = COLOR_YUV2RGBA_UYVY, - COLOR_YUV2BGRA_UYNV = COLOR_YUV2BGRA_UYVY, - - COLOR_YUV2RGB_YUY2 = 115, - COLOR_YUV2BGR_YUY2 = 116, - COLOR_YUV2RGB_YVYU = 117, - COLOR_YUV2BGR_YVYU = 118, - COLOR_YUV2RGB_YUYV = COLOR_YUV2RGB_YUY2, - COLOR_YUV2BGR_YUYV = COLOR_YUV2BGR_YUY2, - COLOR_YUV2RGB_YUNV = COLOR_YUV2RGB_YUY2, - COLOR_YUV2BGR_YUNV = COLOR_YUV2BGR_YUY2, - - COLOR_YUV2RGBA_YUY2 = 119, - COLOR_YUV2BGRA_YUY2 = 120, - COLOR_YUV2RGBA_YVYU = 121, - COLOR_YUV2BGRA_YVYU = 122, - COLOR_YUV2RGBA_YUYV = COLOR_YUV2RGBA_YUY2, - COLOR_YUV2BGRA_YUYV = COLOR_YUV2BGRA_YUY2, - COLOR_YUV2RGBA_YUNV = COLOR_YUV2RGBA_YUY2, - COLOR_YUV2BGRA_YUNV = COLOR_YUV2BGRA_YUY2, - - COLOR_YUV2GRAY_UYVY = 123, - COLOR_YUV2GRAY_YUY2 = 124, - //COLOR_YUV2GRAY_VYUY = COLOR_YUV2GRAY_UYVY, - COLOR_YUV2GRAY_Y422 = COLOR_YUV2GRAY_UYVY, - COLOR_YUV2GRAY_UYNV = COLOR_YUV2GRAY_UYVY, - COLOR_YUV2GRAY_YVYU = COLOR_YUV2GRAY_YUY2, - COLOR_YUV2GRAY_YUYV = COLOR_YUV2GRAY_YUY2, - COLOR_YUV2GRAY_YUNV = COLOR_YUV2GRAY_YUY2, - - // alpha premultiplication - COLOR_RGBA2mRGBA = 125, - COLOR_mRGBA2RGBA = 126, - - COLOR_RGB2YUV_I420 = 127, - COLOR_BGR2YUV_I420 = 128, - COLOR_RGB2YUV_IYUV = COLOR_RGB2YUV_I420, - COLOR_BGR2YUV_IYUV = COLOR_BGR2YUV_I420, - - COLOR_RGBA2YUV_I420 = 129, - COLOR_BGRA2YUV_I420 = 130, - COLOR_RGBA2YUV_IYUV = COLOR_RGBA2YUV_I420, - COLOR_BGRA2YUV_IYUV = COLOR_BGRA2YUV_I420, - COLOR_RGB2YUV_YV12 = 131, - COLOR_BGR2YUV_YV12 = 132, - COLOR_RGBA2YUV_YV12 = 133, - COLOR_BGRA2YUV_YV12 = 134, - - // Edge-Aware Demosaicing - COLOR_BayerBG2BGR_EA = 135, - COLOR_BayerGB2BGR_EA = 136, - COLOR_BayerRG2BGR_EA = 137, - COLOR_BayerGR2BGR_EA = 138, - - COLOR_BayerBG2RGB_EA = COLOR_BayerRG2BGR_EA, - COLOR_BayerGB2RGB_EA = COLOR_BayerGR2BGR_EA, - COLOR_BayerRG2RGB_EA = COLOR_BayerBG2BGR_EA, - COLOR_BayerGR2RGB_EA = COLOR_BayerGB2BGR_EA, - - COLOR_COLORCVT_MAX = 139 -}; - - -//! converts image from one color space to another -CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn=0 ); //! raster image moments class CV_EXPORTS_W_MAP Moments @@ -1101,10 +762,10 @@ public: //! 
the full constructor Moments(double m00, double m10, double m01, double m20, double m11, double m02, double m30, double m21, double m12, double m03 ); - //! the conversion from CvMoments - Moments( const CvMoments& moments ); - //! the conversion to CvMoments - operator CvMoments() const; + ////! the conversion from CvMoments + //Moments( const CvMoments& moments ); + ////! the conversion to CvMoments + //operator CvMoments() const; //! spatial moments CV_PROP_RW double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; @@ -1114,127 +775,26 @@ public: CV_PROP_RW double nu20, nu11, nu02, nu30, nu21, nu12, nu03; }; -//! computes moments of the rasterized shape or a vector of points -CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage=false ); - -//! computes 7 Hu invariants from the moments -CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] ); -CV_EXPORTS_W void HuMoments( const Moments& m, OutputArray hu ); - -//! type of the template matching operation -enum { TM_SQDIFF=0, TM_SQDIFF_NORMED=1, TM_CCORR=2, TM_CCORR_NORMED=3, TM_CCOEFF=4, TM_CCOEFF_NORMED=5 }; - -//! computes the proximity map for the raster template and the image where the template is searched for -CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ, - OutputArray result, int method ); - -enum { CC_STAT_LEFT=0, CC_STAT_TOP=1, CC_STAT_WIDTH=2, CC_STAT_HEIGHT=3, CC_STAT_AREA=4, CC_STAT_MAX = 5}; - -// computes the connected components labeled image of boolean image ``image`` -// with 4 or 8 way connectivity - returns N, the total -// number of labels [0, N-1] where 0 represents the background label. -// ltype specifies the output label image type, an important -// consideration based on the total number of labels or -// alternatively the total number of pixels in the source image. -CV_EXPORTS_W int connectedComponents(InputArray image, OutputArray labels, - int connectivity = 8, int ltype=CV_32S); -CV_EXPORTS_W int connectedComponentsWithStats(InputArray image, OutputArray labels, - OutputArray stats, OutputArray centroids, - int connectivity = 8, int ltype=CV_32S); - -//! mode of the contour retrieval algorithm -enum -{ - RETR_EXTERNAL=CV_RETR_EXTERNAL, //!< retrieve only the most external (top-level) contours - RETR_LIST=CV_RETR_LIST, //!< retrieve all the contours without any hierarchical information - RETR_CCOMP=CV_RETR_CCOMP, //!< retrieve the connected components (that can possibly be nested) - RETR_TREE=CV_RETR_TREE, //!< retrieve all the contours and the whole hierarchy - RETR_FLOODFILL=CV_RETR_FLOODFILL -}; - -//! the contour approximation algorithm -enum -{ - CHAIN_APPROX_NONE=CV_CHAIN_APPROX_NONE, - CHAIN_APPROX_SIMPLE=CV_CHAIN_APPROX_SIMPLE, - CHAIN_APPROX_TC89_L1=CV_CHAIN_APPROX_TC89_L1, - CHAIN_APPROX_TC89_KCOS=CV_CHAIN_APPROX_TC89_KCOS -}; - -//! retrieves contours and the hierarchical information from black-n-white image. -CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours, - OutputArray hierarchy, int mode, - int method, Point offset=Point()); - -//! retrieves contours from black-n-white image. -CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours, - int mode, int method, Point offset=Point()); - -//! approximates contour or a curve using Douglas-Peucker algorithm -CV_EXPORTS_W void approxPolyDP( InputArray curve, - OutputArray approxCurve, - double epsilon, bool closed ); - -//! 
computes the contour perimeter (closed=true) or a curve length -CV_EXPORTS_W double arcLength( InputArray curve, bool closed ); -//! computes the bounding rectangle for a contour -CV_EXPORTS_W Rect boundingRect( InputArray points ); -//! computes the contour area -CV_EXPORTS_W double contourArea( InputArray contour, bool oriented=false ); -//! computes the minimal rotated rectangle for a set of points -CV_EXPORTS_W RotatedRect minAreaRect( InputArray points ); -//! computes the minimal enclosing circle for a set of points -CV_EXPORTS_W void minEnclosingCircle( InputArray points, - CV_OUT Point2f& center, CV_OUT float& radius ); -//! matches two contours using one of the available algorithms -CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2, - int method, double parameter ); -//! computes convex hull for a set of 2D points. -CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull, - bool clockwise=false, bool returnPoints=true ); -//! computes the contour convexity defects -CV_EXPORTS_W void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects ); - -//! returns true if the contour is convex. Does not support contours with self-intersection -CV_EXPORTS_W bool isContourConvex( InputArray contour ); - -//! finds intersection of two convex polygons -CV_EXPORTS_W float intersectConvexConvex( InputArray _p1, InputArray _p2, - OutputArray _p12, bool handleNested=true ); - -//! fits ellipse to the set of 2D points -CV_EXPORTS_W RotatedRect fitEllipse( InputArray points ); - -//! fits line to the set of 2D points using M-estimator algorithm -CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType, - double param, double reps, double aeps ); -//! checks if the point is inside the contour. 
Optionally computes the signed distance from the point to the contour boundary -CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist ); - class CV_EXPORTS_W Subdiv2D { public: - enum - { - PTLOC_ERROR = -2, - PTLOC_OUTSIDE_RECT = -1, - PTLOC_INSIDE = 0, - PTLOC_VERTEX = 1, - PTLOC_ON_EDGE = 2 - }; + enum { PTLOC_ERROR = -2, + PTLOC_OUTSIDE_RECT = -1, + PTLOC_INSIDE = 0, + PTLOC_VERTEX = 1, + PTLOC_ON_EDGE = 2 + }; - enum - { - NEXT_AROUND_ORG = 0x00, - NEXT_AROUND_DST = 0x22, - PREV_AROUND_ORG = 0x11, - PREV_AROUND_DST = 0x33, - NEXT_AROUND_LEFT = 0x13, - NEXT_AROUND_RIGHT = 0x31, - PREV_AROUND_LEFT = 0x20, - PREV_AROUND_RIGHT = 0x02 - }; + enum { NEXT_AROUND_ORG = 0x00, + NEXT_AROUND_DST = 0x22, + PREV_AROUND_ORG = 0x11, + PREV_AROUND_DST = 0x33, + NEXT_AROUND_LEFT = 0x13, + NEXT_AROUND_RIGHT = 0x31, + PREV_AROUND_LEFT = 0x20, + PREV_AROUND_RIGHT = 0x02 + }; CV_WRAP Subdiv2D(); CV_WRAP Subdiv2D(Rect rect); @@ -1244,25 +804,25 @@ public: CV_WRAP void insert(const std::vector& ptvec); CV_WRAP int locate(Point2f pt, CV_OUT int& edge, CV_OUT int& vertex); - CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt=0); + CV_WRAP int findNearest(Point2f pt, CV_OUT Point2f* nearestPt = 0); CV_WRAP void getEdgeList(CV_OUT std::vector& edgeList) const; CV_WRAP void getTriangleList(CV_OUT std::vector& triangleList) const; CV_WRAP void getVoronoiFacetList(const std::vector& idx, CV_OUT std::vector >& facetList, CV_OUT std::vector& facetCenters); - CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge=0) const; + CV_WRAP Point2f getVertex(int vertex, CV_OUT int* firstEdge = 0) const; CV_WRAP int getEdge( int edge, int nextEdgeType ) const; CV_WRAP int nextEdge(int edge) const; CV_WRAP int rotateEdge(int edge, int rotate) const; CV_WRAP int symEdge(int edge) const; - CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt=0) const; - CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt=0) const; + CV_WRAP int edgeOrg(int edge, CV_OUT Point2f* orgpt = 0) const; + CV_WRAP int edgeDst(int edge, CV_OUT Point2f* dstpt = 0) const; protected: int newEdge(); void deleteEdge(int edge); - int newPoint(Point2f pt, bool isvirtual, int firstEdge=0); + int newPoint(Point2f pt, bool isvirtual, int firstEdge = 0); void deletePoint(int vtx); void setEdgePoints( int edge, int orgPt, int dstPt ); void splice( int edgeA, int edgeB ); @@ -1279,15 +839,18 @@ protected: Vertex(Point2f pt, bool _isvirtual, int _firstEdge=0); bool isvirtual() const; bool isfree() const; + int firstEdge; int type; Point2f pt; }; + struct CV_EXPORTS QuadEdge { QuadEdge(); QuadEdge(int edgeidx); bool isfree() const; + int next[4]; int pt[4]; }; @@ -1303,13 +866,540 @@ protected: Point2f bottomRight; }; + + +//! returns type (one of KERNEL_*) of 1D or 2D kernel specified by its coefficients. +CV_EXPORTS int getKernelType(InputArray kernel, Point anchor); + +//! returns the primitive row filter with the specified kernel +CV_EXPORTS Ptr getLinearRowFilter(int srcType, int bufType, + InputArray kernel, int anchor, + int symmetryType); + +//! returns the primitive column filter with the specified kernel +CV_EXPORTS Ptr getLinearColumnFilter(int bufType, int dstType, + InputArray kernel, int anchor, + int symmetryType, double delta = 0, + int bits = 0); + +//! returns 2D filter with the specified kernel +CV_EXPORTS Ptr getLinearFilter(int srcType, int dstType, + InputArray kernel, + Point anchor = Point(-1,-1), + double delta = 0, int bits = 0); + +//! 
returns the separable linear filter engine +CV_EXPORTS Ptr createSeparableLinearFilter(int srcType, int dstType, + InputArray rowKernel, InputArray columnKernel, + Point anchor = Point(-1,-1), double delta = 0, + int rowBorderType = BORDER_DEFAULT, + int columnBorderType = -1, + const Scalar& borderValue = Scalar()); + +//! returns the non-separable linear filter engine +CV_EXPORTS Ptr createLinearFilter(int srcType, int dstType, + InputArray kernel, Point _anchor = Point(-1,-1), + double delta = 0, int rowBorderType = BORDER_DEFAULT, + int columnBorderType = -1, const Scalar& borderValue = Scalar()); + +//! returns the Gaussian kernel with the specified parameters +CV_EXPORTS_W Mat getGaussianKernel( int ksize, double sigma, int ktype = CV_64F ); + +//! returns the Gaussian filter engine +CV_EXPORTS Ptr createGaussianFilter( int type, Size ksize, + double sigma1, double sigma2 = 0, + int borderType = BORDER_DEFAULT); + +//! initializes kernels of the generalized Sobel operator +CV_EXPORTS_W void getDerivKernels( OutputArray kx, OutputArray ky, + int dx, int dy, int ksize, + bool normalize = false, int ktype = CV_32F ); + +//! returns filter engine for the generalized Sobel operator +CV_EXPORTS Ptr createDerivFilter( int srcType, int dstType, + int dx, int dy, int ksize, + int borderType = BORDER_DEFAULT ); + +//! returns horizontal 1D box filter +CV_EXPORTS Ptr getRowSumFilter(int srcType, int sumType, + int ksize, int anchor = -1); + +//! returns vertical 1D box filter +CV_EXPORTS Ptr getColumnSumFilter( int sumType, int dstType, + int ksize, int anchor = -1, + double scale = 1); +//! returns box filter engine +CV_EXPORTS Ptr createBoxFilter( int srcType, int dstType, Size ksize, + Point anchor = Point(-1,-1), + bool normalize = true, + int borderType = BORDER_DEFAULT); + +//! returns the Gabor kernel with the specified parameters +CV_EXPORTS_W Mat getGaborKernel( Size ksize, double sigma, double theta, double lambd, + double gamma, double psi = CV_PI*0.5, int ktype = CV_64F ); + +//! returns horizontal 1D morphological filter +CV_EXPORTS Ptr getMorphologyRowFilter(int op, int type, int ksize, int anchor = -1); + +//! returns vertical 1D morphological filter +CV_EXPORTS Ptr getMorphologyColumnFilter(int op, int type, int ksize, int anchor = -1); + +//! returns 2D morphological filter +CV_EXPORTS Ptr getMorphologyFilter(int op, int type, InputArray kernel, + Point anchor = Point(-1,-1)); + +//! returns "magic" border value for erosion and dilation. It is automatically transformed to Scalar::all(-DBL_MAX) for dilation. +static inline Scalar morphologyDefaultBorderValue() { return Scalar::all(DBL_MAX); } + +//! returns morphological filter engine. Only MORPH_ERODE and MORPH_DILATE are supported. +CV_EXPORTS Ptr createMorphologyFilter(int op, int type, InputArray kernel, + Point anchor = Point(-1,-1), int rowBorderType = BORDER_CONSTANT, + int columnBorderType = -1, const Scalar& borderValue = morphologyDefaultBorderValue()); + +//! returns structuring element of the specified shape and size +CV_EXPORTS_W Mat getStructuringElement(int shape, Size ksize, Point anchor = Point(-1,-1)); + +//! 1D interpolation function: returns coordinate of the "donor" pixel for the specified location p. +CV_EXPORTS_W int borderInterpolate( int p, int len, int borderType ); + +//! 
copies 2D array to a larger destination array with extrapolation of the outer part of src using the specified border mode +CV_EXPORTS_W void copyMakeBorder( InputArray src, OutputArray dst, + int top, int bottom, int left, int right, + int borderType, const Scalar& value = Scalar() ); + +//! smooths the image using median filter. +CV_EXPORTS_W void medianBlur( InputArray src, OutputArray dst, int ksize ); + +//! smooths the image using Gaussian filter. +CV_EXPORTS_W void GaussianBlur( InputArray src, OutputArray dst, Size ksize, + double sigmaX, double sigmaY = 0, + int borderType = BORDER_DEFAULT ); + +//! smooths the image using bilateral filter +CV_EXPORTS_W void bilateralFilter( InputArray src, OutputArray dst, int d, + double sigmaColor, double sigmaSpace, + int borderType = BORDER_DEFAULT ); + +//! smooths the image using the box filter. Each pixel is processed in O(1) time +CV_EXPORTS_W void boxFilter( InputArray src, OutputArray dst, int ddepth, + Size ksize, Point anchor = Point(-1,-1), + bool normalize = true, + int borderType = BORDER_DEFAULT ); + +//! a synonym for normalized box filter +CV_EXPORTS_W void blur( InputArray src, OutputArray dst, + Size ksize, Point anchor = Point(-1,-1), + int borderType = BORDER_DEFAULT ); + +//! applies non-separable 2D linear filter to the image +CV_EXPORTS_W void filter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernel, Point anchor = Point(-1,-1), + double delta = 0, int borderType = BORDER_DEFAULT ); + +//! applies separable 2D linear filter to the image +CV_EXPORTS_W void sepFilter2D( InputArray src, OutputArray dst, int ddepth, + InputArray kernelX, InputArray kernelY, + Point anchor = Point(-1,-1), + double delta = 0, int borderType = BORDER_DEFAULT ); + +//! applies generalized Sobel operator to the image +CV_EXPORTS_W void Sobel( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, int ksize = 3, + double scale = 1, double delta = 0, + int borderType = BORDER_DEFAULT ); + +//! applies the vertical or horizontal Scharr operator to the image +CV_EXPORTS_W void Scharr( InputArray src, OutputArray dst, int ddepth, + int dx, int dy, double scale = 1, double delta = 0, + int borderType = BORDER_DEFAULT ); + +//! applies Laplacian operator to the image +CV_EXPORTS_W void Laplacian( InputArray src, OutputArray dst, int ddepth, + int ksize = 1, double scale = 1, double delta = 0, + int borderType = BORDER_DEFAULT ); + +//! applies Canny edge detector and produces the edge map. +CV_EXPORTS_W void Canny( InputArray image, OutputArray edges, + double threshold1, double threshold2, + int apertureSize = 3, bool L2gradient = false ); + +//! computes minimum eigen value of 2x2 derivative covariation matrix at each pixel - the cornerness criteria +CV_EXPORTS_W void cornerMinEigenVal( InputArray src, OutputArray dst, + int blockSize, int ksize = 3, + int borderType = BORDER_DEFAULT ); + +//! computes Harris cornerness criteria at each image pixel +CV_EXPORTS_W void cornerHarris( InputArray src, OutputArray dst, int blockSize, + int ksize, double k, + int borderType = BORDER_DEFAULT ); + +//! computes both eigenvalues and the eigenvectors of 2x2 derivative covariation matrix at each pixel. The output is stored as 6-channel matrix. +CV_EXPORTS_W void cornerEigenValsAndVecs( InputArray src, OutputArray dst, + int blockSize, int ksize, + int borderType = BORDER_DEFAULT ); + +//! 
computes another complex cornerness criteria at each pixel +CV_EXPORTS_W void preCornerDetect( InputArray src, OutputArray dst, int ksize, + int borderType = BORDER_DEFAULT ); + +//! adjusts the corner locations with sub-pixel accuracy to maximize the certain cornerness criteria +CV_EXPORTS_W void cornerSubPix( InputArray image, InputOutputArray corners, + Size winSize, Size zeroZone, + TermCriteria criteria ); + +//! finds the strong enough corners where the cornerMinEigenVal() or cornerHarris() report the local maxima +CV_EXPORTS_W void goodFeaturesToTrack( InputArray image, OutputArray corners, + int maxCorners, double qualityLevel, double minDistance, + InputArray mask = noArray(), int blockSize = 3, + bool useHarrisDetector = false, double k = 0.04 ); + +//! finds lines in the black-n-white image using the standard or pyramid Hough transform +CV_EXPORTS_W void HoughLines( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double srn = 0, double stn = 0 ); + +//! finds line segments in the black-n-white image using probabalistic Hough transform +CV_EXPORTS_W void HoughLinesP( InputArray image, OutputArray lines, + double rho, double theta, int threshold, + double minLineLength = 0, double maxLineGap = 0 ); + +//! finds circles in the grayscale image using 2+1 gradient Hough transform +CV_EXPORTS_W void HoughCircles( InputArray image, OutputArray circles, + int method, double dp, double minDist, + double param1 = 100, double param2 = 100, + int minRadius = 0, int maxRadius = 0 ); + +//! erodes the image (applies the local minimum operator) +CV_EXPORTS_W void erode( InputArray src, OutputArray dst, InputArray kernel, + Point anchor = Point(-1,-1), int iterations = 1, + int borderType = BORDER_CONSTANT, + const Scalar& borderValue = morphologyDefaultBorderValue() ); + +//! dilates the image (applies the local maximum operator) +CV_EXPORTS_W void dilate( InputArray src, OutputArray dst, InputArray kernel, + Point anchor = Point(-1,-1), int iterations = 1, + int borderType = BORDER_CONSTANT, + const Scalar& borderValue = morphologyDefaultBorderValue() ); + +//! applies an advanced morphological operation to the image +CV_EXPORTS_W void morphologyEx( InputArray src, OutputArray dst, + int op, InputArray kernel, + Point anchor = Point(-1,-1), int iterations = 1, + int borderType = BORDER_CONSTANT, + const Scalar& borderValue = morphologyDefaultBorderValue() ); + +//! resizes the image +CV_EXPORTS_W void resize( InputArray src, OutputArray dst, + Size dsize, double fx = 0, double fy = 0, + int interpolation = INTER_LINEAR ); + +//! warps the image using affine transformation +CV_EXPORTS_W void warpAffine( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags = INTER_LINEAR, + int borderMode = BORDER_CONSTANT, + const Scalar& borderValue = Scalar()); + +//! warps the image using perspective transformation +CV_EXPORTS_W void warpPerspective( InputArray src, OutputArray dst, + InputArray M, Size dsize, + int flags = INTER_LINEAR, + int borderMode = BORDER_CONSTANT, + const Scalar& borderValue = Scalar()); + +//! warps the image using the precomputed maps. The maps are stored in either floating-point or integer fixed-point format +CV_EXPORTS_W void remap( InputArray src, OutputArray dst, + InputArray map1, InputArray map2, + int interpolation, int borderMode = BORDER_CONSTANT, + const Scalar& borderValue = Scalar()); + +//! 
converts maps for remap from floating-point to fixed-point format or backwards +CV_EXPORTS_W void convertMaps( InputArray map1, InputArray map2, + OutputArray dstmap1, OutputArray dstmap2, + int dstmap1type, bool nninterpolation = false ); + +//! returns 2x3 affine transformation matrix for the planar rotation. +CV_EXPORTS_W Mat getRotationMatrix2D( Point2f center, double angle, double scale ); + +//! returns 3x3 perspective transformation for the corresponding 4 point pairs. +CV_EXPORTS Mat getPerspectiveTransform( const Point2f src[], const Point2f dst[] ); + +//! returns 2x3 affine transformation for the corresponding 3 point pairs. +CV_EXPORTS Mat getAffineTransform( const Point2f src[], const Point2f dst[] ); + +//! computes 2x3 affine transformation matrix that is inverse to the specified 2x3 affine transformation. +CV_EXPORTS_W void invertAffineTransform( InputArray M, OutputArray iM ); + +CV_EXPORTS_W Mat getPerspectiveTransform( InputArray src, InputArray dst ); + +CV_EXPORTS_W Mat getAffineTransform( InputArray src, InputArray dst ); + +//! extracts rectangle from the image at sub-pixel location +CV_EXPORTS_W void getRectSubPix( InputArray image, Size patchSize, + Point2f center, OutputArray patch, int patchType = -1 ); + +//! computes the integral image +CV_EXPORTS_W void integral( InputArray src, OutputArray sum, int sdepth = -1 ); + +//! computes the integral image and integral for the squared image +CV_EXPORTS_AS(integral2) void integral( InputArray src, OutputArray sum, + OutputArray sqsum, int sdepth = -1 ); + +//! computes the integral image, integral for the squared image and the tilted integral image +CV_EXPORTS_AS(integral3) void integral( InputArray src, OutputArray sum, + OutputArray sqsum, OutputArray tilted, + int sdepth = -1 ); + +//! adds image to the accumulator (dst += src). Unlike cv::add, dst and src can have different types. +CV_EXPORTS_W void accumulate( InputArray src, InputOutputArray dst, + InputArray mask = noArray() ); + +//! adds squared src image to the accumulator (dst += src*src). +CV_EXPORTS_W void accumulateSquare( InputArray src, InputOutputArray dst, + InputArray mask = noArray() ); +//! adds product of the 2 images to the accumulator (dst += src1*src2). +CV_EXPORTS_W void accumulateProduct( InputArray src1, InputArray src2, + InputOutputArray dst, InputArray mask=noArray() ); + +//! updates the running average (dst = dst*(1-alpha) + src*alpha) +CV_EXPORTS_W void accumulateWeighted( InputArray src, InputOutputArray dst, + double alpha, InputArray mask = noArray() ); + +//! computes PSNR image/video quality metric +CV_EXPORTS_W double PSNR(InputArray src1, InputArray src2); + +CV_EXPORTS_W Point2d phaseCorrelate(InputArray src1, InputArray src2, + InputArray window = noArray(), CV_OUT double* response = 0); + +CV_EXPORTS_W void createHanningWindow(OutputArray dst, Size winSize, int type); + +//! applies fixed threshold to the image +CV_EXPORTS_W double threshold( InputArray src, OutputArray dst, + double thresh, double maxval, int type ); + + +//! applies variable (adaptive) threshold to the image +CV_EXPORTS_W void adaptiveThreshold( InputArray src, OutputArray dst, + double maxValue, int adaptiveMethod, + int thresholdType, int blockSize, double C ); + +//! smooths and downsamples the image +CV_EXPORTS_W void pyrDown( InputArray src, OutputArray dst, + const Size& dstsize = Size(), int borderType = BORDER_DEFAULT ); + +//! 
upsamples and smoothes the image +CV_EXPORTS_W void pyrUp( InputArray src, OutputArray dst, + const Size& dstsize = Size(), int borderType = BORDER_DEFAULT ); + +//! builds the gaussian pyramid using pyrDown() as a basic operation +CV_EXPORTS void buildPyramid( InputArray src, OutputArrayOfArrays dst, + int maxlevel, int borderType = BORDER_DEFAULT ); + +//! corrects lens distortion for the given camera matrix and distortion coefficients +CV_EXPORTS_W void undistort( InputArray src, OutputArray dst, + InputArray cameraMatrix, + InputArray distCoeffs, + InputArray newCameraMatrix = noArray() ); + +//! initializes maps for cv::remap() to correct lens distortion and optionally rectify the image +CV_EXPORTS_W void initUndistortRectifyMap( InputArray cameraMatrix, InputArray distCoeffs, + InputArray R, InputArray newCameraMatrix, + Size size, int m1type, OutputArray map1, OutputArray map2 ); + +//! initializes maps for cv::remap() for wide-angle +CV_EXPORTS_W float initWideAngleProjMap( InputArray cameraMatrix, InputArray distCoeffs, + Size imageSize, int destImageWidth, + int m1type, OutputArray map1, OutputArray map2, + int projType = PROJ_SPHERICAL_EQRECT, double alpha = 0); + +//! returns the default new camera matrix (by default it is the same as cameraMatrix unless centerPricipalPoint=true) +CV_EXPORTS_W Mat getDefaultNewCameraMatrix( InputArray cameraMatrix, Size imgsize = Size(), + bool centerPrincipalPoint = false ); + +//! returns points' coordinates after lens distortion correction +CV_EXPORTS_W void undistortPoints( InputArray src, OutputArray dst, + InputArray cameraMatrix, InputArray distCoeffs, + InputArray R = noArray(), InputArray P = noArray()); + +//! computes the joint dense histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + OutputArray hist, int dims, const int* histSize, + const float** ranges, bool uniform = true, bool accumulate = false ); + +//! computes the joint sparse histogram for a set of images. +CV_EXPORTS void calcHist( const Mat* images, int nimages, + const int* channels, InputArray mask, + SparseMat& hist, int dims, + const int* histSize, const float** ranges, + bool uniform = true, bool accumulate = false ); + +CV_EXPORTS_W void calcHist( InputArrayOfArrays images, + const std::vector& channels, + InputArray mask, OutputArray hist, + const std::vector& histSize, + const std::vector& ranges, + bool accumulate = false ); + +//! computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, InputArray hist, + OutputArray backProject, const float** ranges, + double scale = 1, bool uniform = true ); + +//! computes back projection for the set of images +CV_EXPORTS void calcBackProject( const Mat* images, int nimages, + const int* channels, const SparseMat& hist, + OutputArray backProject, const float** ranges, + double scale = 1, bool uniform = true ); + +CV_EXPORTS_W void calcBackProject( InputArrayOfArrays images, const std::vector& channels, + InputArray hist, OutputArray dst, + const std::vector& ranges, + double scale ); + +//! compares two histograms stored in dense arrays +CV_EXPORTS_W double compareHist( InputArray H1, InputArray H2, int method ); + +//! compares two histograms stored in sparse arrays +CV_EXPORTS double compareHist( const SparseMat& H1, const SparseMat& H2, int method ); + +//! 
normalizes the grayscale image brightness and contrast by normalizing its histogram +CV_EXPORTS_W void equalizeHist( InputArray src, OutputArray dst ); + +CV_EXPORTS float EMD( InputArray signature1, InputArray signature2, + int distType, InputArray cost=noArray(), + float* lowerBound = 0, OutputArray flow = noArray() ); + +//! segments the image using watershed algorithm +CV_EXPORTS_W void watershed( InputArray image, InputOutputArray markers ); + +//! filters image using meanshift algorithm +CV_EXPORTS_W void pyrMeanShiftFiltering( InputArray src, OutputArray dst, + double sp, double sr, int maxLevel = 1, + TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) ); + +//! segments the image using GrabCut algorithm +CV_EXPORTS_W void grabCut( InputArray img, InputOutputArray mask, Rect rect, + InputOutputArray bgdModel, InputOutputArray fgdModel, + int iterCount, int mode = GC_EVAL ); + + +//! builds the discrete Voronoi diagram +CV_EXPORTS_AS(distanceTransformWithLabels) void distanceTransform( InputArray src, OutputArray dst, + OutputArray labels, int distanceType, int maskSize, + int labelType = DIST_LABEL_CCOMP ); + +//! computes the distance transform map +CV_EXPORTS_W void distanceTransform( InputArray src, OutputArray dst, + int distanceType, int maskSize ); + + +//! fills the semi-uniform image region starting from the specified seed point +CV_EXPORTS int floodFill( InputOutputArray image, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect = 0, + Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), + int flags = 4 ); + +//! fills the semi-uniform image region and/or the mask starting from the specified seed point +CV_EXPORTS_W int floodFill( InputOutputArray image, InputOutputArray mask, + Point seedPoint, Scalar newVal, CV_OUT Rect* rect=0, + Scalar loDiff = Scalar(), Scalar upDiff = Scalar(), + int flags = 4 ); + +//! converts image from one color space to another +CV_EXPORTS_W void cvtColor( InputArray src, OutputArray dst, int code, int dstCn = 0 ); + // main function for all demosaicing procceses CV_EXPORTS_W void demosaicing(InputArray _src, OutputArray _dst, int code, int dcn = 0); -} +//! computes moments of the rasterized shape or a vector of points +CV_EXPORTS_W Moments moments( InputArray array, bool binaryImage = false ); -#endif /* __cplusplus */ +//! computes 7 Hu invariants from the moments +CV_EXPORTS void HuMoments( const Moments& moments, double hu[7] ); + +CV_EXPORTS_W void HuMoments( const Moments& m, OutputArray hu ); + +//! computes the proximity map for the raster template and the image where the template is searched for +CV_EXPORTS_W void matchTemplate( InputArray image, InputArray templ, + OutputArray result, int method ); + + +// computes the connected components labeled image of boolean image ``image`` +// with 4 or 8 way connectivity - returns N, the total +// number of labels [0, N-1] where 0 represents the background label. +// ltype specifies the output label image type, an important +// consideration based on the total number of labels or +// alternatively the total number of pixels in the source image. +CV_EXPORTS_W int connectedComponents(InputArray image, OutputArray labels, + int connectivity = 8, int ltype = CV_32S); + +CV_EXPORTS_W int connectedComponentsWithStats(InputArray image, OutputArray labels, + OutputArray stats, OutputArray centroids, + int connectivity = 8, int ltype = CV_32S); + + +//! retrieves contours and the hierarchical information from black-n-white image. 
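// Editorial aside, not part of the original patch: a minimal usage sketch of the
// C++-only constants (COLOR_*, DIST_*, TM_*) that the declarations above expose
// once imgproc.hpp no longer pulls in the C API defines (CV_BGR2GRAY, CV_DIST_*,
// CV_TM_*). Only the constant and function names are taken from this commit; the
// helper below, its name and its arguments are illustrative assumptions.
#include "opencv2/imgproc.hpp"

static void demoNewConstants(const cv::Mat& bgr, const cv::Mat& templ) // hypothetical helper
{
    cv::Mat gray, bw, dist, score;
    cv::cvtColor(bgr, gray, cv::COLOR_BGR2GRAY);                         // was CV_BGR2GRAY
    cv::threshold(gray, bw, 128, 255, cv::THRESH_BINARY);
    cv::distanceTransform(bw, dist, cv::DIST_L2, cv::DIST_MASK_PRECISE); // was CV_DIST_L2 / CV_DIST_MASK_PRECISE
    cv::matchTemplate(gray, templ, score, cv::TM_CCOEFF_NORMED);         // was CV_TM_CCOEFF_NORMED
}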
+CV_EXPORTS_W void findContours( InputOutputArray image, OutputArrayOfArrays contours, + OutputArray hierarchy, int mode, + int method, Point offset = Point()); + +//! retrieves contours from black-n-white image. +CV_EXPORTS void findContours( InputOutputArray image, OutputArrayOfArrays contours, + int mode, int method, Point offset = Point()); + +//! approximates contour or a curve using Douglas-Peucker algorithm +CV_EXPORTS_W void approxPolyDP( InputArray curve, + OutputArray approxCurve, + double epsilon, bool closed ); + +//! computes the contour perimeter (closed=true) or a curve length +CV_EXPORTS_W double arcLength( InputArray curve, bool closed ); + +//! computes the bounding rectangle for a contour +CV_EXPORTS_W Rect boundingRect( InputArray points ); + +//! computes the contour area +CV_EXPORTS_W double contourArea( InputArray contour, bool oriented = false ); + +//! computes the minimal rotated rectangle for a set of points +CV_EXPORTS_W RotatedRect minAreaRect( InputArray points ); + +//! computes the minimal enclosing circle for a set of points +CV_EXPORTS_W void minEnclosingCircle( InputArray points, + CV_OUT Point2f& center, CV_OUT float& radius ); + +//! matches two contours using one of the available algorithms +CV_EXPORTS_W double matchShapes( InputArray contour1, InputArray contour2, + int method, double parameter ); + +//! computes convex hull for a set of 2D points. +CV_EXPORTS_W void convexHull( InputArray points, OutputArray hull, + bool clockwise = false, bool returnPoints = true ); + +//! computes the contour convexity defects +CV_EXPORTS_W void convexityDefects( InputArray contour, InputArray convexhull, OutputArray convexityDefects ); + +//! returns true if the contour is convex. Does not support contours with self-intersection +CV_EXPORTS_W bool isContourConvex( InputArray contour ); + +//! finds intersection of two convex polygons +CV_EXPORTS_W float intersectConvexConvex( InputArray _p1, InputArray _p2, + OutputArray _p12, bool handleNested = true ); + +//! fits ellipse to the set of 2D points +CV_EXPORTS_W RotatedRect fitEllipse( InputArray points ); + +//! fits line to the set of 2D points using M-estimator algorithm +CV_EXPORTS_W void fitLine( InputArray points, OutputArray line, int distType, + double param, double reps, double aeps ); + +//! checks if the point is inside the contour. Optionally computes the signed distance from the point to the contour boundary +CV_EXPORTS_W double pointPolygonTest( InputArray contour, Point2f pt, bool measureDist ); + +CV_EXPORTS Ptr createCLAHE(double clipLimit = 40.0, Size tileGridSize = Size(8, 8)); + +} // cv #endif - -/* End of file. */ diff --git a/modules/imgproc/include/opencv2/imgproc/types_c.h b/modules/imgproc/include/opencv2/imgproc/types_c.h index 8e65269866..5b89e2d7c1 100644 --- a/modules/imgproc/include/opencv2/imgproc/types_c.h +++ b/modules/imgproc/include/opencv2/imgproc/types_c.h @@ -45,6 +45,10 @@ #include "opencv2/core/core_c.h" +#ifdef __cplusplus +# include "opencv2/imgproc.hpp" +#endif + #ifdef __cplusplus extern "C" { #endif @@ -383,6 +387,24 @@ typedef struct CvMoments double m00, m10, m01, m20, m11, m02, m30, m21, m12, m03; /* spatial moments */ double mu20, mu11, mu02, mu30, mu21, mu12, mu03; /* central moments */ double inv_sqrt_m00; /* m00 != 0 ? 
1/sqrt(m00) : 0 */ + +#ifdef __cplusplus + CvMoments(){} + CvMoments(const cv::Moments& m) + { + m00 = m.m00; m10 = m.m10; m01 = m.m01; + m20 = m.m20; m11 = m.m11; m02 = m.m02; + m30 = m.m30; m21 = m.m21; m12 = m.m12; m03 = m.m03; + mu20 = m.mu20; mu11 = m.mu11; mu02 = m.mu02; + mu30 = m.mu30; mu21 = m.mu21; mu12 = m.mu12; mu03 = m.mu03; + double am00 = std::abs(m.m00); + inv_sqrt_m00 = am00 > DBL_EPSILON ? 1./std::sqrt(am00) : 0; + } + operator cv::Moments() const + { + return cv::Moments(m00, m10, m01, m20, m11, m02, m30, m21, m12, m03); + } +#endif } CvMoments; diff --git a/modules/imgproc/perf/perf_cvt_color.cpp b/modules/imgproc/perf/perf_cvt_color.cpp index 2038fc88c9..b7eb71f2c9 100644 --- a/modules/imgproc/perf/perf_cvt_color.cpp +++ b/modules/imgproc/perf/perf_cvt_color.cpp @@ -9,114 +9,114 @@ using std::tr1::get; //extra color conversions supported implicitly enum { - CX_BGRA2HLS = CV_COLORCVT_MAX + CV_BGR2HLS, - CX_BGRA2HLS_FULL = CV_COLORCVT_MAX + CV_BGR2HLS_FULL, - CX_BGRA2HSV = CV_COLORCVT_MAX + CV_BGR2HSV, - CX_BGRA2HSV_FULL = CV_COLORCVT_MAX + CV_BGR2HSV_FULL, - CX_BGRA2Lab = CV_COLORCVT_MAX + CV_BGR2Lab, - CX_BGRA2Luv = CV_COLORCVT_MAX + CV_BGR2Luv, - CX_BGRA2XYZ = CV_COLORCVT_MAX + CV_BGR2XYZ, - CX_BGRA2YCrCb = CV_COLORCVT_MAX + CV_BGR2YCrCb, - CX_BGRA2YUV = CV_COLORCVT_MAX + CV_BGR2YUV, - CX_HLS2BGRA = CV_COLORCVT_MAX + CV_HLS2BGR, - CX_HLS2BGRA_FULL = CV_COLORCVT_MAX + CV_HLS2BGR_FULL, - CX_HLS2RGBA = CV_COLORCVT_MAX + CV_HLS2RGB, - CX_HLS2RGBA_FULL = CV_COLORCVT_MAX + CV_HLS2RGB_FULL, - CX_HSV2BGRA = CV_COLORCVT_MAX + CV_HSV2BGR, - CX_HSV2BGRA_FULL = CV_COLORCVT_MAX + CV_HSV2BGR_FULL, - CX_HSV2RGBA = CV_COLORCVT_MAX + CV_HSV2RGB, - CX_HSV2RGBA_FULL = CV_COLORCVT_MAX + CV_HSV2RGB_FULL, - CX_Lab2BGRA = CV_COLORCVT_MAX + CV_Lab2BGR, - CX_Lab2LBGRA = CV_COLORCVT_MAX + CV_Lab2LBGR, - CX_Lab2LRGBA = CV_COLORCVT_MAX + CV_Lab2LRGB, - CX_Lab2RGBA = CV_COLORCVT_MAX + CV_Lab2RGB, - CX_LBGRA2Lab = CV_COLORCVT_MAX + CV_LBGR2Lab, - CX_LBGRA2Luv = CV_COLORCVT_MAX + CV_LBGR2Luv, - CX_LRGBA2Lab = CV_COLORCVT_MAX + CV_LRGB2Lab, - CX_LRGBA2Luv = CV_COLORCVT_MAX + CV_LRGB2Luv, - CX_Luv2BGRA = CV_COLORCVT_MAX + CV_Luv2BGR, - CX_Luv2LBGRA = CV_COLORCVT_MAX + CV_Luv2LBGR, - CX_Luv2LRGBA = CV_COLORCVT_MAX + CV_Luv2LRGB, - CX_Luv2RGBA = CV_COLORCVT_MAX + CV_Luv2RGB, - CX_RGBA2HLS = CV_COLORCVT_MAX + CV_RGB2HLS, - CX_RGBA2HLS_FULL = CV_COLORCVT_MAX + CV_RGB2HLS_FULL, - CX_RGBA2HSV = CV_COLORCVT_MAX + CV_RGB2HSV, - CX_RGBA2HSV_FULL = CV_COLORCVT_MAX + CV_RGB2HSV_FULL, - CX_RGBA2Lab = CV_COLORCVT_MAX + CV_RGB2Lab, - CX_RGBA2Luv = CV_COLORCVT_MAX + CV_RGB2Luv, - CX_RGBA2XYZ = CV_COLORCVT_MAX + CV_RGB2XYZ, - CX_RGBA2YCrCb = CV_COLORCVT_MAX + CV_RGB2YCrCb, - CX_RGBA2YUV = CV_COLORCVT_MAX + CV_RGB2YUV, - CX_XYZ2BGRA = CV_COLORCVT_MAX + CV_XYZ2BGR, - CX_XYZ2RGBA = CV_COLORCVT_MAX + CV_XYZ2RGB, - CX_YCrCb2BGRA = CV_COLORCVT_MAX + CV_YCrCb2BGR, - CX_YCrCb2RGBA = CV_COLORCVT_MAX + CV_YCrCb2RGB, - CX_YUV2BGRA = CV_COLORCVT_MAX + CV_YUV2BGR, - CX_YUV2RGBA = CV_COLORCVT_MAX + CV_YUV2RGB + CX_BGRA2HLS = COLOR_COLORCVT_MAX + COLOR_BGR2HLS, + CX_BGRA2HLS_FULL = COLOR_COLORCVT_MAX + COLOR_BGR2HLS_FULL, + CX_BGRA2HSV = COLOR_COLORCVT_MAX + COLOR_BGR2HSV, + CX_BGRA2HSV_FULL = COLOR_COLORCVT_MAX + COLOR_BGR2HSV_FULL, + CX_BGRA2Lab = COLOR_COLORCVT_MAX + COLOR_BGR2Lab, + CX_BGRA2Luv = COLOR_COLORCVT_MAX + COLOR_BGR2Luv, + CX_BGRA2XYZ = COLOR_COLORCVT_MAX + COLOR_BGR2XYZ, + CX_BGRA2YCrCb = COLOR_COLORCVT_MAX + COLOR_BGR2YCrCb, + CX_BGRA2YUV = COLOR_COLORCVT_MAX + COLOR_BGR2YUV, + CX_HLS2BGRA = COLOR_COLORCVT_MAX + 
COLOR_HLS2BGR, + CX_HLS2BGRA_FULL = COLOR_COLORCVT_MAX + COLOR_HLS2BGR_FULL, + CX_HLS2RGBA = COLOR_COLORCVT_MAX + COLOR_HLS2RGB, + CX_HLS2RGBA_FULL = COLOR_COLORCVT_MAX + COLOR_HLS2RGB_FULL, + CX_HSV2BGRA = COLOR_COLORCVT_MAX + COLOR_HSV2BGR, + CX_HSV2BGRA_FULL = COLOR_COLORCVT_MAX + COLOR_HSV2BGR_FULL, + CX_HSV2RGBA = COLOR_COLORCVT_MAX + COLOR_HSV2RGB, + CX_HSV2RGBA_FULL = COLOR_COLORCVT_MAX + COLOR_HSV2RGB_FULL, + CX_Lab2BGRA = COLOR_COLORCVT_MAX + COLOR_Lab2BGR, + CX_Lab2LBGRA = COLOR_COLORCVT_MAX + COLOR_Lab2LBGR, + CX_Lab2LRGBA = COLOR_COLORCVT_MAX + COLOR_Lab2LRGB, + CX_Lab2RGBA = COLOR_COLORCVT_MAX + COLOR_Lab2RGB, + CX_LBGRA2Lab = COLOR_COLORCVT_MAX + COLOR_LBGR2Lab, + CX_LBGRA2Luv = COLOR_COLORCVT_MAX + COLOR_LBGR2Luv, + CX_LRGBA2Lab = COLOR_COLORCVT_MAX + COLOR_LRGB2Lab, + CX_LRGBA2Luv = COLOR_COLORCVT_MAX + COLOR_LRGB2Luv, + CX_Luv2BGRA = COLOR_COLORCVT_MAX + COLOR_Luv2BGR, + CX_Luv2LBGRA = COLOR_COLORCVT_MAX + COLOR_Luv2LBGR, + CX_Luv2LRGBA = COLOR_COLORCVT_MAX + COLOR_Luv2LRGB, + CX_Luv2RGBA = COLOR_COLORCVT_MAX + COLOR_Luv2RGB, + CX_RGBA2HLS = COLOR_COLORCVT_MAX + COLOR_RGB2HLS, + CX_RGBA2HLS_FULL = COLOR_COLORCVT_MAX + COLOR_RGB2HLS_FULL, + CX_RGBA2HSV = COLOR_COLORCVT_MAX + COLOR_RGB2HSV, + CX_RGBA2HSV_FULL = COLOR_COLORCVT_MAX + COLOR_RGB2HSV_FULL, + CX_RGBA2Lab = COLOR_COLORCVT_MAX + COLOR_RGB2Lab, + CX_RGBA2Luv = COLOR_COLORCVT_MAX + COLOR_RGB2Luv, + CX_RGBA2XYZ = COLOR_COLORCVT_MAX + COLOR_RGB2XYZ, + CX_RGBA2YCrCb = COLOR_COLORCVT_MAX + COLOR_RGB2YCrCb, + CX_RGBA2YUV = COLOR_COLORCVT_MAX + COLOR_RGB2YUV, + CX_XYZ2BGRA = COLOR_COLORCVT_MAX + COLOR_XYZ2BGR, + CX_XYZ2RGBA = COLOR_COLORCVT_MAX + COLOR_XYZ2RGB, + CX_YCrCb2BGRA = COLOR_COLORCVT_MAX + COLOR_YCrCb2BGR, + CX_YCrCb2RGBA = COLOR_COLORCVT_MAX + COLOR_YCrCb2RGB, + CX_YUV2BGRA = COLOR_COLORCVT_MAX + COLOR_YUV2BGR, + CX_YUV2RGBA = COLOR_COLORCVT_MAX + COLOR_YUV2RGB }; CV_ENUM(CvtMode, - CV_BGR2BGR555, CV_BGR2BGR565, CV_BGR2BGRA, CV_BGR2GRAY, - CV_BGR2HLS, CV_BGR2HLS_FULL, CV_BGR2HSV, CV_BGR2HSV_FULL, - CV_BGR2Lab, CV_BGR2Luv, CV_BGR2RGB, CV_BGR2RGBA, CV_BGR2XYZ, - CV_BGR2YCrCb, CV_BGR2YUV, CV_BGR5552BGR, CV_BGR5552BGRA, + COLOR_BGR2BGR555, COLOR_BGR2BGR565, COLOR_BGR2BGRA, COLOR_BGR2GRAY, + COLOR_BGR2HLS, COLOR_BGR2HLS_FULL, COLOR_BGR2HSV, COLOR_BGR2HSV_FULL, + COLOR_BGR2Lab, COLOR_BGR2Luv, COLOR_BGR2RGB, COLOR_BGR2RGBA, COLOR_BGR2XYZ, + COLOR_BGR2YCrCb, COLOR_BGR2YUV, COLOR_BGR5552BGR, COLOR_BGR5552BGRA, - CV_BGR5552GRAY, CV_BGR5552RGB, CV_BGR5552RGBA, CV_BGR5652BGR, - CV_BGR5652BGRA, CV_BGR5652GRAY, CV_BGR5652RGB, CV_BGR5652RGBA, + COLOR_BGR5552GRAY, COLOR_BGR5552RGB, COLOR_BGR5552RGBA, COLOR_BGR5652BGR, + COLOR_BGR5652BGRA, COLOR_BGR5652GRAY, COLOR_BGR5652RGB, COLOR_BGR5652RGBA, - CV_BGRA2BGR, CV_BGRA2BGR555, CV_BGRA2BGR565, CV_BGRA2GRAY, CV_BGRA2RGBA, + COLOR_BGRA2BGR, COLOR_BGRA2BGR555, COLOR_BGRA2BGR565, COLOR_BGRA2GRAY, COLOR_BGRA2RGBA, CX_BGRA2HLS, CX_BGRA2HLS_FULL, CX_BGRA2HSV, CX_BGRA2HSV_FULL, CX_BGRA2Lab, CX_BGRA2Luv, CX_BGRA2XYZ, CX_BGRA2YCrCb, CX_BGRA2YUV, - CV_GRAY2BGR, CV_GRAY2BGR555, CV_GRAY2BGR565, CV_GRAY2BGRA, + COLOR_GRAY2BGR, COLOR_GRAY2BGR555, COLOR_GRAY2BGR565, COLOR_GRAY2BGRA, - CV_HLS2BGR, CV_HLS2BGR_FULL, CV_HLS2RGB, CV_HLS2RGB_FULL, + COLOR_HLS2BGR, COLOR_HLS2BGR_FULL, COLOR_HLS2RGB, COLOR_HLS2RGB_FULL, CX_HLS2BGRA, CX_HLS2BGRA_FULL, CX_HLS2RGBA, CX_HLS2RGBA_FULL, - CV_HSV2BGR, CV_HSV2BGR_FULL, CV_HSV2RGB, CV_HSV2RGB_FULL, + COLOR_HSV2BGR, COLOR_HSV2BGR_FULL, COLOR_HSV2RGB, COLOR_HSV2RGB_FULL, CX_HSV2BGRA, CX_HSV2BGRA_FULL, CX_HSV2RGBA, CX_HSV2RGBA_FULL, - CV_Lab2BGR, CV_Lab2LBGR, 
CV_Lab2LRGB, CV_Lab2RGB, + COLOR_Lab2BGR, COLOR_Lab2LBGR, COLOR_Lab2LRGB, COLOR_Lab2RGB, CX_Lab2BGRA, CX_Lab2LBGRA, CX_Lab2LRGBA, CX_Lab2RGBA, - CV_LBGR2Lab, CV_LBGR2Luv, CV_LRGB2Lab, CV_LRGB2Luv, + COLOR_LBGR2Lab, COLOR_LBGR2Luv, COLOR_LRGB2Lab, COLOR_LRGB2Luv, CX_LBGRA2Lab, CX_LBGRA2Luv, CX_LRGBA2Lab, CX_LRGBA2Luv, - CV_Luv2BGR, CV_Luv2LBGR, CV_Luv2LRGB, CV_Luv2RGB, + COLOR_Luv2BGR, COLOR_Luv2LBGR, COLOR_Luv2LRGB, COLOR_Luv2RGB, CX_Luv2BGRA, CX_Luv2LBGRA, CX_Luv2LRGBA, CX_Luv2RGBA, - CV_RGB2BGR555, CV_RGB2BGR565, CV_RGB2GRAY, - CV_RGB2HLS, CV_RGB2HLS_FULL, CV_RGB2HSV, CV_RGB2HSV_FULL, - CV_RGB2Lab, CV_RGB2Luv, CV_RGB2XYZ, CV_RGB2YCrCb, CV_RGB2YUV, + COLOR_RGB2BGR555, COLOR_RGB2BGR565, COLOR_RGB2GRAY, + COLOR_RGB2HLS, COLOR_RGB2HLS_FULL, COLOR_RGB2HSV, COLOR_RGB2HSV_FULL, + COLOR_RGB2Lab, COLOR_RGB2Luv, COLOR_RGB2XYZ, COLOR_RGB2YCrCb, COLOR_RGB2YUV, - CV_RGBA2BGR, CV_RGBA2BGR555, CV_RGBA2BGR565, CV_RGBA2GRAY, + COLOR_RGBA2BGR, COLOR_RGBA2BGR555, COLOR_RGBA2BGR565, COLOR_RGBA2GRAY, CX_RGBA2HLS, CX_RGBA2HLS_FULL, CX_RGBA2HSV, CX_RGBA2HSV_FULL, CX_RGBA2Lab, CX_RGBA2Luv, CX_RGBA2XYZ, CX_RGBA2YCrCb, CX_RGBA2YUV, - CV_XYZ2BGR, CV_XYZ2RGB, CX_XYZ2BGRA, CX_XYZ2RGBA, + COLOR_XYZ2BGR, COLOR_XYZ2RGB, CX_XYZ2BGRA, CX_XYZ2RGBA, - CV_YCrCb2BGR, CV_YCrCb2RGB, CX_YCrCb2BGRA, CX_YCrCb2RGBA, - CV_YUV2BGR, CV_YUV2RGB, CX_YUV2BGRA, CX_YUV2RGBA + COLOR_YCrCb2BGR, COLOR_YCrCb2RGB, CX_YCrCb2BGRA, CX_YCrCb2RGBA, + COLOR_YUV2BGR, COLOR_YUV2RGB, CX_YUV2BGRA, CX_YUV2RGBA ) CV_ENUM(CvtModeBayer, - CV_BayerBG2BGR, CV_BayerBG2BGR_VNG, CV_BayerBG2GRAY, - CV_BayerGB2BGR, CV_BayerGB2BGR_VNG, CV_BayerGB2GRAY, - CV_BayerGR2BGR, CV_BayerGR2BGR_VNG, CV_BayerGR2GRAY, - CV_BayerRG2BGR, CV_BayerRG2BGR_VNG, CV_BayerRG2GRAY + COLOR_BayerBG2BGR, COLOR_BayerBG2BGR_VNG, COLOR_BayerBG2GRAY, + COLOR_BayerGB2BGR, COLOR_BayerGB2BGR_VNG, COLOR_BayerGB2GRAY, + COLOR_BayerGR2BGR, COLOR_BayerGR2BGR_VNG, COLOR_BayerGR2GRAY, + COLOR_BayerRG2BGR, COLOR_BayerRG2BGR_VNG, COLOR_BayerRG2GRAY ) -CV_ENUM(CvtMode2, CV_YUV2BGR_NV12, CV_YUV2BGRA_NV12, CV_YUV2RGB_NV12, CV_YUV2RGBA_NV12, CV_YUV2BGR_NV21, CV_YUV2BGRA_NV21, CV_YUV2RGB_NV21, CV_YUV2RGBA_NV21, - CV_YUV2BGR_YV12, CV_YUV2BGRA_YV12, CV_YUV2RGB_YV12, CV_YUV2RGBA_YV12, CV_YUV2BGR_IYUV, CV_YUV2BGRA_IYUV, CV_YUV2RGB_IYUV, CV_YUV2RGBA_IYUV, - COLOR_YUV2GRAY_420, CV_YUV2RGB_UYVY, CV_YUV2BGR_UYVY, CV_YUV2RGBA_UYVY, CV_YUV2BGRA_UYVY, CV_YUV2RGB_YUY2, CV_YUV2BGR_YUY2, CV_YUV2RGB_YVYU, - CV_YUV2BGR_YVYU, CV_YUV2RGBA_YUY2, CV_YUV2BGRA_YUY2, CV_YUV2RGBA_YVYU, CV_YUV2BGRA_YVYU) +CV_ENUM(CvtMode2, COLOR_YUV2BGR_NV12, COLOR_YUV2BGRA_NV12, COLOR_YUV2RGB_NV12, COLOR_YUV2RGBA_NV12, COLOR_YUV2BGR_NV21, COLOR_YUV2BGRA_NV21, COLOR_YUV2RGB_NV21, COLOR_YUV2RGBA_NV21, + COLOR_YUV2BGR_YV12, COLOR_YUV2BGRA_YV12, COLOR_YUV2RGB_YV12, COLOR_YUV2RGBA_YV12, COLOR_YUV2BGR_IYUV, COLOR_YUV2BGRA_IYUV, COLOR_YUV2RGB_IYUV, COLOR_YUV2RGBA_IYUV, + COLOR_YUV2GRAY_420, COLOR_YUV2RGB_UYVY, COLOR_YUV2BGR_UYVY, COLOR_YUV2RGBA_UYVY, COLOR_YUV2BGRA_UYVY, COLOR_YUV2RGB_YUY2, COLOR_YUV2BGR_YUY2, COLOR_YUV2RGB_YVYU, + COLOR_YUV2BGR_YVYU, COLOR_YUV2RGBA_YUY2, COLOR_YUV2BGRA_YUY2, COLOR_YUV2RGBA_YVYU, COLOR_YUV2BGRA_YVYU) -CV_ENUM(CvtMode3, CV_RGB2YUV_IYUV, CV_BGR2YUV_IYUV, CV_RGBA2YUV_IYUV, CV_BGRA2YUV_IYUV, - CV_RGB2YUV_YV12, CV_BGR2YUV_YV12, CV_RGBA2YUV_YV12, CV_BGRA2YUV_YV12) +CV_ENUM(CvtMode3, COLOR_RGB2YUV_IYUV, COLOR_BGR2YUV_IYUV, COLOR_RGBA2YUV_IYUV, COLOR_BGRA2YUV_IYUV, + COLOR_RGB2YUV_YV12, COLOR_BGR2YUV_YV12, COLOR_RGBA2YUV_YV12, COLOR_BGRA2YUV_YV12) struct ChPair { @@ -128,74 +128,74 @@ ChPair getConversionInfo(int cvtMode) { 
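    // Editorial aside, not part of the original patch: the CX_* values in the enum
    // above are synthetic, test-only codes built as COLOR_COLORCVT_MAX + <base
    // COLOR_* code> so that 4-channel (BGRA/RGBA) variants can also be exercised.
    // The perf tests strip the offset again before calling cvtColor, as in this
    // sketch (assumes the enum above is in scope; realCode is an illustrative name):
    //
    //     int realCode = cvtMode % COLOR_COLORCVT_MAX;  // e.g. CX_BGRA2HLS -> COLOR_BGR2HLS
    //     // cv::cvtColor(src, dst, realCode, dcn);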
switch(cvtMode) { - case CV_BayerBG2GRAY: case CV_BayerGB2GRAY: - case CV_BayerGR2GRAY: case CV_BayerRG2GRAY: - case CV_YUV2GRAY_420: + case COLOR_BayerBG2GRAY: case COLOR_BayerGB2GRAY: + case COLOR_BayerGR2GRAY: case COLOR_BayerRG2GRAY: + case COLOR_YUV2GRAY_420: return ChPair(1,1); - case CV_GRAY2BGR555: case CV_GRAY2BGR565: + case COLOR_GRAY2BGR555: case COLOR_GRAY2BGR565: return ChPair(1,2); - case CV_BayerBG2BGR: case CV_BayerBG2BGR_VNG: - case CV_BayerGB2BGR: case CV_BayerGB2BGR_VNG: - case CV_BayerGR2BGR: case CV_BayerGR2BGR_VNG: - case CV_BayerRG2BGR: case CV_BayerRG2BGR_VNG: - case CV_GRAY2BGR: - case CV_YUV2BGR_NV12: case CV_YUV2RGB_NV12: - case CV_YUV2BGR_NV21: case CV_YUV2RGB_NV21: - case CV_YUV2BGR_YV12: case CV_YUV2RGB_YV12: - case CV_YUV2BGR_IYUV: case CV_YUV2RGB_IYUV: + case COLOR_BayerBG2BGR: case COLOR_BayerBG2BGR_VNG: + case COLOR_BayerGB2BGR: case COLOR_BayerGB2BGR_VNG: + case COLOR_BayerGR2BGR: case COLOR_BayerGR2BGR_VNG: + case COLOR_BayerRG2BGR: case COLOR_BayerRG2BGR_VNG: + case COLOR_GRAY2BGR: + case COLOR_YUV2BGR_NV12: case COLOR_YUV2RGB_NV12: + case COLOR_YUV2BGR_NV21: case COLOR_YUV2RGB_NV21: + case COLOR_YUV2BGR_YV12: case COLOR_YUV2RGB_YV12: + case COLOR_YUV2BGR_IYUV: case COLOR_YUV2RGB_IYUV: return ChPair(1,3); - case CV_GRAY2BGRA: - case CV_YUV2BGRA_NV12: case CV_YUV2RGBA_NV12: - case CV_YUV2BGRA_NV21: case CV_YUV2RGBA_NV21: - case CV_YUV2BGRA_YV12: case CV_YUV2RGBA_YV12: - case CV_YUV2BGRA_IYUV: case CV_YUV2RGBA_IYUV: + case COLOR_GRAY2BGRA: + case COLOR_YUV2BGRA_NV12: case COLOR_YUV2RGBA_NV12: + case COLOR_YUV2BGRA_NV21: case COLOR_YUV2RGBA_NV21: + case COLOR_YUV2BGRA_YV12: case COLOR_YUV2RGBA_YV12: + case COLOR_YUV2BGRA_IYUV: case COLOR_YUV2RGBA_IYUV: return ChPair(1,4); - case CV_BGR5552GRAY: case CV_BGR5652GRAY: + case COLOR_BGR5552GRAY: case COLOR_BGR5652GRAY: return ChPair(2,1); - case CV_BGR5552BGR: case CV_BGR5552RGB: - case CV_BGR5652BGR: case CV_BGR5652RGB: - case CV_YUV2RGB_UYVY: case CV_YUV2BGR_UYVY: - case CV_YUV2RGBA_UYVY: case CV_YUV2BGRA_UYVY: - case CV_YUV2RGB_YUY2: case CV_YUV2BGR_YUY2: - case CV_YUV2RGB_YVYU: case CV_YUV2BGR_YVYU: - case CV_YUV2RGBA_YUY2: case CV_YUV2BGRA_YUY2: - case CV_YUV2RGBA_YVYU: case CV_YUV2BGRA_YVYU: + case COLOR_BGR5552BGR: case COLOR_BGR5552RGB: + case COLOR_BGR5652BGR: case COLOR_BGR5652RGB: + case COLOR_YUV2RGB_UYVY: case COLOR_YUV2BGR_UYVY: + case COLOR_YUV2RGBA_UYVY: case COLOR_YUV2BGRA_UYVY: + case COLOR_YUV2RGB_YUY2: case COLOR_YUV2BGR_YUY2: + case COLOR_YUV2RGB_YVYU: case COLOR_YUV2BGR_YVYU: + case COLOR_YUV2RGBA_YUY2: case COLOR_YUV2BGRA_YUY2: + case COLOR_YUV2RGBA_YVYU: case COLOR_YUV2BGRA_YVYU: return ChPair(2,3); - case CV_BGR5552BGRA: case CV_BGR5552RGBA: - case CV_BGR5652BGRA: case CV_BGR5652RGBA: + case COLOR_BGR5552BGRA: case COLOR_BGR5552RGBA: + case COLOR_BGR5652BGRA: case COLOR_BGR5652RGBA: return ChPair(2,4); - case CV_BGR2GRAY: case CV_RGB2GRAY: - case CV_RGB2YUV_IYUV: case CV_RGB2YUV_YV12: - case CV_BGR2YUV_IYUV: case CV_BGR2YUV_YV12: + case COLOR_BGR2GRAY: case COLOR_RGB2GRAY: + case COLOR_RGB2YUV_IYUV: case COLOR_RGB2YUV_YV12: + case COLOR_BGR2YUV_IYUV: case COLOR_BGR2YUV_YV12: return ChPair(3,1); - case CV_BGR2BGR555: case CV_BGR2BGR565: - case CV_RGB2BGR555: case CV_RGB2BGR565: + case COLOR_BGR2BGR555: case COLOR_BGR2BGR565: + case COLOR_RGB2BGR555: case COLOR_RGB2BGR565: return ChPair(3,2); - case CV_BGR2HLS: case CV_BGR2HLS_FULL: - case CV_BGR2HSV: case CV_BGR2HSV_FULL: - case CV_BGR2Lab: case CV_BGR2Luv: - case CV_BGR2RGB: case CV_BGR2XYZ: - case CV_BGR2YCrCb: case CV_BGR2YUV: - case 
CV_HLS2BGR: case CV_HLS2BGR_FULL: - case CV_HLS2RGB: case CV_HLS2RGB_FULL: - case CV_HSV2BGR: case CV_HSV2BGR_FULL: - case CV_HSV2RGB: case CV_HSV2RGB_FULL: - case CV_Lab2BGR: case CV_Lab2LBGR: - case CV_Lab2LRGB: case CV_Lab2RGB: - case CV_LBGR2Lab: case CV_LBGR2Luv: - case CV_LRGB2Lab: case CV_LRGB2Luv: - case CV_Luv2BGR: case CV_Luv2LBGR: - case CV_Luv2LRGB: case CV_Luv2RGB: - case CV_RGB2HLS: case CV_RGB2HLS_FULL: - case CV_RGB2HSV: case CV_RGB2HSV_FULL: - case CV_RGB2Lab: case CV_RGB2Luv: - case CV_RGB2XYZ: case CV_RGB2YCrCb: - case CV_RGB2YUV: case CV_XYZ2BGR: - case CV_XYZ2RGB: case CV_YCrCb2BGR: - case CV_YCrCb2RGB: case CV_YUV2BGR: - case CV_YUV2RGB: + case COLOR_BGR2HLS: case COLOR_BGR2HLS_FULL: + case COLOR_BGR2HSV: case COLOR_BGR2HSV_FULL: + case COLOR_BGR2Lab: case COLOR_BGR2Luv: + case COLOR_BGR2RGB: case COLOR_BGR2XYZ: + case COLOR_BGR2YCrCb: case COLOR_BGR2YUV: + case COLOR_HLS2BGR: case COLOR_HLS2BGR_FULL: + case COLOR_HLS2RGB: case COLOR_HLS2RGB_FULL: + case COLOR_HSV2BGR: case COLOR_HSV2BGR_FULL: + case COLOR_HSV2RGB: case COLOR_HSV2RGB_FULL: + case COLOR_Lab2BGR: case COLOR_Lab2LBGR: + case COLOR_Lab2LRGB: case COLOR_Lab2RGB: + case COLOR_LBGR2Lab: case COLOR_LBGR2Luv: + case COLOR_LRGB2Lab: case COLOR_LRGB2Luv: + case COLOR_Luv2BGR: case COLOR_Luv2LBGR: + case COLOR_Luv2LRGB: case COLOR_Luv2RGB: + case COLOR_RGB2HLS: case COLOR_RGB2HLS_FULL: + case COLOR_RGB2HSV: case COLOR_RGB2HSV_FULL: + case COLOR_RGB2Lab: case COLOR_RGB2Luv: + case COLOR_RGB2XYZ: case COLOR_RGB2YCrCb: + case COLOR_RGB2YUV: case COLOR_XYZ2BGR: + case COLOR_XYZ2RGB: case COLOR_YCrCb2BGR: + case COLOR_YCrCb2RGB: case COLOR_YUV2BGR: + case COLOR_YUV2RGB: return ChPair(3,3); - case CV_BGR2BGRA: case CV_BGR2RGBA: + case COLOR_BGR2BGRA: case COLOR_BGR2RGBA: case CX_HLS2BGRA: case CX_HLS2BGRA_FULL: case CX_HLS2RGBA: case CX_HLS2RGBA_FULL: case CX_HSV2BGRA: case CX_HSV2BGRA_FULL: @@ -208,27 +208,27 @@ ChPair getConversionInfo(int cvtMode) case CX_YCrCb2BGRA: case CX_YCrCb2RGBA: case CX_YUV2BGRA: case CX_YUV2RGBA: return ChPair(3,4); - case CV_BGRA2GRAY: case CV_RGBA2GRAY: - case CV_RGBA2YUV_IYUV: case CV_RGBA2YUV_YV12: - case CV_BGRA2YUV_IYUV: case CV_BGRA2YUV_YV12: + case COLOR_BGRA2GRAY: case COLOR_RGBA2GRAY: + case COLOR_RGBA2YUV_IYUV: case COLOR_RGBA2YUV_YV12: + case COLOR_BGRA2YUV_IYUV: case COLOR_BGRA2YUV_YV12: return ChPair(4,1); - case CV_BGRA2BGR555: case CV_BGRA2BGR565: - case CV_RGBA2BGR555: case CV_RGBA2BGR565: + case COLOR_BGRA2BGR555: case COLOR_BGRA2BGR565: + case COLOR_RGBA2BGR555: case COLOR_RGBA2BGR565: return ChPair(4,2); - case CV_BGRA2BGR: case CX_BGRA2HLS: + case COLOR_BGRA2BGR: case CX_BGRA2HLS: case CX_BGRA2HLS_FULL: case CX_BGRA2HSV: case CX_BGRA2HSV_FULL: case CX_BGRA2Lab: case CX_BGRA2Luv: case CX_BGRA2XYZ: case CX_BGRA2YCrCb: case CX_BGRA2YUV: case CX_LBGRA2Lab: case CX_LBGRA2Luv: case CX_LRGBA2Lab: case CX_LRGBA2Luv: - case CV_RGBA2BGR: case CX_RGBA2HLS: + case COLOR_RGBA2BGR: case CX_RGBA2HLS: case CX_RGBA2HLS_FULL: case CX_RGBA2HSV: case CX_RGBA2HSV_FULL: case CX_RGBA2Lab: case CX_RGBA2Luv: case CX_RGBA2XYZ: case CX_RGBA2YCrCb: case CX_RGBA2YUV: return ChPair(4,3); - case CV_BGRA2RGBA: + case COLOR_BGRA2RGBA: return ChPair(4,4); default: ADD_FAILURE() << "Unknown conversion type"; @@ -250,7 +250,7 @@ PERF_TEST_P(Size_CvtMode, cvtColor8u, Size sz = get<0>(GetParam()); int mode = get<1>(GetParam()); ChPair ch = getConversionInfo(mode); - mode %= CV_COLORCVT_MAX; + mode %= COLOR_COLORCVT_MAX; Mat src(sz, CV_8UC(ch.scn)); Mat dst(sz, CV_8UC(ch.dcn)); @@ -276,7 +276,7 @@ 
PERF_TEST_P(Size_CvtMode_Bayer, cvtColorBayer8u, Size sz = get<0>(GetParam()); int mode = get<1>(GetParam()); ChPair ch = getConversionInfo(mode); - mode %= CV_COLORCVT_MAX; + mode %= COLOR_COLORCVT_MAX; Mat src(sz, CV_8UC(ch.scn)); Mat dst(sz, CV_8UC(ch.dcn)); diff --git a/modules/imgproc/perf/perf_matchTemplate.cpp b/modules/imgproc/perf/perf_matchTemplate.cpp index a89435b535..3ca94ddee7 100644 --- a/modules/imgproc/perf/perf_matchTemplate.cpp +++ b/modules/imgproc/perf/perf_matchTemplate.cpp @@ -6,7 +6,7 @@ using namespace perf; using std::tr1::make_tuple; using std::tr1::get; -CV_ENUM(MethodType, CV_TM_SQDIFF, CV_TM_SQDIFF_NORMED, CV_TM_CCORR, CV_TM_CCORR_NORMED, CV_TM_CCOEFF, CV_TM_CCOEFF_NORMED) +CV_ENUM(MethodType, TM_SQDIFF, TM_SQDIFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_CCOEFF, TM_CCOEFF_NORMED) typedef std::tr1::tuple ImgSize_TmplSize_Method_t; typedef perf::TestBaseWithParam ImgSize_TmplSize_Method; @@ -39,9 +39,9 @@ PERF_TEST_P(ImgSize_TmplSize_Method, matchTemplateSmall, TEST_CYCLE() matchTemplate(img, tmpl, result, method); bool isNormed = - method == CV_TM_CCORR_NORMED || - method == CV_TM_SQDIFF_NORMED || - method == CV_TM_CCOEFF_NORMED; + method == TM_CCORR_NORMED || + method == TM_SQDIFF_NORMED || + method == TM_CCOEFF_NORMED; double eps = isNormed ? 1e-6 : 255 * 255 * tmpl.total() * 1e-6; @@ -73,9 +73,9 @@ PERF_TEST_P(ImgSize_TmplSize_Method, matchTemplateBig, TEST_CYCLE() matchTemplate(img, tmpl, result, method); bool isNormed = - method == CV_TM_CCORR_NORMED || - method == CV_TM_SQDIFF_NORMED || - method == CV_TM_CCOEFF_NORMED; + method == TM_CCORR_NORMED || + method == TM_SQDIFF_NORMED || + method == TM_CCOEFF_NORMED; double eps = isNormed ? 1e-6 : 255 * 255 * tmpl.total() * 1e-6; diff --git a/modules/imgproc/src/corner.cpp b/modules/imgproc/src/corner.cpp index fa3021ab8c..8f8c770068 100644 --- a/modules/imgproc/src/corner.cpp +++ b/modules/imgproc/src/corner.cpp @@ -162,7 +162,7 @@ calcHarris( const Mat& _cov, Mat& _dst, double k ) } -void eigen2x2( const float* cov, float* dst, int n ) +static void eigen2x2( const float* cov, float* dst, int n ) { for( int j = 0; j < n; j++ ) { diff --git a/modules/imgproc/src/histogram.cpp b/modules/imgproc/src/histogram.cpp index f768f5cdd5..6357147dfa 100644 --- a/modules/imgproc/src/histogram.cpp +++ b/modules/imgproc/src/histogram.cpp @@ -43,10 +43,6 @@ namespace cv { -template<> void Ptr::delete_obj() -{ cvReleaseHist(&obj); } - - ////////////////// Helper functions ////////////////////// static const size_t OUT_OF_RANGE = (size_t)1 << (sizeof(size_t)*8 - 2); diff --git a/modules/imgproc/src/moments.cpp b/modules/imgproc/src/moments.cpp index c5c9c218a5..edbc101014 100644 --- a/modules/imgproc/src/moments.cpp +++ b/modules/imgproc/src/moments.cpp @@ -354,25 +354,6 @@ Moments::Moments( double _m00, double _m10, double _m01, double _m20, double _m1 nu30 = mu30*s3; nu21 = mu21*s3; nu12 = mu12*s3; nu03 = mu03*s3; } -Moments::Moments( const CvMoments& m ) -{ - *this = Moments(m.m00, m.m10, m.m01, m.m20, m.m11, m.m02, m.m30, m.m21, m.m12, m.m03); -} - -Moments::operator CvMoments() const -{ - CvMoments m; - m.m00 = m00; m.m10 = m10; m.m01 = m01; - m.m20 = m20; m.m11 = m11; m.m02 = m02; - m.m30 = m30; m.m21 = m21; m.m12 = m12; m.m03 = m03; - m.mu20 = mu20; m.mu11 = mu11; m.mu02 = mu02; - m.mu30 = mu30; m.mu21 = mu21; m.mu12 = mu12; m.mu03 = mu03; - double am00 = std::abs(m00); - m.inv_sqrt_m00 = am00 > DBL_EPSILON ? 
1./std::sqrt(am00) : 0; - - return m; -} - } diff --git a/modules/imgproc/src/morph.cpp b/modules/imgproc/src/morph.cpp index 4a939d9bb7..b4e08a6fb8 100644 --- a/modules/imgproc/src/morph.cpp +++ b/modules/imgproc/src/morph.cpp @@ -1191,9 +1191,6 @@ static void morphOp( int op, InputArray _src, OutputArray _dst, // f->apply( dst, dst ); } -template<> void Ptr::delete_obj() -{ cvReleaseStructuringElement(&obj); } - } void cv::erode( InputArray src, OutputArray dst, InputArray kernel, diff --git a/modules/legacy/include/opencv2/legacy.hpp b/modules/legacy/include/opencv2/legacy.hpp index 499566c054..29714f25eb 100644 --- a/modules/legacy/include/opencv2/legacy.hpp +++ b/modules/legacy/include/opencv2/legacy.hpp @@ -42,7 +42,6 @@ #ifndef __OPENCV_LEGACY_HPP__ #define __OPENCV_LEGACY_HPP__ -#include "opencv2/imgproc.hpp" #include "opencv2/imgproc/imgproc_c.h" #include "opencv2/features2d.hpp" #include "opencv2/calib3d.hpp" diff --git a/modules/legacy/include/opencv2/legacy/legacy.hpp b/modules/legacy/include/opencv2/legacy/legacy.hpp index fafad10a1c..adea8da005 100644 --- a/modules/legacy/include/opencv2/legacy/legacy.hpp +++ b/modules/legacy/include/opencv2/legacy/legacy.hpp @@ -45,4 +45,4 @@ #error this is a compatibility header which should not be used inside the OpenCV library #endif -#include "opencv2/legacy.hpp" \ No newline at end of file +#include "opencv2/legacy.hpp" diff --git a/modules/nonfree/test/test_detectors.cpp b/modules/nonfree/test/test_detectors.cpp index e675d3f99e..60d1bc1de3 100644 --- a/modules/nonfree/test/test_detectors.cpp +++ b/modules/nonfree/test/test_detectors.cpp @@ -113,7 +113,7 @@ void showOrig(const Mat& img, const vector& orig_pts) { Mat img_color; - cvtColor(img, img_color, CV_GRAY2BGR); + cvtColor(img, img_color, COLOR_GRAY2BGR); for(size_t i = 0; i < orig_pts.size(); ++i) circle(img_color, orig_pts[i].pt, (int)orig_pts[i].size/2, CV_RGB(0, 255, 0)); @@ -125,7 +125,7 @@ void show(const string& name, const Mat& new_img, const vector& new_pt { Mat new_img_color; - cvtColor(new_img, new_img_color, CV_GRAY2BGR); + cvtColor(new_img, new_img_color, COLOR_GRAY2BGR); for(size_t i = 0; i < transf_pts.size(); ++i) circle(new_img_color, transf_pts[i].pt, (int)transf_pts[i].size/2, CV_RGB(255, 0, 0)); diff --git a/modules/objdetect/test/test_cascadeandhog.cpp b/modules/objdetect/test/test_cascadeandhog.cpp index 9fb1006221..1366fa2f3b 100644 --- a/modules/objdetect/test/test_cascadeandhog.cpp +++ b/modules/objdetect/test/test_cascadeandhog.cpp @@ -434,7 +434,7 @@ int CV_CascadeDetectorTest::detectMultiScale_C( const string& filename, return cvtest::TS::FAIL_INVALID_TEST_DATA; } Mat grayImg; - cvtColor( img, grayImg, CV_BGR2GRAY ); + cvtColor( img, grayImg, COLOR_BGR2GRAY ); equalizeHist( grayImg, grayImg ); CvMat c_gray = grayImg; @@ -469,7 +469,7 @@ int CV_CascadeDetectorTest::detectMultiScale( int di, const Mat& img, return cvtest::TS::FAIL_INVALID_TEST_DATA; } Mat grayImg; - cvtColor( img, grayImg, CV_BGR2GRAY ); + cvtColor( img, grayImg, COLOR_BGR2GRAY ); equalizeHist( grayImg, grayImg ); cascade.detectMultiScale( grayImg, objects, 1.1, 3, flags[di] ); return cvtest::TS::OK; diff --git a/modules/ocl/include/opencv2/ocl/private/util.hpp b/modules/ocl/include/opencv2/ocl/private/util.hpp index 83992b1fd7..982084b7e0 100644 --- a/modules/ocl/include/opencv2/ocl/private/util.hpp +++ b/modules/ocl/include/opencv2/ocl/private/util.hpp @@ -46,14 +46,14 @@ #ifndef __OPENCV_OCL_PRIVATE_UTIL__ #define __OPENCV_OCL_PRIVATE_UTIL__ -#include "opencv2/ocl.hpp" - #if defined 
__APPLE__ #include #else #include #endif +#include "opencv2/ocl.hpp" + namespace cv { namespace ocl diff --git a/modules/ocl/perf/perf_color.cpp b/modules/ocl/perf/perf_color.cpp index e32a1839d8..1e05013aca 100644 --- a/modules/ocl/perf/perf_color.cpp +++ b/modules/ocl/perf/perf_color.cpp @@ -60,26 +60,26 @@ TEST(cvtColor) gen(src, size, size, all_type[j], 0, 256); SUBTEST << size << "x" << size << "; " << type_name[j] << " ; CV_RGBA2GRAY"; - cvtColor(src, dst, CV_RGBA2GRAY, 4); + cvtColor(src, dst, COLOR_RGBA2GRAY, 4); CPU_ON; - cvtColor(src, dst, CV_RGBA2GRAY, 4); + cvtColor(src, dst, COLOR_RGBA2GRAY, 4); CPU_OFF; d_src.upload(src); WARMUP_ON; - ocl::cvtColor(d_src, d_dst, CV_RGBA2GRAY, 4); + ocl::cvtColor(d_src, d_dst, COLOR_RGBA2GRAY, 4); WARMUP_OFF; GPU_ON; - ocl::cvtColor(d_src, d_dst, CV_RGBA2GRAY, 4); + ocl::cvtColor(d_src, d_dst, COLOR_RGBA2GRAY, 4); ; GPU_OFF; GPU_FULL_ON; d_src.upload(src); - ocl::cvtColor(d_src, d_dst, CV_RGBA2GRAY, 4); + ocl::cvtColor(d_src, d_dst, COLOR_RGBA2GRAY, 4); d_dst.download(dst); GPU_FULL_OFF; } diff --git a/modules/ocl/perf/perf_match_template.cpp b/modules/ocl/perf/perf_match_template.cpp index 2828efe01a..a0c94e8e47 100644 --- a/modules/ocl/perf/perf_match_template.cpp +++ b/modules/ocl/perf/perf_match_template.cpp @@ -75,10 +75,10 @@ TEST(matchTemplate) gen(templ, templ_size, templ_size, all_type[j], 0, 1); - matchTemplate(src, templ, dst, CV_TM_CCORR); + matchTemplate(src, templ, dst, TM_CCORR); CPU_ON; - matchTemplate(src, templ, dst, CV_TM_CCORR); + matchTemplate(src, templ, dst, TM_CCORR); CPU_OFF; ocl::oclMat d_src(src), d_templ, d_dst; @@ -86,18 +86,18 @@ TEST(matchTemplate) d_templ.upload(templ); WARMUP_ON; - ocl::matchTemplate(d_src, d_templ, d_dst, CV_TM_CCORR); + ocl::matchTemplate(d_src, d_templ, d_dst, TM_CCORR); WARMUP_OFF; GPU_ON; - ocl::matchTemplate(d_src, d_templ, d_dst, CV_TM_CCORR); + ocl::matchTemplate(d_src, d_templ, d_dst, TM_CCORR); ; GPU_OFF; GPU_FULL_ON; d_src.upload(src); d_templ.upload(templ); - ocl::matchTemplate(d_src, d_templ, d_dst, CV_TM_CCORR); + ocl::matchTemplate(d_src, d_templ, d_dst, TM_CCORR); d_dst.download(dst); GPU_FULL_OFF; } @@ -116,28 +116,28 @@ TEST(matchTemplate) gen(templ, templ_size, templ_size, all_type_8U[j], 0, 255); - matchTemplate(src, templ, dst, CV_TM_CCORR_NORMED); + matchTemplate(src, templ, dst, TM_CCORR_NORMED); CPU_ON; - matchTemplate(src, templ, dst, CV_TM_CCORR_NORMED); + matchTemplate(src, templ, dst, TM_CCORR_NORMED); CPU_OFF; ocl::oclMat d_src(src); ocl::oclMat d_templ(templ), d_dst; WARMUP_ON; - ocl::matchTemplate(d_src, d_templ, d_dst, CV_TM_CCORR_NORMED); + ocl::matchTemplate(d_src, d_templ, d_dst, TM_CCORR_NORMED); WARMUP_OFF; GPU_ON; - ocl::matchTemplate(d_src, d_templ, d_dst, CV_TM_CCORR_NORMED); + ocl::matchTemplate(d_src, d_templ, d_dst, TM_CCORR_NORMED); ; GPU_OFF; GPU_FULL_ON; d_src.upload(src); d_templ.upload(templ); - ocl::matchTemplate(d_src, d_templ, d_dst, CV_TM_CCORR_NORMED); + ocl::matchTemplate(d_src, d_templ, d_dst, TM_CCORR_NORMED); d_dst.download(dst); GPU_FULL_OFF; } diff --git a/modules/ocl/test/test_color.cpp b/modules/ocl/test/test_color.cpp index 202967b7a6..d70535dca5 100644 --- a/modules/ocl/test/test_color.cpp +++ b/modules/ocl/test/test_color.cpp @@ -44,6 +44,9 @@ //M*/ #include "precomp.hpp" + +using namespace cv; + #ifdef HAVE_OPENCL //#define MAT_DEBUG @@ -181,13 +184,13 @@ INSTANTIATE_TEST_CASE_P(OCL_ImgProc, CvtColor, testing::Combine( INSTANTIATE_TEST_CASE_P(OCL_ImgProc, CvtColor_YUV420, testing::Combine( testing::Values(cv::Size(128, 45), 
cv::Size(46, 132), cv::Size(1024, 1023)), - testing::Values((int)CV_YUV2RGBA_NV12, (int)CV_YUV2BGRA_NV12, (int)CV_YUV2RGB_NV12, (int)CV_YUV2BGR_NV12) + testing::Values((int)COLOR_YUV2RGBA_NV12, (int)COLOR_YUV2BGRA_NV12, (int)COLOR_YUV2RGB_NV12, (int)COLOR_YUV2BGR_NV12) )); INSTANTIATE_TEST_CASE_P(OCL_ImgProc, CvtColor_Gray2RGB, testing::Combine( DIFFERENT_SIZES, testing::Values(MatDepth(CV_8U), MatDepth(CV_16U), MatDepth(CV_32F)), - testing::Values((int)CV_GRAY2BGR, (int)CV_GRAY2BGRA, (int)CV_GRAY2RGB, (int)CV_GRAY2RGBA) + testing::Values((int)COLOR_GRAY2BGR, (int)COLOR_GRAY2BGRA, (int)COLOR_GRAY2RGB, (int)COLOR_GRAY2RGBA) )); } #endif diff --git a/modules/ocl/test/test_haar.cpp b/modules/ocl/test/test_haar.cpp index 6d2a912c24..b10d34b525 100644 --- a/modules/ocl/test/test_haar.cpp +++ b/modules/ocl/test/test_haar.cpp @@ -121,7 +121,7 @@ TEST_F(Haar, FaceDetect) Mat gray, smallImg(cvRound (img.rows / scale), cvRound(img.cols / scale), CV_8UC1 ); MemStorage storage(cvCreateMemStorage(0)); - cvtColor( img, gray, CV_BGR2GRAY ); + cvtColor( img, gray, COLOR_BGR2GRAY ); resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR ); equalizeHist( smallImg, smallImg ); diff --git a/modules/ocl/test/test_hog.cpp b/modules/ocl/test/test_hog.cpp index 9c27c88032..f064ee3a76 100644 --- a/modules/ocl/test/test_hog.cpp +++ b/modules/ocl/test/test_hog.cpp @@ -44,7 +44,7 @@ //M*/ #include "precomp.hpp" -#include "opencv2/core.hpp" + using namespace std; #ifdef HAVE_OPENCL @@ -71,11 +71,11 @@ TEST_P(HOG, GetDescriptors) switch (type) { case CV_8UC1: - cv::cvtColor(img_rgb, img, CV_BGR2GRAY); + cv::cvtColor(img_rgb, img, cv::COLOR_BGR2GRAY); break; case CV_8UC4: default: - cv::cvtColor(img_rgb, img, CV_BGR2BGRA); + cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA); break; } cv::ocl::oclMat d_img(img); @@ -128,11 +128,11 @@ TEST_P(HOG, Detect) switch (type) { case CV_8UC1: - cv::cvtColor(img_rgb, img, CV_BGR2GRAY); + cv::cvtColor(img_rgb, img, cv::COLOR_BGR2GRAY); break; case CV_8UC4: default: - cv::cvtColor(img_rgb, img, CV_BGR2BGRA); + cv::cvtColor(img_rgb, img, cv::COLOR_BGR2BGRA); break; } cv::ocl::oclMat d_img(img); diff --git a/modules/ocl/test/utility.cpp b/modules/ocl/test/utility.cpp index 4b21081a8b..9873a88553 100644 --- a/modules/ocl/test/utility.cpp +++ b/modules/ocl/test/utility.cpp @@ -245,7 +245,7 @@ double checkNorm(const Mat &m1, const Mat &m2) double checkSimilarity(const Mat &m1, const Mat &m2) { Mat diff; - matchTemplate(m1, m2, diff, CV_TM_CCORR_NORMED); + matchTemplate(m1, m2, diff, TM_CCORR_NORMED); return std::abs(diff.at(0, 0) - 1.f); } diff --git a/modules/photo/src/denoising.cpp b/modules/photo/src/denoising.cpp index 299390f154..b2349ba273 100644 --- a/modules/photo/src/denoising.cpp +++ b/modules/photo/src/denoising.cpp @@ -93,7 +93,7 @@ void cv::fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst, } Mat src_lab; - cvtColor(src, src_lab, CV_LBGR2Lab); + cvtColor(src, src_lab, COLOR_LBGR2Lab); Mat l(src.size(), CV_8U); Mat ab(src.size(), CV_8UC2); @@ -108,7 +108,7 @@ void cv::fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst, Mat dst_lab(src.size(), src.type()); mixChannels(l_ab_denoised, 2, &dst_lab, 1, from_to, 3); - cvtColor(dst_lab, dst, CV_Lab2LBGR); + cvtColor(dst_lab, dst, COLOR_Lab2LBGR); } static void fastNlMeansDenoisingMultiCheckPreconditions( @@ -215,7 +215,7 @@ void cv::fastNlMeansDenoisingColoredMulti( InputArrayOfArrays _srcImgs, OutputAr src_lab[i] = Mat::zeros(srcImgs[0].size(), CV_8UC3); l[i] = Mat::zeros(srcImgs[0].size(), CV_8UC1); ab[i] 
= Mat::zeros(srcImgs[0].size(), CV_8UC2); - cvtColor(srcImgs[i], src_lab[i], CV_LBGR2Lab); + cvtColor(srcImgs[i], src_lab[i], COLOR_LBGR2Lab); Mat l_ab[] = { l[i], ab[i] }; mixChannels(&src_lab[i], 1, l_ab, 2, from_to, 3); @@ -236,7 +236,7 @@ void cv::fastNlMeansDenoisingColoredMulti( InputArrayOfArrays _srcImgs, OutputAr Mat dst_lab(srcImgs[0].size(), srcImgs[0].type()); mixChannels(l_ab_denoised, 2, &dst_lab, 1, from_to, 3); - cvtColor(dst_lab, dst, CV_Lab2LBGR); + cvtColor(dst_lab, dst, COLOR_Lab2LBGR); } diff --git a/modules/photo/src/inpaint.cpp b/modules/photo/src/inpaint.cpp index 672e6c05ab..ce05fc3d1a 100644 --- a/modules/photo/src/inpaint.cpp +++ b/modules/photo/src/inpaint.cpp @@ -716,6 +716,12 @@ icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueue }\ } +namespace cv { +template<> void cv::Ptr::delete_obj() +{ + cvReleaseStructuringElement(&obj); +} +} void cvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_img, diff --git a/modules/photo/test/test_inpaint.cpp b/modules/photo/test/test_inpaint.cpp index 2bdb2ddaa5..ef7132d6da 100644 --- a/modules/photo/test/test_inpaint.cpp +++ b/modules/photo/test/test_inpaint.cpp @@ -78,7 +78,7 @@ void CV_InpaintTest::run( int ) mask.convertTo(inv_mask, CV_8UC3, -1.0, 255.0); Mat mask1ch; - cv::cvtColor(mask, mask1ch, CV_BGR2GRAY); + cv::cvtColor(mask, mask1ch, COLOR_BGR2GRAY); Mat test = orig.clone(); test.setTo(Scalar::all(255), mask1ch); diff --git a/modules/stitching/src/blenders.cpp b/modules/stitching/src/blenders.cpp index dc7d1da618..a91e39b480 100644 --- a/modules/stitching/src/blenders.cpp +++ b/modules/stitching/src/blenders.cpp @@ -425,7 +425,7 @@ void normalizeUsingWeightMap(const Mat& weight, Mat& src) void createWeightMap(const Mat &mask, float sharpness, Mat &weight) { CV_Assert(mask.type() == CV_8U); - distanceTransform(mask, weight, CV_DIST_L1, 3); + distanceTransform(mask, weight, DIST_L1, 3); threshold(weight * sharpness, weight, 1.f, 1.f, THRESH_TRUNC); } diff --git a/modules/stitching/src/matchers.cpp b/modules/stitching/src/matchers.cpp index b564193eb3..65f2a0030e 100644 --- a/modules/stitching/src/matchers.cpp +++ b/modules/stitching/src/matchers.cpp @@ -350,7 +350,7 @@ void SurfFeaturesFinder::find(const Mat &image, ImageFeatures &features) CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC1)); if(image.type() == CV_8UC3) { - cvtColor(image, gray_image, CV_BGR2GRAY); + cvtColor(image, gray_image, COLOR_BGR2GRAY); } else { @@ -382,9 +382,9 @@ void OrbFeaturesFinder::find(const Mat &image, ImageFeatures &features) CV_Assert((image.type() == CV_8UC3) || (image.type() == CV_8UC4) || (image.type() == CV_8UC1)); if (image.type() == CV_8UC3) { - cvtColor(image, gray_image, CV_BGR2GRAY); + cvtColor(image, gray_image, COLOR_BGR2GRAY); } else if (image.type() == CV_8UC4) { - cvtColor(image, gray_image, CV_BGRA2GRAY); + cvtColor(image, gray_image, COLOR_BGRA2GRAY); } else if (image.type() == CV_8UC1) { gray_image=image; } else { @@ -457,7 +457,7 @@ void SurfFeaturesFinderGpu::find(const Mat &image, ImageFeatures &features) image_.upload(image); ensureSizeIsEnough(image.size(), CV_8UC1, gray_image_); - cvtColor(image_, gray_image_, CV_BGR2GRAY); + cvtColor(image_, gray_image_, COLOR_BGR2GRAY); surf_.nOctaves = num_octaves_; surf_.nOctaveLayers = num_layers_; diff --git a/modules/stitching/src/seam_finders.cpp b/modules/stitching/src/seam_finders.cpp index d55fc70b78..3ecef1c787 100644 --- a/modules/stitching/src/seam_finders.cpp +++ 
b/modules/stitching/src/seam_finders.cpp @@ -139,8 +139,8 @@ void VoronoiSeamFinder::findInPair(size_t first, size_t second, Rect roi) Mat unique2 = submask2.clone(); unique2.setTo(0, collision); Mat dist1, dist2; - distanceTransform(unique1 == 0, dist1, CV_DIST_L1, 3); - distanceTransform(unique2 == 0, dist2, CV_DIST_L1, 3); + distanceTransform(unique1 == 0, dist1, DIST_L1, 3); + distanceTransform(unique2 == 0, dist2, DIST_L1, 3); Mat seam = dist1 < dist2; @@ -522,17 +522,17 @@ void DpSeamFinder::computeGradients(const Mat &image1, const Mat &image2) Mat gray; if (image1.channels() == 3) - cvtColor(image1, gray, CV_BGR2GRAY); + cvtColor(image1, gray, COLOR_BGR2GRAY); else if (image1.channels() == 4) - cvtColor(image1, gray, CV_BGRA2GRAY); + cvtColor(image1, gray, COLOR_BGRA2GRAY); Sobel(gray, gradx1_, CV_32F, 1, 0); Sobel(gray, grady1_, CV_32F, 0, 1); if (image2.channels() == 3) - cvtColor(image2, gray, CV_BGR2GRAY); + cvtColor(image2, gray, COLOR_BGR2GRAY); else if (image2.channels() == 4) - cvtColor(image2, gray, CV_BGRA2GRAY); + cvtColor(image2, gray, COLOR_BGRA2GRAY); Sobel(gray, gradx2_, CV_32F, 1, 0); Sobel(gray, grady2_, CV_32F, 0, 1); diff --git a/modules/ts/src/gpu_test.cpp b/modules/ts/src/gpu_test.cpp index 6f839c341c..4847dbc383 100644 --- a/modules/ts/src/gpu_test.cpp +++ b/modules/ts/src/gpu_test.cpp @@ -330,7 +330,7 @@ namespace cvtest double checkSimilarity(InputArray m1, InputArray m2) { Mat diff; - matchTemplate(getMat(m1), getMat(m2), diff, CV_TM_CCORR_NORMED); + matchTemplate(getMat(m1), getMat(m2), diff, TM_CCORR_NORMED); return std::abs(diff.at(0, 0) - 1.f); } diff --git a/modules/video/include/opencv2/video/tracking.hpp b/modules/video/include/opencv2/video/tracking.hpp index dbccfae748..af3c423f50 100644 --- a/modules/video/include/opencv2/video/tracking.hpp +++ b/modules/video/include/opencv2/video/tracking.hpp @@ -45,8 +45,10 @@ #ifdef __cplusplus # include "opencv2/core.hpp" +# include "opencv2/imgproc.hpp" #endif -#include "opencv2/imgproc.hpp" + +#include "opencv2/imgproc/imgproc_c.h" #ifdef __cplusplus extern "C" { diff --git a/samples/c/delaunay.c b/samples/c/delaunay.c index 21fe340a3a..1f09efef90 100644 --- a/samples/c/delaunay.c +++ b/samples/c/delaunay.c @@ -1,6 +1,6 @@ #include -#include -#include "opencv2/highgui/highgui.hpp" +#include +#include #include static void help( void ) diff --git a/samples/c/facedetect.cpp b/samples/c/facedetect.cpp index 8976a9cdcd..7d02ac95dd 100644 --- a/samples/c/facedetect.cpp +++ b/samples/c/facedetect.cpp @@ -207,7 +207,7 @@ void detectAndDraw( Mat& img, CascadeClassifier& cascade, CV_RGB(255,0,255)} ; Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 ); - cvtColor( img, gray, CV_BGR2GRAY ); + cvtColor( img, gray, COLOR_BGR2GRAY ); resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR ); equalizeHist( smallImg, smallImg ); diff --git a/samples/c/polar_transforms.c b/samples/c/polar_transforms.c index d6915d488c..8ce4831637 100644 --- a/samples/c/polar_transforms.c +++ b/samples/c/polar_transforms.c @@ -1,7 +1,5 @@ -#include "opencv2/imgproc/imgproc.hpp" -#include "opencv2/highgui/highgui.hpp" - #include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/highgui/highgui_c.h" #include #include diff --git a/samples/c/pyramid_segmentation.c b/samples/c/pyramid_segmentation.c index 780dc9f301..c15c7cd0c0 100644 --- a/samples/c/pyramid_segmentation.c +++ b/samples/c/pyramid_segmentation.c @@ -1,7 +1,6 @@ -#include "opencv2/imgproc/imgproc.hpp" -#include "opencv2/highgui/highgui.hpp" #include 
"opencv2/imgproc/imgproc_c.h" -#include "opencv2/legacy/legacy.hpp" +#include "opencv2/highgui/highgui_c.h" +#include "opencv2/legacy.hpp" #include static void help(void) diff --git a/samples/c/smiledetect.cpp b/samples/c/smiledetect.cpp index 214ae94127..07c482147f 100644 --- a/samples/c/smiledetect.cpp +++ b/samples/c/smiledetect.cpp @@ -167,7 +167,7 @@ void detectAndDraw( Mat& img, CascadeClassifier& cascade, CV_RGB(255,0,255)} ; Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 ); - cvtColor( img, gray, CV_BGR2GRAY ); + cvtColor( img, gray, COLOR_BGR2GRAY ); resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR ); equalizeHist( smallImg, smallImg ); diff --git a/samples/cpp/3calibration.cpp b/samples/cpp/3calibration.cpp index 061e977952..24e9e2686e 100644 --- a/samples/cpp/3calibration.cpp +++ b/samples/cpp/3calibration.cpp @@ -271,7 +271,7 @@ int main( int argc, char** argv ) { vector ptvec; imageSize = view.size(); - cvtColor(view, viewGray, CV_BGR2GRAY); + cvtColor(view, viewGray, COLOR_BGR2GRAY); bool found = findChessboardCorners( view, boardSize, ptvec, CV_CALIB_CB_ADAPTIVE_THRESH ); drawChessboardCorners( view, boardSize, Mat(ptvec), found ); @@ -359,7 +359,7 @@ int main( int argc, char** argv ) continue; Mat rview = canvas.colRange(k2*imageSize.width, (k2+1)*imageSize.width); - remap(view, rview, map1[k1], map2[k1], CV_INTER_LINEAR); + remap(view, rview, map1[k1], map2[k1], INTER_LINEAR); } printf("%s %s %s\n", imageList[i*3].c_str(), imageList[i*3+1].c_str(), imageList[i*3+2].c_str()); resize( canvas, small_canvas, Size(1500, 1500/3) ); diff --git a/samples/cpp/build3dmodel.cpp b/samples/cpp/build3dmodel.cpp index 5535a5fb8c..7150064a47 100644 --- a/samples/cpp/build3dmodel.cpp +++ b/samples/cpp/build3dmodel.cpp @@ -509,7 +509,7 @@ static void build3dmodel( const Ptr& detector, for( size_t i = 0; i < nimages; i++ ) { Mat img = imread(imageList[i], 1), gray; - cvtColor(img, gray, CV_BGR2GRAY); + cvtColor(img, gray, COLOR_BGR2GRAY); vector keypoints; detector->detect(gray, keypoints); diff --git a/samples/cpp/calibration.cpp b/samples/cpp/calibration.cpp index 8cbf6c5387..8cb982912c 100644 --- a/samples/cpp/calibration.cpp +++ b/samples/cpp/calibration.cpp @@ -462,7 +462,7 @@ int main( int argc, char** argv ) flip( view, view, 0 ); vector pointbuf; - cvtColor(view, viewGray, CV_BGR2GRAY); + cvtColor(view, viewGray, COLOR_BGR2GRAY); bool found; switch( pattern ) diff --git a/samples/cpp/chamfer.cpp b/samples/cpp/chamfer.cpp index a7d3578d4d..ab211f33a5 100644 --- a/samples/cpp/chamfer.cpp +++ b/samples/cpp/chamfer.cpp @@ -41,7 +41,7 @@ int main( int argc, const char** argv ) return -1; } Mat cimg; - cvtColor(img, cimg, CV_GRAY2BGR); + cvtColor(img, cimg, COLOR_GRAY2BGR); // if the image and the template are not edge maps but normal grayscale images, // you might want to uncomment the lines below to produce the maps. You can also diff --git a/samples/cpp/detection_based_tracker_sample.cpp b/samples/cpp/detection_based_tracker_sample.cpp index 50dffea569..1debff7e3f 100644 --- a/samples/cpp/detection_based_tracker_sample.cpp +++ b/samples/cpp/detection_based_tracker_sample.cpp @@ -144,7 +144,7 @@ static int test_FaceDetector(int argc, char *argv[]) LOGD("\n\nSTEP n=%d from prev step %f ms\n", n, t_ms); m=images[n-1]; CV_Assert(! 
m.empty()); - cvtColor(m, gray, CV_BGR2GRAY); + cvtColor(m, gray, COLOR_BGR2GRAY); fd.process(gray); diff --git a/samples/cpp/distrans.cpp b/samples/cpp/distrans.cpp index 71ed71a984..1bd85bbc2b 100644 --- a/samples/cpp/distrans.cpp +++ b/samples/cpp/distrans.cpp @@ -7,10 +7,10 @@ using namespace std; using namespace cv; -int maskSize0 = CV_DIST_MASK_5; +int maskSize0 = DIST_MASK_5; int voronoiType = -1; int edgeThresh = 100; -int distType0 = CV_DIST_L1; +int distType0 = DIST_L1; // The output and temporary images Mat gray; @@ -31,8 +31,8 @@ static void onTrackbar( int, void* ) Scalar(255,0,255) }; - int maskSize = voronoiType >= 0 ? CV_DIST_MASK_5 : maskSize0; - int distType = voronoiType >= 0 ? CV_DIST_L2 : distType0; + int maskSize = voronoiType >= 0 ? DIST_MASK_5 : maskSize0; + int distType = voronoiType >= 0 ? DIST_L2 : distType0; Mat edge = gray >= edgeThresh, dist, labels, dist8u; @@ -140,17 +140,17 @@ int main( int argc, const char** argv ) voronoiType = -1; if( c == 'c' || c == 'C' ) - distType0 = CV_DIST_C; + distType0 = DIST_C; else if( c == '1' ) - distType0 = CV_DIST_L1; + distType0 = DIST_L1; else if( c == '2' ) - distType0 = CV_DIST_L2; + distType0 = DIST_L2; else if( c == '3' ) - maskSize0 = CV_DIST_MASK_3; + maskSize0 = DIST_MASK_3; else if( c == '5' ) - maskSize0 = CV_DIST_MASK_5; + maskSize0 = DIST_MASK_5; else if( c == '0' ) - maskSize0 = CV_DIST_MASK_PRECISE; + maskSize0 = DIST_MASK_PRECISE; else if( c == 'v' ) voronoiType = 0; else if( c == 'p' ) @@ -162,18 +162,18 @@ int main( int argc, const char** argv ) else if( voronoiType == 1 ) { voronoiType = -1; - maskSize0 = CV_DIST_MASK_3; - distType0 = CV_DIST_C; + maskSize0 = DIST_MASK_3; + distType0 = DIST_C; } - else if( distType0 == CV_DIST_C ) - distType0 = CV_DIST_L1; - else if( distType0 == CV_DIST_L1 ) - distType0 = CV_DIST_L2; - else if( maskSize0 == CV_DIST_MASK_3 ) - maskSize0 = CV_DIST_MASK_5; - else if( maskSize0 == CV_DIST_MASK_5 ) - maskSize0 = CV_DIST_MASK_PRECISE; - else if( maskSize0 == CV_DIST_MASK_PRECISE ) + else if( distType0 == DIST_C ) + distType0 = DIST_L1; + else if( distType0 == DIST_L1 ) + distType0 = DIST_L2; + else if( maskSize0 == DIST_MASK_3 ) + maskSize0 = DIST_MASK_5; + else if( maskSize0 == DIST_MASK_5 ) + maskSize0 = DIST_MASK_PRECISE; + else if( maskSize0 == DIST_MASK_PRECISE ) voronoiType = 0; } } diff --git a/samples/cpp/edge.cpp b/samples/cpp/edge.cpp index 0944c90ee2..262833c672 100644 --- a/samples/cpp/edge.cpp +++ b/samples/cpp/edge.cpp @@ -50,7 +50,7 @@ int main( int argc, const char** argv ) return -1; } cedge.create(image.size(), image.type()); - cvtColor(image, gray, CV_BGR2GRAY); + cvtColor(image, gray, COLOR_BGR2GRAY); // Create a window namedWindow("Edge map", 1); diff --git a/samples/cpp/ffilldemo.cpp b/samples/cpp/ffilldemo.cpp index a7fff778fd..bb207c307e 100644 --- a/samples/cpp/ffilldemo.cpp +++ b/samples/cpp/ffilldemo.cpp @@ -41,7 +41,7 @@ static void onMouse( int event, int x, int y, int, void* ) int lo = ffillMode == 0 ? 0 : loDiff; int up = ffillMode == 0 ? 0 : upDiff; int flags = connectivity + (newMaskVal << 8) + - (ffillMode == 1 ? CV_FLOODFILL_FIXED_RANGE : 0); + (ffillMode == 1 ? 
FLOODFILL_FIXED_RANGE : 0); int b = (unsigned)theRNG() & 255; int g = (unsigned)theRNG() & 255; int r = (unsigned)theRNG() & 255; @@ -53,7 +53,7 @@ static void onMouse( int event, int x, int y, int, void* ) if( useMask ) { - threshold(mask, mask, 1, 128, CV_THRESH_BINARY); + threshold(mask, mask, 1, 128, THRESH_BINARY); area = floodFill(dst, mask, seed, newVal, &ccomp, Scalar(lo, lo, lo), Scalar(up, up, up), flags); imshow( "mask", mask ); @@ -81,7 +81,7 @@ int main( int argc, char** argv ) } help(); image0.copyTo(image); - cvtColor(image0, gray, CV_BGR2GRAY); + cvtColor(image0, gray, COLOR_BGR2GRAY); mask.create(image0.rows+2, image0.cols+2, CV_8UC1); namedWindow( "image", 0 ); @@ -106,7 +106,7 @@ int main( int argc, char** argv ) if( isColor ) { cout << "Grayscale mode is set\n"; - cvtColor(image0, gray, CV_BGR2GRAY); + cvtColor(image0, gray, COLOR_BGR2GRAY); mask = Scalar::all(0); isColor = false; } @@ -135,7 +135,7 @@ int main( int argc, char** argv ) case 'r': cout << "Original image is restored\n"; image0.copyTo(image); - cvtColor(image, gray, CV_BGR2GRAY); + cvtColor(image, gray, COLOR_BGR2GRAY); mask = Scalar::all(0); break; case 's': diff --git a/samples/cpp/fitellipse.cpp b/samples/cpp/fitellipse.cpp index 55f9e4ab06..2fa62b8e46 100644 --- a/samples/cpp/fitellipse.cpp +++ b/samples/cpp/fitellipse.cpp @@ -64,7 +64,7 @@ void processImage(int /*h*/, void*) vector > contours; Mat bimage = image >= sliderPos; - findContours(bimage, contours, CV_RETR_LIST, CV_CHAIN_APPROX_NONE); + findContours(bimage, contours, RETR_LIST, CHAIN_APPROX_NONE); Mat cimage = Mat::zeros(bimage.size(), CV_8UC3); diff --git a/samples/cpp/houghcircles.cpp b/samples/cpp/houghcircles.cpp index bc53fd1249..b51faf53a5 100644 --- a/samples/cpp/houghcircles.cpp +++ b/samples/cpp/houghcircles.cpp @@ -27,10 +27,10 @@ int main(int argc, char** argv) Mat cimg; medianBlur(img, img, 5); - cvtColor(img, cimg, CV_GRAY2BGR); + cvtColor(img, cimg, COLOR_GRAY2BGR); vector circles; - HoughCircles(img, circles, CV_HOUGH_GRADIENT, 1, 10, + HoughCircles(img, circles, HOUGH_GRADIENT, 1, 10, 100, 30, 1, 30 // change the last two parameters // (min_radius & max_radius) to detect larger circles ); diff --git a/samples/cpp/houghlines.cpp b/samples/cpp/houghlines.cpp index b79ea2f5ff..7efb990e63 100644 --- a/samples/cpp/houghlines.cpp +++ b/samples/cpp/houghlines.cpp @@ -27,7 +27,7 @@ int main(int argc, char** argv) Mat dst, cdst; Canny(src, dst, 50, 200, 3); - cvtColor(dst, cdst, CV_GRAY2BGR); + cvtColor(dst, cdst, COLOR_GRAY2BGR); #if 0 vector lines; diff --git a/samples/cpp/image.cpp b/samples/cpp/image.cpp index 53b9bc1b9f..0e9ea9cf69 100644 --- a/samples/cpp/image.cpp +++ b/samples/cpp/image.cpp @@ -49,7 +49,7 @@ int main( int argc, char** argv ) return -1; Mat img_yuv; - cvtColor(img, img_yuv, CV_BGR2YCrCb); // convert image to YUV color space. The output image will be created automatically + cvtColor(img, img_yuv, COLOR_BGR2YCrCb); // convert image to YUV color space. The output image will be created automatically vector planes; // Vector is template vector class, similar to STL's vector. It can store matrices too. 
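// Illustrative aside (not part of the patch): the change above is only a spelling
// change, from the C-API defines to the enum constants declared in
// opencv2/imgproc.hpp; the constant values themselves are unchanged. A minimal
// sketch of the new spelling (the file name is hypothetical):
//
//     cv::Mat bgr = cv::imread("input.png");
//     cv::Mat ycrcb, gray;
//     cv::cvtColor(bgr, ycrcb, cv::COLOR_BGR2YCrCb);  // was CV_BGR2YCrCb
//     cv::cvtColor(bgr, gray,  cv::COLOR_BGR2GRAY);   // was CV_BGR2GRAY
//
// Code that still includes the C headers can keep using the old CV_* names,
// which remain available via opencv2/imgproc/types_c.h.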
split(img_yuv, planes); // split the image into separate color planes @@ -107,7 +107,7 @@ int main( int argc, char** argv ) // now merge the results back merge(planes, img_yuv); // and produce the output RGB image - cvtColor(img_yuv, img, CV_YCrCb2BGR); + cvtColor(img_yuv, img, COLOR_YCrCb2BGR); // this is counterpart for cvNamedWindow namedWindow("image with grain", CV_WINDOW_AUTOSIZE); diff --git a/samples/cpp/laplace.cpp b/samples/cpp/laplace.cpp index 50cb8467ba..8bddeb820e 100644 --- a/samples/cpp/laplace.cpp +++ b/samples/cpp/laplace.cpp @@ -17,8 +17,10 @@ static void help() "./laplace [camera #, default 0]\n" << endl; } +enum {GAUSSIAN, BLUR, MEDIAN}; + int sigma = 3; -int smoothType = CV_GAUSSIAN; +int smoothType = GAUSSIAN; int main( int argc, char** argv ) { @@ -63,9 +65,9 @@ int main( int argc, char** argv ) break; int ksize = (sigma*5)|1; - if(smoothType == CV_GAUSSIAN) + if(smoothType == GAUSSIAN) GaussianBlur(frame, smoothed, Size(ksize, ksize), sigma, sigma); - else if(smoothType == CV_BLUR) + else if(smoothType == BLUR) blur(frame, smoothed, Size(ksize, ksize)); else medianBlur(frame, smoothed, ksize); @@ -76,7 +78,7 @@ int main( int argc, char** argv ) int c = waitKey(30); if( c == ' ' ) - smoothType = smoothType == CV_GAUSSIAN ? CV_BLUR : smoothType == CV_BLUR ? CV_MEDIAN : CV_GAUSSIAN; + smoothType = smoothType == GAUSSIAN ? BLUR : smoothType == BLUR ? MEDIAN : GAUSSIAN; if( c == 'q' || c == 'Q' || (c & 255) == 27 ) break; } diff --git a/samples/cpp/morphology2.cpp b/samples/cpp/morphology2.cpp index d4ab351545..3052f85d9a 100644 --- a/samples/cpp/morphology2.cpp +++ b/samples/cpp/morphology2.cpp @@ -37,9 +37,9 @@ static void OpenClose(int, void*) int an = n > 0 ? n : -n; Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) ); if( n < 0 ) - morphologyEx(src, dst, CV_MOP_OPEN, element); + morphologyEx(src, dst, MORPH_OPEN, element); else - morphologyEx(src, dst, CV_MOP_CLOSE, element); + morphologyEx(src, dst, MORPH_CLOSE, element); imshow("Open/Close",dst); } diff --git a/samples/cpp/phase_corr.cpp b/samples/cpp/phase_corr.cpp index d9a1419a7c..97172adc38 100644 --- a/samples/cpp/phase_corr.cpp +++ b/samples/cpp/phase_corr.cpp @@ -13,7 +13,7 @@ int main(int, char* []) do { video >> frame; - cvtColor(frame, curr, CV_RGB2GRAY); + cvtColor(frame, curr, COLOR_RGB2GRAY); if(prev.empty()) { diff --git a/samples/cpp/rgbdodometry.cpp b/samples/cpp/rgbdodometry.cpp index 660eb31d02..0db2fab847 100644 --- a/samples/cpp/rgbdodometry.cpp +++ b/samples/cpp/rgbdodometry.cpp @@ -125,8 +125,8 @@ int main(int argc, char** argv) } Mat grayImage0, grayImage1, depthFlt0, depthFlt1/*in meters*/; - cvtColor( colorImage0, grayImage0, CV_BGR2GRAY ); - cvtColor( colorImage1, grayImage1, CV_BGR2GRAY ); + cvtColor( colorImage0, grayImage0, COLOR_BGR2GRAY ); + cvtColor( colorImage1, grayImage1, COLOR_BGR2GRAY ); depth0.convertTo( depthFlt0, CV_32FC1, 1./1000 ); depth1.convertTo( depthFlt1, CV_32FC1, 1./1000 ); diff --git a/samples/cpp/segment_objects.cpp b/samples/cpp/segment_objects.cpp index d70d0350b7..8555a0e602 100644 --- a/samples/cpp/segment_objects.cpp +++ b/samples/cpp/segment_objects.cpp @@ -30,7 +30,7 @@ static void refineSegments(const Mat& img, Mat& mask, Mat& dst) erode(temp, temp, Mat(), Point(-1,-1), niters*2); dilate(temp, temp, Mat(), Point(-1,-1), niters); - findContours( temp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE ); + findContours( temp, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE ); dst = Mat::zeros(img.size(), 
CV_8UC3); diff --git a/samples/cpp/squares.cpp b/samples/cpp/squares.cpp index 13a9946673..11887c1ca0 100644 --- a/samples/cpp/squares.cpp +++ b/samples/cpp/squares.cpp @@ -83,7 +83,7 @@ static void findSquares( const Mat& image, vector >& squares ) } // find contours and store them all as a list - findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE); + findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE); vector approx; diff --git a/samples/cpp/stereo_calib.cpp b/samples/cpp/stereo_calib.cpp index 07621cef8c..57fde453cb 100644 --- a/samples/cpp/stereo_calib.cpp +++ b/samples/cpp/stereo_calib.cpp @@ -121,7 +121,7 @@ StereoCalib(const vector& imagelist, Size boardSize, bool useCalibrated= { cout << filename << endl; Mat cimg, cimg1; - cvtColor(img, cimg, CV_GRAY2BGR); + cvtColor(img, cimg, COLOR_GRAY2BGR); drawChessboardCorners(cimg, boardSize, corners, found); double sf = 640./MAX(img.rows, img.cols); resize(cimg, cimg1, Size(), sf, sf); @@ -304,10 +304,10 @@ StereoCalib(const vector& imagelist, Size boardSize, bool useCalibrated= for( k = 0; k < 2; k++ ) { Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg; - remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR); - cvtColor(rimg, cimg, CV_GRAY2BGR); + remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR); + cvtColor(rimg, cimg, COLOR_GRAY2BGR); Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h)); - resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA); + resize(cimg, canvasPart, canvasPart.size(), 0, 0, INTER_AREA); if( useCalibrated ) { Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf), diff --git a/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp b/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp index 49e4be0e54..5b5d9899e8 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp @@ -31,7 +31,7 @@ int main( int, char** argv ) } /// Convert to grayscale - cvtColor( src, src, CV_BGR2GRAY ); + cvtColor( src, src, COLOR_BGR2GRAY ); /// Apply Histogram Equalization equalizeHist( src, dst ); diff --git a/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp b/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp index e3633576f8..ffbc30bb18 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp @@ -74,7 +74,7 @@ void MatchingMethod( int, void* ) /// For SQDIFF and SQDIFF_NORMED, the best matches are lower values. 
For all the other methods, the higher the better - if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED ) + if( match_method == TM_SQDIFF || match_method == TM_SQDIFF_NORMED ) { matchLoc = minLoc; } else { matchLoc = maxLoc; } diff --git a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp index f422603749..e4a0af84b3 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp @@ -28,7 +28,7 @@ int main( int, char** argv ) /// Read the image src = imread( argv[1], 1 ); /// Transform it to HSV - cvtColor( src, hsv, CV_BGR2HSV ); + cvtColor( src, hsv, COLOR_BGR2HSV ); /// Use only the Hue value hue.create( hsv.size(), hsv.depth() ); diff --git a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp index 42dd01a671..c908cd83f1 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp @@ -31,7 +31,7 @@ int main( int, char** argv ) /// Read the image src = imread( argv[1], 1 ); /// Transform it to HSV - cvtColor( src, hsv, CV_BGR2HSV ); + cvtColor( src, hsv, COLOR_BGR2HSV ); /// Show the image namedWindow( window_image, CV_WINDOW_AUTOSIZE ); diff --git a/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp b/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp index b18aab2e8b..109f8131a3 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/compareHist_Demo.cpp @@ -33,9 +33,9 @@ int main( int argc, char** argv ) src_test2 = imread( argv[3], 1 ); /// Convert to HSV - cvtColor( src_base, hsv_base, CV_BGR2HSV ); - cvtColor( src_test1, hsv_test1, CV_BGR2HSV ); - cvtColor( src_test2, hsv_test2, CV_BGR2HSV ); + cvtColor( src_base, hsv_base, COLOR_BGR2HSV ); + cvtColor( src_test1, hsv_test1, COLOR_BGR2HSV ); + cvtColor( src_test2, hsv_test2, COLOR_BGR2HSV ); hsv_half_down = hsv_base( Range( hsv_base.rows/2, hsv_base.rows - 1 ), Range( 0, hsv_base.cols - 1 ) ); diff --git a/samples/cpp/tutorial_code/ImgProc/Threshold.cpp b/samples/cpp/tutorial_code/ImgProc/Threshold.cpp index 7505ec297b..7ba05db3c9 100644 --- a/samples/cpp/tutorial_code/ImgProc/Threshold.cpp +++ b/samples/cpp/tutorial_code/ImgProc/Threshold.cpp @@ -37,7 +37,7 @@ int main( int, char** argv ) src = imread( argv[1], 1 ); /// Convert the image to Gray - cvtColor( src, src_gray, CV_RGB2GRAY ); + cvtColor( src, src_gray, COLOR_RGB2GRAY ); /// Create a window to display results namedWindow( window_name, CV_WINDOW_AUTOSIZE ); diff --git a/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp index c798f2fb42..f1455094a7 100644 --- a/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp @@ -58,7 +58,7 @@ int main( int, char** argv ) dst.create( src.size(), src.type() ); /// Convert the image to grayscale - cvtColor( src, src_gray, CV_BGR2GRAY ); + cvtColor( src, src_gray, COLOR_BGR2GRAY ); /// Create a window namedWindow( window_name, CV_WINDOW_AUTOSIZE ); diff --git a/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp index 
5782e92a3c..09a6cefde2 100644 --- a/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp @@ -26,7 +26,7 @@ int main(int, char** argv) { return -1; } /// Convert it to gray - cvtColor( src, src_gray, CV_BGR2GRAY ); + cvtColor( src, src_gray, COLOR_BGR2GRAY ); /// Reduce the noise so we avoid false circle detection GaussianBlur( src_gray, src_gray, Size(9, 9), 2, 2 ); @@ -34,7 +34,7 @@ int main(int, char** argv) vector circles; /// Apply the Hough Transform to find the circles - HoughCircles( src_gray, circles, CV_HOUGH_GRADIENT, 1, src_gray.rows/8, 200, 100, 0, 0 ); + HoughCircles( src_gray, circles, HOUGH_GRADIENT, 1, src_gray.rows/8, 200, 100, 0, 0 ); /// Draw the circles detected for( size_t i = 0; i < circles.size(); i++ ) diff --git a/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp index 561948a581..e4064e1aa7 100644 --- a/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp @@ -46,7 +46,7 @@ int main( int, char** argv ) } /// Pass the image to gray - cvtColor( src, src_gray, CV_RGB2GRAY ); + cvtColor( src, src_gray, COLOR_RGB2GRAY ); /// Apply Canny edge detector Canny( src_gray, edges, 50, 200, 3 ); @@ -85,7 +85,7 @@ void help() void Standard_Hough( int, void* ) { vector s_lines; - cvtColor( edges, standard_hough, CV_GRAY2BGR ); + cvtColor( edges, standard_hough, COLOR_GRAY2BGR ); /// 1. Use Standard Hough Transform HoughLines( edges, s_lines, 1, CV_PI/180, min_threshold + s_trackbar, 0, 0 ); @@ -112,7 +112,7 @@ void Standard_Hough( int, void* ) void Probabilistic_Hough( int, void* ) { vector p_lines; - cvtColor( edges, probabilistic_hough, CV_GRAY2BGR ); + cvtColor( edges, probabilistic_hough, COLOR_GRAY2BGR ); /// 2. Use Probabilistic Hough Transform HoughLinesP( edges, p_lines, 1, CV_PI/180, min_threshold + p_trackbar, 30, 10 ); diff --git a/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp index 279dc6dcb6..f9033df471 100644 --- a/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp @@ -34,7 +34,7 @@ int main( int, char** argv ) GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT ); /// Convert the image to grayscale - cvtColor( src, src_gray, CV_RGB2GRAY ); + cvtColor( src, src_gray, COLOR_RGB2GRAY ); /// Create window namedWindow( window_name, CV_WINDOW_AUTOSIZE ); diff --git a/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp index 40e5118a6d..814a01c4c4 100644 --- a/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp @@ -47,7 +47,7 @@ int main( int, char** argv ) /// Update map_x & map_y. 
Then apply remap update_map(); - remap( src, dst, map_x, map_y, CV_INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0) ); + remap( src, dst, map_x, map_y, INTER_LINEAR, BORDER_CONSTANT, Scalar(0, 0, 0) ); // Display results imshow( remap_window, dst ); diff --git a/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp index 0ac9eb1a30..3a8130b332 100644 --- a/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp @@ -33,7 +33,7 @@ int main( int, char** argv ) GaussianBlur( src, src, Size(3,3), 0, 0, BORDER_DEFAULT ); /// Convert it to gray - cvtColor( src, src_gray, CV_RGB2GRAY ); + cvtColor( src, src_gray, COLOR_RGB2GRAY ); /// Create window namedWindow( window_name, CV_WINDOW_AUTOSIZE ); diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp index e301476ee7..01a4f1c73a 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp @@ -30,7 +30,7 @@ int main( int, char** argv ) src = imread( argv[1], 1 ); /// Convert image to gray and blur it - cvtColor( src, src_gray, CV_BGR2GRAY ); + cvtColor( src, src_gray, COLOR_BGR2GRAY ); blur( src_gray, src_gray, Size(3,3) ); /// Create Window @@ -57,7 +57,7 @@ void thresh_callback(int, void* ) /// Detect edges using canny Canny( src_gray, canny_output, thresh, thresh*2, 3 ); /// Find contours - findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) ); + findContours( canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) ); /// Draw contours Mat drawing = Mat::zeros( canny_output.size(), CV_8UC3 ); diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp index b973cfd973..dfc27e56ce 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp @@ -30,7 +30,7 @@ int main( int, char** argv ) src = imread( argv[1], 1 ); /// Convert image to gray and blur it - cvtColor( src, src_gray, CV_BGR2GRAY ); + cvtColor( src, src_gray, COLOR_BGR2GRAY ); blur( src_gray, src_gray, Size(3,3) ); /// Create Window @@ -57,7 +57,7 @@ void thresh_callback(int, void* ) /// Detect edges using Threshold threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY ); /// Find contours - findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) ); + findContours( threshold_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) ); /// Approximate contours to polygons + get bounding rects and circles vector > contours_poly( contours.size() ); diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp index 70d8663c9b..cb93de231a 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp @@ -30,7 +30,7 @@ int main( int, char** argv ) src = imread( argv[1], 1 ); /// Convert image to gray and blur it - cvtColor( src, src_gray, CV_BGR2GRAY ); + cvtColor( src, src_gray, COLOR_BGR2GRAY ); blur( src_gray, src_gray, Size(3,3) ); /// Create Window @@ -57,7 +57,7 @@ void thresh_callback(int, void* ) /// Detect 
edges using Threshold threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY ); /// Find contours - findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) ); + findContours( threshold_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) ); /// Find the rotated rectangles and ellipses for each contour vector minRect( contours.size() ); diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp index 8fe5d5b075..35c6f4db39 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp @@ -30,7 +30,7 @@ int main( int, char** argv ) src = imread( argv[1], 1 ); /// Convert image to gray and blur it - cvtColor( src, src_gray, CV_BGR2GRAY ); + cvtColor( src, src_gray, COLOR_BGR2GRAY ); blur( src_gray, src_gray, Size(3,3) ); /// Create Window @@ -59,7 +59,7 @@ void thresh_callback(int, void* ) threshold( src_gray, threshold_output, thresh, 255, THRESH_BINARY ); /// Find contours - findContours( threshold_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) ); + findContours( threshold_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) ); /// Find the convex hull object for each contour vector >hull( contours.size() ); diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp index efd3c63d9e..277a06040e 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp @@ -30,7 +30,7 @@ int main( int, char** argv ) src = imread( argv[1], 1 ); /// Convert image to gray and blur it - cvtColor( src, src_gray, CV_BGR2GRAY ); + cvtColor( src, src_gray, COLOR_BGR2GRAY ); blur( src_gray, src_gray, Size(3,3) ); /// Create Window @@ -57,7 +57,7 @@ void thresh_callback(int, void* ) /// Detect edges using canny Canny( src_gray, canny_output, thresh, thresh*2, 3 ); /// Find contours - findContours( canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0) ); + findContours( canny_output, contours, hierarchy, RETR_TREE, CHAIN_APPROX_SIMPLE, Point(0, 0) ); /// Get the moments vector mu(contours.size() ); diff --git a/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp index b4789e3345..7184f94877 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp +++ b/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp @@ -40,7 +40,7 @@ int main( int, char** argv ) { /// Load source image and convert it to gray src = imread( argv[1], 1 ); - cvtColor( src, src_gray, CV_BGR2GRAY ); + cvtColor( src, src_gray, COLOR_BGR2GRAY ); /// Set some parameters int blockSize = 3; int apertureSize = 3; diff --git a/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp index 667ad7e32a..e77500bbfe 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp +++ b/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp @@ -31,7 +31,7 @@ int main( int, char** argv ) { /// Load source image and convert it to gray src = imread( argv[1], 1 ); - cvtColor( src, src_gray, CV_BGR2GRAY ); + cvtColor( src, src_gray, COLOR_BGR2GRAY ); /// Create a window and a trackbar namedWindow( source_window, 
CV_WINDOW_AUTOSIZE ); diff --git a/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp index 3b538119e8..d59d258aaf 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp +++ b/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp @@ -32,7 +32,7 @@ int main( int, char** argv ) { /// Load source image and convert it to gray src = imread( argv[1], 1 ); - cvtColor( src, src_gray, CV_BGR2GRAY ); + cvtColor( src, src_gray, COLOR_BGR2GRAY ); /// Create Window namedWindow( source_window, CV_WINDOW_AUTOSIZE ); diff --git a/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp index e399c0c8ee..0f852d28cc 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp +++ b/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp @@ -32,7 +32,7 @@ int main( int, char** argv ) { /// Load source image and convert it to gray src = imread( argv[1], 1 ); - cvtColor( src, src_gray, CV_BGR2GRAY ); + cvtColor( src, src_gray, COLOR_BGR2GRAY ); /// Create Window namedWindow( source_window, CV_WINDOW_AUTOSIZE ); diff --git a/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp b/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp index aeac2c8237..9b28f9d2ee 100644 --- a/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp +++ b/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp @@ -291,7 +291,7 @@ int main(int argc, char* argv[]) if( s.calibrationPattern == Settings::CHESSBOARD) { Mat viewGray; - cvtColor(view, viewGray, CV_BGR2GRAY); + cvtColor(view, viewGray, COLOR_BGR2GRAY); cornerSubPix( viewGray, pointBuf, Size(11,11), Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 )); } diff --git a/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp b/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp index dfb13bdf64..fd69228a03 100644 --- a/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp +++ b/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp @@ -46,7 +46,7 @@ int main( int argc, char** argv ) // convert image to YUV color space. The output image will be created automatically. 
Mat I_YUV; - cvtColor(I, I_YUV, CV_BGR2YCrCb); + cvtColor(I, I_YUV, COLOR_BGR2YCrCb); vector planes; // Use the STL's vector structure to store multiple Mat objects split(I_YUV, planes); // split the image into separate color planes (Y U V) @@ -115,7 +115,7 @@ int main( int argc, char** argv ) merge(planes, I_YUV); // now merge the results back - cvtColor(I_YUV, I, CV_YCrCb2BGR); // and produce the output RGB image + cvtColor(I_YUV, I, COLOR_YCrCb2BGR); // and produce the output RGB image namedWindow("image with grain", CV_WINDOW_AUTOSIZE); // use this to create images diff --git a/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp b/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp index 0f5cb6a445..7ada2777a8 100644 --- a/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp +++ b/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp @@ -68,7 +68,7 @@ void detectAndDisplay( Mat frame ) std::vector faces; Mat frame_gray; - cvtColor( frame, frame_gray, CV_BGR2GRAY ); + cvtColor( frame, frame_gray, COLOR_BGR2GRAY ); equalizeHist( frame_gray, frame_gray ); //-- Detect faces face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) ); diff --git a/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp b/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp index 8e9b9d3305..ffaf21123c 100644 --- a/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp +++ b/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp @@ -68,7 +68,7 @@ void detectAndDisplay( Mat frame ) std::vector faces; Mat frame_gray; - cvtColor( frame, frame_gray, CV_BGR2GRAY ); + cvtColor( frame, frame_gray, COLOR_BGR2GRAY ); equalizeHist( frame_gray, frame_gray ); //-- Detect faces diff --git a/samples/cpp/video_dmtx.cpp b/samples/cpp/video_dmtx.cpp index 10889248a7..c5eaf1e393 100644 --- a/samples/cpp/video_dmtx.cpp +++ b/samples/cpp/video_dmtx.cpp @@ -52,7 +52,7 @@ namespace if (frame.empty()) break; cv::Mat gray; - cv::cvtColor(frame,gray,CV_RGB2GRAY); + cv::cvtColor(frame,gray, COLOR_RGB2GRAY); vector codes; Mat corners; findDataMatrix(gray, codes, corners); diff --git a/samples/cpp/video_homography.cpp b/samples/cpp/video_homography.cpp index 01af565c46..c8388007dd 100644 --- a/samples/cpp/video_homography.cpp +++ b/samples/cpp/video_homography.cpp @@ -161,7 +161,7 @@ int main(int ac, char ** av) if (frame.empty()) break; - cvtColor(frame, gray, CV_RGB2GRAY); + cvtColor(frame, gray, COLOR_RGB2GRAY); detector.detect(gray, query_kpts); //Find interest points diff --git a/samples/cpp/watershed.cpp b/samples/cpp/watershed.cpp index 923b8bf839..7494754086 100644 --- a/samples/cpp/watershed.cpp +++ b/samples/cpp/watershed.cpp @@ -59,8 +59,8 @@ int main( int argc, char** argv ) namedWindow( "image", 1 ); img0.copyTo(img); - cvtColor(img, markerMask, CV_BGR2GRAY); - cvtColor(markerMask, imgGray, CV_GRAY2BGR); + cvtColor(img, markerMask, COLOR_BGR2GRAY); + cvtColor(markerMask, imgGray, COLOR_GRAY2BGR); markerMask = Scalar::all(0); imshow( "image", img ); setMouseCallback( "image", onMouse, 0 ); @@ -85,7 +85,7 @@ int main( int argc, char** argv ) vector > contours; vector hierarchy; - findContours(markerMask, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE); + findContours(markerMask, contours, hierarchy, RETR_CCOMP, CHAIN_APPROX_SIMPLE); if( contours.empty() ) continue; diff --git a/samples/gpu/cascadeclassifier.cpp b/samples/gpu/cascadeclassifier.cpp index dee9d16c72..4e01ba4c73 100644 --- 
a/samples/gpu/cascadeclassifier.cpp +++ b/samples/gpu/cascadeclassifier.cpp @@ -29,7 +29,7 @@ void convertAndResize(const T& src, T& gray, T& resized, double scale) { if (src.channels() == 3) { - cvtColor( src, gray, CV_BGR2GRAY ); + cvtColor( src, gray, COLOR_BGR2GRAY ); } else { @@ -272,7 +272,7 @@ int main(int argc, const char *argv[]) } cout << endl; - cvtColor(resized_cpu, frameDisp, CV_GRAY2BGR); + cvtColor(resized_cpu, frameDisp, COLOR_GRAY2BGR); displayState(frameDisp, helpScreen, useGPU, findLargestObject, filterRects, fps); imshow("result", frameDisp); diff --git a/samples/gpu/generalized_hough.cpp b/samples/gpu/generalized_hough.cpp index 3febbf0f5f..c8fae7c411 100644 --- a/samples/gpu/generalized_hough.cpp +++ b/samples/gpu/generalized_hough.cpp @@ -86,11 +86,11 @@ int main(int argc, const char* argv[]) Mat templ = loadImage(templName); Mat image = loadImage(imageName); - int method = GHT_POSITION; + int method = cv::GeneralizedHough::GHT_POSITION; if (estimateScale) - method += GHT_SCALE; + method += cv::GeneralizedHough::GHT_SCALE; if (estimateRotation) - method += GHT_ROTATION; + method += cv::GeneralizedHough::GHT_ROTATION; vector position; cv::TickMeter tm; diff --git a/samples/gpu/hog.cpp b/samples/gpu/hog.cpp index 59bddb187a..3e451e87fa 100644 --- a/samples/gpu/hog.cpp +++ b/samples/gpu/hog.cpp @@ -296,8 +296,8 @@ void App::run() workBegin(); // Change format of the image - if (make_gray) cvtColor(frame, img_aux, CV_BGR2GRAY); - else if (use_gpu) cvtColor(frame, img_aux, CV_BGR2BGRA); + if (make_gray) cvtColor(frame, img_aux, COLOR_BGR2GRAY); + else if (use_gpu) cvtColor(frame, img_aux, COLOR_BGR2BGRA); else frame.copyTo(img_aux); // Resize image @@ -351,8 +351,8 @@ void App::run() throw std::runtime_error("can't create video writer"); } - if (make_gray) cvtColor(img_to_show, img, CV_GRAY2BGR); - else cvtColor(img_to_show, img, CV_BGRA2BGR); + if (make_gray) cvtColor(img_to_show, img, COLOR_GRAY2BGR); + else cvtColor(img_to_show, img, COLOR_BGRA2BGR); video_writer << img; } diff --git a/samples/gpu/houghlines.cpp b/samples/gpu/houghlines.cpp index f104e16fa5..52c55994b1 100644 --- a/samples/gpu/houghlines.cpp +++ b/samples/gpu/houghlines.cpp @@ -34,7 +34,7 @@ int main(int argc, const char* argv[]) Canny(src, mask, 100, 200, 3); Mat dst_cpu; - cvtColor(mask, dst_cpu, CV_GRAY2BGR); + cvtColor(mask, dst_cpu, COLOR_GRAY2BGR); Mat dst_gpu = dst_cpu.clone(); vector lines_cpu; diff --git a/samples/gpu/morphology.cpp b/samples/gpu/morphology.cpp index 5863eac10f..13b15807a8 100644 --- a/samples/gpu/morphology.cpp +++ b/samples/gpu/morphology.cpp @@ -39,9 +39,9 @@ static void OpenClose(int, void*) int an = n > 0 ? 
n : -n; Mat element = getStructuringElement(element_shape, Size(an*2+1, an*2+1), Point(an, an) ); if( n < 0 ) - cv::gpu::morphologyEx(src, dst, CV_MOP_OPEN, element); + cv::gpu::morphologyEx(src, dst, MORPH_OPEN, element); else - cv::gpu::morphologyEx(src, dst, CV_MOP_CLOSE, element); + cv::gpu::morphologyEx(src, dst, MORPH_CLOSE, element); imshow("Open/Close",(Mat)dst); } @@ -84,7 +84,7 @@ int main( int argc, char** argv ) { // gpu support only 4th channel images GpuMat src4ch; - cv::gpu::cvtColor(src, src4ch, CV_BGR2BGRA); + cv::gpu::cvtColor(src, src4ch, COLOR_BGR2BGRA); src = src4ch; } diff --git a/samples/gpu/stereo_match.cpp b/samples/gpu/stereo_match.cpp index 4b849a8c03..edf8886ffa 100644 --- a/samples/gpu/stereo_match.cpp +++ b/samples/gpu/stereo_match.cpp @@ -163,8 +163,8 @@ void App::run() right_src = imread(p.right); if (left_src.empty()) throw runtime_error("can't open file \"" + p.left + "\""); if (right_src.empty()) throw runtime_error("can't open file \"" + p.right + "\""); - cvtColor(left_src, left, CV_BGR2GRAY); - cvtColor(right_src, right, CV_BGR2GRAY); + cvtColor(left_src, left, COLOR_BGR2GRAY); + cvtColor(right_src, right, COLOR_BGR2GRAY); d_left.upload(left); d_right.upload(right); @@ -193,8 +193,8 @@ void App::run() if (d_left.channels() > 1 || d_right.channels() > 1) { cout << "BM doesn't support color images\n"; - cvtColor(left_src, left, CV_BGR2GRAY); - cvtColor(right_src, right, CV_BGR2GRAY); + cvtColor(left_src, left, COLOR_BGR2GRAY); + cvtColor(right_src, right, COLOR_BGR2GRAY); cout << "image_channels: " << left.channels() << endl; d_left.upload(left); d_right.upload(right); @@ -262,8 +262,8 @@ void App::handleKey(char key) } else { - cvtColor(left_src, left, CV_BGR2GRAY); - cvtColor(right_src, right, CV_BGR2GRAY); + cvtColor(left_src, left, COLOR_BGR2GRAY); + cvtColor(right_src, right, COLOR_BGR2GRAY); } d_left.upload(left); d_right.upload(right); diff --git a/samples/ocl/facedetect.cpp b/samples/ocl/facedetect.cpp index df6308cb9e..b5bab18e86 100644 --- a/samples/ocl/facedetect.cpp +++ b/samples/ocl/facedetect.cpp @@ -197,7 +197,7 @@ void detectAndDraw( Mat& img, cv::ocl::oclMat image(img); cv::ocl::oclMat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 ); - cv::ocl::cvtColor( image, gray, CV_BGR2GRAY ); + cv::ocl::cvtColor( image, gray, COLOR_BGR2GRAY ); cv::ocl::resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR ); cv::ocl::equalizeHist( smallImg, smallImg ); diff --git a/samples/ocl/hog.cpp b/samples/ocl/hog.cpp index c1623db8b9..a10ad94cd2 100644 --- a/samples/ocl/hog.cpp +++ b/samples/ocl/hog.cpp @@ -296,8 +296,8 @@ void App::run() workBegin(); // Change format of the image - if (make_gray) cvtColor(frame, img_aux, CV_BGR2GRAY); - else if (use_gpu) cvtColor(frame, img_aux, CV_BGR2BGRA); + if (make_gray) cvtColor(frame, img_aux, COLOR_BGR2GRAY); + else if (use_gpu) cvtColor(frame, img_aux, COLOR_BGR2BGRA); else frame.copyTo(img_aux); // Resize image @@ -351,8 +351,8 @@ void App::run() throw std::runtime_error("can't create video writer"); } - if (make_gray) cvtColor(img_to_show, img, CV_GRAY2BGR); - else cvtColor(img_to_show, img, CV_BGRA2BGR); + if (make_gray) cvtColor(img_to_show, img, COLOR_GRAY2BGR); + else cvtColor(img_to_show, img, COLOR_BGRA2BGR); video_writer << img; } diff --git a/samples/ocl/squares.cpp b/samples/ocl/squares.cpp index 6b184161f7..16a085538e 100644 --- a/samples/ocl/squares.cpp +++ b/samples/ocl/squares.cpp @@ -88,7 +88,7 @@ static void findSquares( const Mat& image, vector >& squares ) } // 
find contours and store them all as a list - findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE); + findContours(gray, contours, RETR_LIST, CHAIN_APPROX_SIMPLE); vector approx; diff --git a/samples/ocl/surf_matcher.cpp b/samples/ocl/surf_matcher.cpp index ea6ee97cb2..ba4bc102f1 100644 --- a/samples/ocl/surf_matcher.cpp +++ b/samples/ocl/surf_matcher.cpp @@ -89,12 +89,12 @@ int main(int argc, char* argv[]) if(argc != 5) { cpu_img1 = imread("o.png"); - cvtColor(cpu_img1, cpu_img1_grey, CV_BGR2GRAY); + cvtColor(cpu_img1, cpu_img1_grey, COLOR_BGR2GRAY); img1 = cpu_img1_grey; CV_Assert(!img1.empty()); cpu_img2 = imread("r2.png"); - cvtColor(cpu_img2, cpu_img2_grey, CV_BGR2GRAY); + cvtColor(cpu_img2, cpu_img2_grey, COLOR_BGR2GRAY); img2 = cpu_img2_grey; } else @@ -104,14 +104,14 @@ int main(int argc, char* argv[]) if (string(argv[i]) == "--left") { cpu_img1 = imread(argv[++i]); - cvtColor(cpu_img1, cpu_img1_grey, CV_BGR2GRAY); + cvtColor(cpu_img1, cpu_img1_grey, COLOR_BGR2GRAY); img1 = cpu_img1_grey; CV_Assert(!img1.empty()); } else if (string(argv[i]) == "--right") { cpu_img2 = imread(argv[++i]); - cvtColor(cpu_img2, cpu_img2_grey, CV_BGR2GRAY); + cvtColor(cpu_img2, cpu_img2_grey, COLOR_BGR2GRAY); img2 = cpu_img2_grey; } else if (string(argv[i]) == "--help") From 0738ea7d0f84c2b3725f6fc721f03c533ba058a1 Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Sun, 7 Apr 2013 22:45:38 +0400 Subject: [PATCH 2/6] Make highgui.hpp independent from C API --- 3rdparty/ffmpeg/ffmpeg_version.cmake | 1 + cmake/OpenCVFindLibsVideo.cmake | 6 +- .../calib3d/test/test_chesscorners_timing.cpp | 15 +- modules/contrib/src/chamfermatching.cpp | 3 +- modules/core/include/opencv2/core.hpp | 43 +- modules/highgui/CMakeLists.txt | 1 + modules/highgui/include/opencv2/highgui.hpp | 496 ++++++++++++++---- .../include/opencv2/highgui/highgui_c.h | 3 - modules/highgui/perf/perf_output.cpp | 2 +- modules/highgui/src/cap.cpp | 13 +- modules/highgui/src/cap_ffmpeg.cpp | 2 +- modules/highgui/src/ffmpeg_codecs.hpp | 4 +- modules/highgui/src/precomp.hpp | 37 -- modules/highgui/src/window.cpp | 15 +- modules/highgui/test/test_ffmpeg.cpp | 10 +- modules/highgui/test/test_framecount.cpp | 4 +- modules/highgui/test/test_grfmt.cpp | 16 +- modules/highgui/test/test_gui.cpp | 4 +- modules/highgui/test/test_positioning.cpp | 10 +- modules/highgui/test/test_precomp.hpp | 1 + modules/highgui/test/test_video_io.cpp | 22 +- modules/highgui/test/test_video_pos.cpp | 10 +- modules/imgproc/test/test_color.cpp | 16 +- modules/imgproc/test/test_grabcut.cpp | 4 +- modules/imgproc/test/test_precomp.hpp | 4 +- .../java/generator/src/cpp/VideoCapture.cpp | 5 +- modules/legacy/src/oneway.cpp | 11 +- modules/objdetect/src/latentsvm.cpp | 6 +- .../objdetect/test/test_latentsvmdetector.cpp | 9 +- modules/ocl/perf/perf_canny.cpp | 2 +- modules/ocl/perf/perf_haar.cpp | 2 +- modules/photo/test/test_denoising.cpp | 16 +- modules/python/src2/cv2.cpp | 2 + modules/python/src2/cv2.cv.hpp | 1 + modules/python/src2/gen2.py | 1 + modules/video/test/test_camshift.cpp | 4 +- modules/video/test/test_optflowpyrlk.cpp | 20 +- modules/videostab/src/frame_source.cpp | 8 +- samples/c/adaptiveskindetector.cpp | 2 +- samples/c/bgfg_codebook.cpp | 2 +- samples/c/blobtrack_sample.cpp | 2 +- samples/c/convert_cascade.c | 2 +- samples/c/facedetect.cpp | 8 +- samples/c/fback_c.c | 2 +- samples/c/find_obj.cpp | 2 +- samples/c/find_obj_calonder.cpp | 4 +- samples/c/find_obj_ferns.cpp | 10 +- samples/c/latentsvmdetect.cpp | 4 +- samples/c/morphology.c | 2 +- 
samples/c/motempl.c | 2 +- samples/c/one_way_sample.cpp | 4 +- samples/c/smiledetect.cpp | 8 +- samples/cpp/Qt_sample/main.cpp | 45 +- samples/cpp/bgfg_segm.cpp | 8 +- samples/cpp/brief_match_test.cpp | 4 +- samples/cpp/calibration_artificial.cpp | 2 +- samples/cpp/camshiftdemo.cpp | 4 +- samples/cpp/contours2.cpp | 2 +- samples/cpp/convexhull.cpp | 4 +- samples/cpp/dbt_face_detection.cpp | 2 +- samples/cpp/delaunay2.cpp | 16 +- samples/cpp/demhist.cpp | 2 +- samples/cpp/dft.cpp | 4 +- samples/cpp/distrans.cpp | 2 +- samples/cpp/drawing.cpp | 6 +- samples/cpp/ffilldemo.cpp | 2 +- samples/cpp/fitellipse.cpp | 6 +- samples/cpp/freak_demo.cpp | 6 +- samples/cpp/generic_descriptor_match.cpp | 4 +- samples/cpp/grabcut.cpp | 30 +- samples/cpp/houghcircles.cpp | 4 +- samples/cpp/houghlines.cpp | 2 +- samples/cpp/hybridtrackingsample.cpp | 10 +- samples/cpp/image.cpp | 14 +- samples/cpp/image_alignment.cpp | 8 +- samples/cpp/inpaint.cpp | 6 +- samples/cpp/kmeans.cpp | 6 +- samples/cpp/laplace.cpp | 8 +- samples/cpp/linemod.cpp | 16 +- samples/cpp/lkdemo.cpp | 2 +- samples/cpp/matcher_simple.cpp | 4 +- samples/cpp/matching_to_many_images.cpp | 4 +- samples/cpp/meanshift_segmentation.cpp | 2 +- samples/cpp/minarea.cpp | 8 +- samples/cpp/morphology2.cpp | 2 +- samples/cpp/openni_capture.cpp | 48 +- samples/cpp/pca.cpp | 12 +- samples/cpp/phase_corr.cpp | 4 +- samples/cpp/points_classifier.cpp | 6 +- samples/cpp/segment_objects.cpp | 2 +- samples/cpp/select3dobj.cpp | 10 +- samples/cpp/squares.cpp | 2 +- samples/cpp/starter_imagelist.cpp | 18 +- samples/cpp/starter_video.cpp | 2 +- .../video-input-psnr-ssim.cpp | 20 +- .../HighGUI/video-write/video-write.cpp | 12 +- .../Histograms_Matching/EqualizeHist_Demo.cpp | 4 +- .../MatchTemplate_Demo.cpp | 4 +- .../calcBackProject_Demo1.cpp | 2 +- .../calcBackProject_Demo2.cpp | 4 +- .../Histograms_Matching/calcHist_Demo.cpp | 2 +- .../tutorial_code/ImgProc/Morphology_1.cpp | 6 +- .../tutorial_code/ImgProc/Morphology_2.cpp | 2 +- .../cpp/tutorial_code/ImgProc/Pyramids.cpp | 2 +- .../cpp/tutorial_code/ImgProc/Smoothing.cpp | 4 +- .../cpp/tutorial_code/ImgProc/Threshold.cpp | 2 +- .../ImgTrans/CannyDetector_Demo.cpp | 2 +- .../ImgTrans/Geometric_Transforms_Demo.cpp | 6 +- .../ImgTrans/HoughCircle_Demo.cpp | 2 +- .../ImgTrans/HoughLines_Demo.cpp | 8 +- .../tutorial_code/ImgTrans/Laplace_Demo.cpp | 2 +- .../cpp/tutorial_code/ImgTrans/Remap_Demo.cpp | 2 +- .../cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp | 2 +- .../ImgTrans/copyMakeBorder_demo.cpp | 2 +- .../tutorial_code/ImgTrans/filter2D_demo.cpp | 2 +- .../ShapeDescriptors/findContours_demo.cpp | 4 +- .../generalContours_demo1.cpp | 4 +- .../generalContours_demo2.cpp | 4 +- .../ShapeDescriptors/hull_demo.cpp | 4 +- .../ShapeDescriptors/moments_demo.cpp | 4 +- .../pointPolygonTest_demo.cpp | 4 +- .../TrackingMotion/cornerDetector_Demo.cpp | 4 +- .../TrackingMotion/cornerHarris_Demo.cpp | 4 +- .../TrackingMotion/cornerSubPix_Demo.cpp | 6 +- .../goodFeaturesToTrack_Demo.cpp | 4 +- .../camera_calibration/camera_calibration.cpp | 2 +- .../calib3d/stereoBM/SBM_Sample.cpp | 6 +- .../tutorial_code/core/Matrix/Drawing_1.cpp | 4 +- .../tutorial_code/core/Matrix/Drawing_2.cpp | 4 +- .../discrete_fourier_transform.cpp | 4 +- .../how_to_scan_images/how_to_scan_images.cpp | 4 +- .../interoperability_with_OpenCV_1.cpp | 6 +- .../mat_mask_operations.cpp | 12 +- .../features2D/SURF_FlannMatcher.cpp | 4 +- .../features2D/SURF_Homography.cpp | 4 +- .../features2D/SURF_descriptor.cpp | 4 +- .../features2D/SURF_detector.cpp | 4 +- 
.../display_image/display_image.cpp | 10 +- .../windows_visual_studio_Opencv/Test.cpp | 20 +- .../objectDetection/objectDetection.cpp | 2 + .../objectDetection/objectDetection2.cpp | 2 + samples/cpp/video_dmtx.cpp | 2 +- samples/cpp/videostab.cpp | 2 +- samples/cpp/watershed.cpp | 6 +- samples/gpu/hog.cpp | 2 +- samples/gpu/morphology.cpp | 2 +- samples/gpu/performance/tests.cpp | 8 +- samples/gpu/super_resolution.cpp | 2 +- samples/gpu/surf_keypoint_matcher.cpp | 4 +- samples/gpu/video_writer.cpp | 2 +- samples/ocl/facedetect.cpp | 3 + samples/ocl/hog.cpp | 2 +- 152 files changed, 899 insertions(+), 594 deletions(-) diff --git a/3rdparty/ffmpeg/ffmpeg_version.cmake b/3rdparty/ffmpeg/ffmpeg_version.cmake index 3f27077d6a..3cbcb394d0 100644 --- a/3rdparty/ffmpeg/ffmpeg_version.cmake +++ b/3rdparty/ffmpeg/ffmpeg_version.cmake @@ -1,3 +1,4 @@ +set(HAVE_FFMPEG 1) set(NEW_FFMPEG 1) set(HAVE_FFMPEG_CODEC 1) set(HAVE_FFMPEG_FORMAT 1) diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake index 1ae6a5102f..dbf2a2b7ea 100644 --- a/cmake/OpenCVFindLibsVideo.cmake +++ b/cmake/OpenCVFindLibsVideo.cmake @@ -29,7 +29,7 @@ if(WITH_GSTREAMER AND NOT WITH_GSTREAMER_1_X) set(GSTREAMER_RIFF_VERSION ${ALIASOF_gstreamer-riff-0.10_VERSION}) set(GSTREAMER_PBUTILS_VERSION ${ALIASOF_gstreamer-pbutils-0.10_VERSION}) endif() - + endif(WITH_GSTREAMER AND NOT WITH_GSTREAMER_1_X) # if gstreamer 0.10 was not found, or we specified we wanted 1.x, try to find it @@ -40,7 +40,7 @@ if(WITH_GSTREAMER_1_X OR NOT HAVE_GSTREAMER) CHECK_MODULE(gstreamer-app-1.0 HAVE_GSTREAMER_APP) CHECK_MODULE(gstreamer-riff-1.0 HAVE_GSTREAMER_RIFF) CHECK_MODULE(gstreamer-pbutils-1.0 HAVE_GSTREAMER_PBUTILS) - + if(HAVE_GSTREAMER_BASE AND HAVE_GSTREAMER_VIDEO AND HAVE_GSTREAMER_APP AND HAVE_GSTREAMER_RIFF AND HAVE_GSTREAMER_PBUTILS) set(HAVE_GSTREAMER TRUE) set(GSTREAMER_BASE_VERSION ${ALIASOF_gstreamer-base-1.0_VERSION}) @@ -49,7 +49,7 @@ if(WITH_GSTREAMER_1_X OR NOT HAVE_GSTREAMER) set(GSTREAMER_RIFF_VERSION ${ALIASOF_gstreamer-riff-1.0_VERSION}) set(GSTREAMER_PBUTILS_VERSION ${ALIASOF_gstreamer-pbutils-1.0_VERSION}) endif() - + endif(WITH_GSTREAMER_1_X OR NOT HAVE_GSTREAMER) # --- unicap --- diff --git a/modules/calib3d/test/test_chesscorners_timing.cpp b/modules/calib3d/test/test_chesscorners_timing.cpp index 6195c04b07..171a74857b 100644 --- a/modules/calib3d/test/test_chesscorners_timing.cpp +++ b/modules/calib3d/test/test_chesscorners_timing.cpp @@ -66,7 +66,7 @@ void CV_ChessboardDetectorTimingTest::run( int start_from ) CvMat* _v = 0; CvPoint2D32f* v; - IplImage* img = 0; + IplImage img; IplImage* gray = 0; IplImage* thresh = 0; @@ -105,9 +105,10 @@ void CV_ChessboardDetectorTimingTest::run( int start_from ) /* read the image */ sprintf( filename, "%s%s", filepath, imgname ); - img = cvLoadImage( filename ); + cv::Mat img2 = cv::imread( filename ); + img = img2; - if( !img ) + if( img2.empty() ) { ts->printf( cvtest::TS::LOG, "one of chessboard images can't be read: %s\n", filename ); if( max_idx == 1 ) @@ -120,9 +121,9 @@ void CV_ChessboardDetectorTimingTest::run( int start_from ) ts->printf(cvtest::TS::LOG, "%s: chessboard %d:\n", imgname, is_chessboard); - gray = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 ); - thresh = cvCreateImage( cvSize( img->width, img->height ), IPL_DEPTH_8U, 1 ); - cvCvtColor( img, gray, CV_BGR2GRAY ); + gray = cvCreateImage( cvSize( img.width, img.height ), IPL_DEPTH_8U, 1 ); + thresh = cvCreateImage( cvSize( img.width, img.height ), IPL_DEPTH_8U, 1 ); + 
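// Illustrative aside (not part of the patch): the pattern used in this test --
// decode with the C++ API, then hand a borrowed IplImage header to the legacy C
// calls -- looks like this in isolation:
//
//     cv::Mat img2 = cv::imread(filename);   // the Mat owns the pixel data
//     IplImage img = img2;                   // header only, shares the data, no copy
//     cvCvtColor(&img, gray, CV_BGR2GRAY);   // legacy C function keeps working
//
// The IplImage header stays valid only while img2 is alive, and since nothing was
// allocated for it, the former cvReleaseImage(&img) calls are dropped below.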
cvCvtColor( &img, gray, CV_BGR2GRAY ); count0 = pattern_size.width*pattern_size.height; @@ -164,7 +165,6 @@ void CV_ChessboardDetectorTimingTest::run( int start_from ) find_chessboard_time*1e-6, find_chessboard_time/num_pixels); cvReleaseMat( &_v ); - cvReleaseImage( &img ); cvReleaseImage( &gray ); cvReleaseImage( &thresh ); progress = update_progress( progress, idx-1, max_idx, 0 ); @@ -175,7 +175,6 @@ _exit_: /* release occupied memory */ cvReleaseMat( &_v ); cvReleaseFileStorage( &fs ); - cvReleaseImage( &img ); cvReleaseImage( &gray ); cvReleaseImage( &thresh ); diff --git a/modules/contrib/src/chamfermatching.cpp b/modules/contrib/src/chamfermatching.cpp index bd30ad9916..16ac947d63 100644 --- a/modules/contrib/src/chamfermatching.cpp +++ b/modules/contrib/src/chamfermatching.cpp @@ -912,8 +912,7 @@ void ChamferMatcher::Template::show() const #ifdef HAVE_OPENCV_HIGHGUI namedWindow("templ",1); imshow("templ",templ_color); - - cvWaitKey(0); + waitKey(); #else CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without GUI support"); #endif diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp index e75127a3ec..d9ecbfc2ae 100644 --- a/modules/core/include/opencv2/core.hpp +++ b/modules/core/include/opencv2/core.hpp @@ -496,67 +496,73 @@ CV_EXPORTS void randShuffle(InputOutputArray dst, double iterFactor = 1., RNG* r CV_EXPORTS_AS(randShuffle) void randShuffle_(InputOutputArray dst, double iterFactor = 1.); +enum { FILLED = -1, + LINE_4 = 4, + LINE_8 = 8, + LINE_AA = 16 + }; + //! draws the line segment (pt1, pt2) in the image CV_EXPORTS_W void line(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color, - int thickness = 1, int lineType = 8, int shift = 0); + int thickness = 1, int lineType = LINE_8, int shift = 0); //! draws the rectangle outline or a solid rectangle with the opposite corners pt1 and pt2 in the image CV_EXPORTS_W void rectangle(CV_IN_OUT Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness = 1, - int lineType = 8, int shift = 0); + int lineType = LINE_8, int shift = 0); //! draws the rectangle outline or a solid rectangle covering rec in the image CV_EXPORTS void rectangle(CV_IN_OUT Mat& img, Rect rec, const Scalar& color, int thickness = 1, - int lineType = 8, int shift = 0); + int lineType = LINE_8, int shift = 0); //! draws the circle outline or a solid circle in the image CV_EXPORTS_W void circle(CV_IN_OUT Mat& img, Point center, int radius, const Scalar& color, int thickness = 1, - int lineType = 8, int shift = 0); + int lineType = LINE_8, int shift = 0); //! draws an elliptic arc, ellipse sector or a rotated ellipse in the image CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, const Scalar& color, int thickness = 1, - int lineType = 8, int shift = 0); + int lineType = LINE_8, int shift = 0); //! draws a rotated ellipse in the image CV_EXPORTS_W void ellipse(CV_IN_OUT Mat& img, const RotatedRect& box, const Scalar& color, - int thickness = 1, int lineType = 8); + int thickness = 1, int lineType = LINE_8); //! draws a filled convex polygon in the image CV_EXPORTS void fillConvexPoly(Mat& img, const Point* pts, int npts, - const Scalar& color, int lineType = 8, + const Scalar& color, int lineType = LINE_8, int shift = 0); CV_EXPORTS_W void fillConvexPoly(InputOutputArray img, InputArray points, - const Scalar& color, int lineType = 8, + const Scalar& color, int lineType = LINE_8, int shift = 0); //! 
fills an area bounded by one or more polygons CV_EXPORTS void fillPoly(Mat& img, const Point** pts, const int* npts, int ncontours, - const Scalar& color, int lineType = 8, int shift = 0, + const Scalar& color, int lineType = LINE_8, int shift = 0, Point offset = Point() ); CV_EXPORTS_W void fillPoly(InputOutputArray img, InputArrayOfArrays pts, - const Scalar& color, int lineType = 8, int shift = 0, + const Scalar& color, int lineType = LINE_8, int shift = 0, Point offset = Point() ); //! draws one or more polygonal curves CV_EXPORTS void polylines(Mat& img, const Point* const* pts, const int* npts, int ncontours, bool isClosed, const Scalar& color, - int thickness = 1, int lineType = 8, int shift = 0 ); + int thickness = 1, int lineType = LINE_8, int shift = 0 ); CV_EXPORTS_W void polylines(InputOutputArray img, InputArrayOfArrays pts, bool isClosed, const Scalar& color, - int thickness = 1, int lineType = 8, int shift = 0 ); + int thickness = 1, int lineType = LINE_8, int shift = 0 ); //! draws contours in the image CV_EXPORTS_W void drawContours( InputOutputArray image, InputArrayOfArrays contours, int contourIdx, const Scalar& color, - int thickness = 1, int lineType = 8, + int thickness = 1, int lineType = LINE_8, InputArray hierarchy = noArray(), int maxLevel = INT_MAX, Point offset = Point() ); @@ -587,7 +593,7 @@ enum //! renders text string in the image CV_EXPORTS_W void putText( Mat& img, const String& text, Point org, int fontFace, double fontScale, Scalar color, - int thickness = 1, int lineType = 8, + int thickness = 1, int lineType = LINE_8, bool bottomLeftOrigin = false ); //! returns bounding box of the text string @@ -631,9 +637,9 @@ CV_EXPORTS ConvertScaleData getConvertScaleElem(int fromType, int toType); PCA pca(pcaset, // pass the data Mat(), // we do not have a pre-computed mean vector, // so let the PCA engine to compute it - CV_PCA_DATA_AS_ROW, // indicate that the vectors + PCA::DATA_AS_ROW, // indicate that the vectors // are stored as matrix rows - // (use CV_PCA_DATA_AS_COL if the vectors are + // (use PCA::DATA_AS_COL if the vectors are // the matrix columns) maxComponents // specify, how many principal components to retain ); @@ -663,6 +669,11 @@ CV_EXPORTS ConvertScaleData getConvertScaleElem(int fromType, int toType); class CV_EXPORTS PCA { public: + enum { DATA_AS_ROW = 0, + DATA_AS_COL = 1, + USE_AVG = 2 + }; + //! 
default constructor PCA(); diff --git a/modules/highgui/CMakeLists.txt b/modules/highgui/CMakeLists.txt index 166a834692..5955ab6c13 100644 --- a/modules/highgui/CMakeLists.txt +++ b/modules/highgui/CMakeLists.txt @@ -221,6 +221,7 @@ endif() if(WIN32) link_directories("${OpenCV_SOURCE_DIR}/3rdparty/lib") # for ffmpeg wrapper only include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include") # for directshow in VS2005 and multi-monitor support on MinGW + include_directories(AFTER SYSTEM "${OpenCV_SOURCE_DIR}/3rdparty/include/ffmpeg_") # for tests endif() if(UNIX) diff --git a/modules/highgui/include/opencv2/highgui.hpp b/modules/highgui/include/opencv2/highgui.hpp index 19fed92960..693ff7906f 100644 --- a/modules/highgui/include/opencv2/highgui.hpp +++ b/modules/highgui/include/opencv2/highgui.hpp @@ -43,32 +43,81 @@ #ifndef __OPENCV_HIGHGUI_HPP__ #define __OPENCV_HIGHGUI_HPP__ -#include "opencv2/highgui/highgui_c.h" - -#ifdef __cplusplus #include "opencv2/core.hpp" -struct CvCapture; -struct CvVideoWriter; +///////////////////////// graphical user interface ////////////////////////// namespace cv { -enum { - // Flags for namedWindow - WINDOW_NORMAL = CV_WINDOW_NORMAL, // the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size - WINDOW_AUTOSIZE = CV_WINDOW_AUTOSIZE, // the user cannot resize the window, the size is constrainted by the image displayed - WINDOW_OPENGL = CV_WINDOW_OPENGL, // window with opengl support +// Flags for namedWindow +enum { WINDOW_NORMAL = 0x00000000, // the user can resize the window (no constraint) / also use to switch a fullscreen window to a normal size + WINDOW_AUTOSIZE = 0x00000001, // the user cannot resize the window, the size is constrainted by the image displayed + WINDOW_OPENGL = 0x00001000, // window with opengl support + + WINDOW_FULLSCREEN = 1, // change the window to fullscreen + WINDOW_FREERATIO = 0x00000100, // the image expends as much as it can (no ratio constraint) + WINDOW_KEEPRATIO = 0x00000000 // the ratio of the image is respected + }; + +// Flags for set / getWindowProperty +enum { WND_PROP_FULLSCREEN = 0, // fullscreen property (can be WINDOW_NORMAL or WINDOW_FULLSCREEN) + WND_PROP_AUTOSIZE = 1, // autosize property (can be WINDOW_NORMAL or WINDOW_AUTOSIZE) + WND_PROP_ASPECT_RATIO = 2, // window's aspect ration (can be set to WINDOW_FREERATIO or WINDOW_KEEPRATIO); + WND_PROP_OPENGL = 3 // opengl support + }; + +enum { EVENT_MOUSEMOVE = 0, + EVENT_LBUTTONDOWN = 1, + EVENT_RBUTTONDOWN = 2, + EVENT_MBUTTONDOWN = 3, + EVENT_LBUTTONUP = 4, + EVENT_RBUTTONUP = 5, + EVENT_MBUTTONUP = 6, + EVENT_LBUTTONDBLCLK = 7, + EVENT_RBUTTONDBLCLK = 8, + EVENT_MBUTTONDBLCLK = 9 + }; + +enum { EVENT_FLAG_LBUTTON = 1, + EVENT_FLAG_RBUTTON = 2, + EVENT_FLAG_MBUTTON = 4, + EVENT_FLAG_CTRLKEY = 8, + EVENT_FLAG_SHIFTKEY = 16, + EVENT_FLAG_ALTKEY = 32 + }; + +// Qt font +enum { QT_FONT_LIGHT = 25, //QFont::Light, + QT_FONT_NORMAL = 50, //QFont::Normal, + QT_FONT_DEMIBOLD = 63, //QFont::DemiBold, + QT_FONT_BOLD = 75, //QFont::Bold, + QT_FONT_BLACK = 87 //QFont::Black + }; + +// Qt font style +enum { QT_STYLE_NORMAL = 0, //QFont::StyleNormal, + QT_STYLE_ITALIC = 1, //QFont::StyleItalic, + QT_STYLE_OBLIQUE = 2 //QFont::StyleOblique + }; + +// Qt "button" type +enum { QT_PUSH_BUTTON = 0, + QT_CHECKBOX = 1, + QT_RADIOBOX = 2 + }; + + +typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata); +typedef void (*TrackbarCallback)(int pos, void* userdata); +typedef void 
(*OpenGlDrawCallback)(void* userdata); +typedef void (*ButtonCallback)(int state, void* userdata); - // Flags for set / getWindowProperty - WND_PROP_FULLSCREEN = CV_WND_PROP_FULLSCREEN, // fullscreen property - WND_PROP_AUTOSIZE = CV_WND_PROP_AUTOSIZE, // autosize property - WND_PROP_ASPECT_RATIO = CV_WND_PROP_ASPECTRATIO, // window's aspect ration - WND_PROP_OPENGL = CV_WND_PROP_OPENGL // opengl support -}; CV_EXPORTS_W void namedWindow(const String& winname, int flags = WINDOW_AUTOSIZE); + CV_EXPORTS_W void destroyWindow(const String& winname); + CV_EXPORTS_W void destroyAllWindows(); CV_EXPORTS_W int startWindowThread(); @@ -78,123 +127,373 @@ CV_EXPORTS_W int waitKey(int delay = 0); CV_EXPORTS_W void imshow(const String& winname, InputArray mat); CV_EXPORTS_W void resizeWindow(const String& winname, int width, int height); + CV_EXPORTS_W void moveWindow(const String& winname, int x, int y); -CV_EXPORTS_W void setWindowProperty(const String& winname, int prop_id, double prop_value);//YV -CV_EXPORTS_W double getWindowProperty(const String& winname, int prop_id);//YV +CV_EXPORTS_W void setWindowProperty(const String& winname, int prop_id, double prop_value); -enum -{ - EVENT_MOUSEMOVE =0, - EVENT_LBUTTONDOWN =1, - EVENT_RBUTTONDOWN =2, - EVENT_MBUTTONDOWN =3, - EVENT_LBUTTONUP =4, - EVENT_RBUTTONUP =5, - EVENT_MBUTTONUP =6, - EVENT_LBUTTONDBLCLK =7, - EVENT_RBUTTONDBLCLK =8, - EVENT_MBUTTONDBLCLK =9 -}; - -enum -{ - EVENT_FLAG_LBUTTON =1, - EVENT_FLAG_RBUTTON =2, - EVENT_FLAG_MBUTTON =4, - EVENT_FLAG_CTRLKEY =8, - EVENT_FLAG_SHIFTKEY =16, - EVENT_FLAG_ALTKEY =32 -}; - -typedef void (*MouseCallback)(int event, int x, int y, int flags, void* userdata); +CV_EXPORTS_W double getWindowProperty(const String& winname, int prop_id); //! assigns callback for mouse events CV_EXPORTS void setMouseCallback(const String& winname, MouseCallback onMouse, void* userdata = 0); - -typedef void (CV_CDECL *TrackbarCallback)(int pos, void* userdata); - CV_EXPORTS int createTrackbar(const String& trackbarname, const String& winname, int* value, int count, TrackbarCallback onChange = 0, void* userdata = 0); CV_EXPORTS_W int getTrackbarPos(const String& trackbarname, const String& winname); + CV_EXPORTS_W void setTrackbarPos(const String& trackbarname, const String& winname, int pos); -// OpenGL support -typedef void (*OpenGlDrawCallback)(void* userdata); +// OpenGL support CV_EXPORTS void setOpenGlDrawCallback(const String& winname, OpenGlDrawCallback onOpenGlDraw, void* userdata = 0); CV_EXPORTS void setOpenGlContext(const String& winname); CV_EXPORTS void updateWindow(const String& winname); -//Only for Qt -CV_EXPORTS CvFont fontQt(const String& nameFont, int pointSize=-1, - Scalar color=Scalar::all(0), int weight=CV_FONT_NORMAL, - int style=CV_STYLE_NORMAL, int spacing=0); -CV_EXPORTS void addText( const Mat& img, const String& text, Point org, CvFont font); +// Only for Qt -CV_EXPORTS void displayOverlay(const String& winname, const String& text, int delayms CV_DEFAULT(0)); -CV_EXPORTS void displayStatusBar(const String& winname, const String& text, int delayms CV_DEFAULT(0)); +struct QtFont +{ + const char* nameFont; // Qt: nameFont + Scalar color; // Qt: ColorFont -> cvScalar(blue_component, green_component, red\_component[, alpha_component]) + int font_face; // Qt: bool italic + const int* ascii; // font data and metrics + const int* greek; + const int* cyrillic; + float hscale, vscale; + float shear; // slope coefficient: 0 - normal, >0 - italic + int thickness; // Qt: weight + float dx; // horizontal 
interval between letters + int line_type; // Qt: PointSize +}; + +CV_EXPORTS QtFont fontQt(const String& nameFont, int pointSize = -1, + Scalar color = Scalar::all(0), int weight = QT_FONT_NORMAL, + int style = QT_STYLE_NORMAL, int spacing = 0); + +CV_EXPORTS void addText( const Mat& img, const String& text, Point org, const QtFont& font); + +CV_EXPORTS void displayOverlay(const String& winname, const String& text, int delayms = 0); + +CV_EXPORTS void displayStatusBar(const String& winname, const String& text, int delayms = 0); CV_EXPORTS void saveWindowParameters(const String& windowName); + CV_EXPORTS void loadWindowParameters(const String& windowName); + CV_EXPORTS int startLoop(int (*pt2Func)(int argc, char *argv[]), int argc, char* argv[]); + CV_EXPORTS void stopLoop(); -typedef void (CV_CDECL *ButtonCallback)(int state, void* userdata); CV_EXPORTS int createButton( const String& bar_name, ButtonCallback on_change, - void* userdata=NULL, int type=CV_PUSH_BUTTON, - bool initial_button_state=0); + void* userdata = 0, int type = QT_PUSH_BUTTON, + bool initial_button_state = false); -//------------------------- +} // cv -enum + + +//////////////////////////////// image codec //////////////////////////////// +namespace cv { - // 8bit, color or not - IMREAD_UNCHANGED =-1, - // 8bit, gray - IMREAD_GRAYSCALE =0, - // ?, color - IMREAD_COLOR =1, - // any depth, ? - IMREAD_ANYDEPTH =2, - // ?, any color - IMREAD_ANYCOLOR =4 -}; -enum -{ - IMWRITE_JPEG_QUALITY =1, - IMWRITE_PNG_COMPRESSION =16, - IMWRITE_PNG_STRATEGY =17, - IMWRITE_PNG_BILEVEL =18, - IMWRITE_PNG_STRATEGY_DEFAULT =0, - IMWRITE_PNG_STRATEGY_FILTERED =1, - IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY =2, - IMWRITE_PNG_STRATEGY_RLE =3, - IMWRITE_PNG_STRATEGY_FIXED =4, - IMWRITE_PXM_BINARY =32 -}; +enum { IMREAD_UNCHANGED = -1, // 8bit, color or not + IMREAD_GRAYSCALE = 0, // 8bit, gray + IMREAD_COLOR = 1, // ?, color + IMREAD_ANYDEPTH = 2, // any depth, ? 
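The declarations above replace the CV_WINDOW_* / CV_EVENT_* macros and the CvFont-based Qt helpers with plain C++ enums and typedefs. A small usage sketch, not part of the patch, with an arbitrary window name and trackbar range:

    #include <cstdio>
    #include "opencv2/core.hpp"
    #include "opencv2/highgui.hpp"

    using namespace cv;

    static void onMouse(int event, int x, int y, int flags, void*)
    {
        if (event == EVENT_LBUTTONDOWN && (flags & EVENT_FLAG_CTRLKEY)) // was CV_EVENT_* / CV_EVENT_FLAG_*
            std::printf("ctrl+click at (%d, %d)\n", x, y);
    }

    static void gui_sketch(const Mat& img)
    {
        namedWindow("preview", WINDOW_NORMAL);    // was CV_WINDOW_NORMAL
        setMouseCallback("preview", onMouse);
        int level = 50;
        createTrackbar("level", "preview", &level, 100);
        imshow("preview", img);
        waitKey();                                // was cvWaitKey(0)
        destroyAllWindows();
    }
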
+ IMREAD_ANYCOLOR = 4 // ?, any color + }; + +enum { IMWRITE_JPEG_QUALITY = 1, + IMWRITE_PNG_COMPRESSION = 16, + IMWRITE_PNG_STRATEGY = 17, + IMWRITE_PNG_BILEVEL = 18, + IMWRITE_PXM_BINARY = 32, + IMWRITE_WEBP_QUALITY = 64 + }; + +enum { IMWRITE_PNG_STRATEGY_DEFAULT = 0, + IMWRITE_PNG_STRATEGY_FILTERED = 1, + IMWRITE_PNG_STRATEGY_HUFFMAN_ONLY = 2, + IMWRITE_PNG_STRATEGY_RLE = 3, + IMWRITE_PNG_STRATEGY_FIXED = 4 + }; + +CV_EXPORTS_W Mat imread( const String& filename, int flags = IMREAD_COLOR ); -CV_EXPORTS_W Mat imread( const String& filename, int flags=1 ); CV_EXPORTS_W bool imwrite( const String& filename, InputArray img, - const std::vector& params=std::vector()); + const std::vector& params = std::vector()); + CV_EXPORTS_W Mat imdecode( InputArray buf, int flags ); -CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst ); + +CV_EXPORTS Mat imdecode( InputArray buf, int flags, Mat* dst); + CV_EXPORTS_W bool imencode( const String& ext, InputArray img, CV_OUT std::vector& buf, - const std::vector& params=std::vector()); + const std::vector& params = std::vector()); -#ifndef CV_NO_VIDEO_CAPTURE_CPP_API +} // cv + + + +////////////////////////////////// video io ///////////////////////////////// + +typedef struct CvCapture CvCapture; +typedef struct CvVideoWriter CvVideoWriter; + +namespace cv +{ + +// Camera API +enum { CAP_ANY = 0, // autodetect + CAP_VFW = 200, // platform native + CAP_V4L = 200, + CAP_V4L2 = CAP_V4L, + CAP_FIREWARE = 300, // IEEE 1394 drivers + CAP_FIREWIRE = CAP_FIREWARE, + CAP_IEEE1394 = CAP_FIREWARE, + CAP_DC1394 = CAP_FIREWARE, + CAP_CMU1394 = CAP_FIREWARE, + CAP_QT = 500, // QuickTime + CAP_UNICAP = 600, // Unicap drivers + CAP_DSHOW = 700, // DirectShow (via videoInput) + CAP_PVAPI = 800, // PvAPI, Prosilica GigE SDK + CAP_OPENNI = 900, // OpenNI (for Kinect) + CAP_OPENNI_ASUS = 910, // OpenNI (for Asus Xtion) + CAP_ANDROID = 1000, // Android + CAP_XIAPI = 1100, // XIMEA Camera API + CAP_AVFOUNDATION = 1200, // AVFoundation framework for iOS (OS X Lion will have the same API) + CAP_GIGANETIX = 1300, // Smartek Giganetix GigEVisionSDK + CAP_MSMF = 1400 // Microsoft Media Foundation (via videoInput) + }; + +// generic properties (based on DC1394 properties) +enum { CAP_PROP_POS_MSEC =0, + CAP_PROP_POS_FRAMES =1, + CAP_PROP_POS_AVI_RATIO =2, + CAP_PROP_FRAME_WIDTH =3, + CAP_PROP_FRAME_HEIGHT =4, + CAP_PROP_FPS =5, + CAP_PROP_FOURCC =6, + CAP_PROP_FRAME_COUNT =7, + CAP_PROP_FORMAT =8, + CAP_PROP_MODE =9, + CAP_PROP_BRIGHTNESS =10, + CAP_PROP_CONTRAST =11, + CAP_PROP_SATURATION =12, + CAP_PROP_HUE =13, + CAP_PROP_GAIN =14, + CAP_PROP_EXPOSURE =15, + CAP_PROP_CONVERT_RGB =16, + CAP_PROP_WHITE_BALANCE_BLUE_U =17, + CAP_PROP_RECTIFICATION =18, + CAP_PROP_MONOCROME =19, + CAP_PROP_SHARPNESS =20, + CAP_PROP_AUTO_EXPOSURE =21, // DC1394: exposure control done by camera, user can adjust refernce level using this feature + CAP_PROP_GAMMA =22, + CAP_PROP_TEMPERATURE =23, + CAP_PROP_TRIGGER =24, + CAP_PROP_TRIGGER_DELAY =25, + CAP_PROP_WHITE_BALANCE_RED_V =26, + CAP_PROP_ZOOM =27, + CAP_PROP_FOCUS =28, + CAP_PROP_GUID =29, + CAP_PROP_ISO_SPEED =30, + CAP_PROP_BACKLIGHT =32, + CAP_PROP_PAN =33, + CAP_PROP_TILT =34, + CAP_PROP_ROLL =35, + CAP_PROP_IRIS =36, + CAP_PROP_SETTINGS =37 + }; + + +// DC1394 only +// modes of the controlling registers (can be: auto, manual, auto single push, absolute Latter allowed with any other mode) +// every feature can have only one mode turned on at a time +enum { CAP_PROP_DC1394_OFF = -4, //turn the feature off (not controlled manually nor 
automatically) + CAP_PROP_DC1394_MODE_MANUAL = -3, //set automatically when a value of the feature is set by the user + CAP_PROP_DC1394_MODE_AUTO = -2, + CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO = -1, + CAP_PROP_DC1394_MAX = 31 + }; + + +// OpenNI map generators +enum { CAP_OPENNI_DEPTH_GENERATOR = 1 << 31, + CAP_OPENNI_IMAGE_GENERATOR = 1 << 30, + CAP_OPENNI_GENERATORS_MASK = CAP_OPENNI_DEPTH_GENERATOR + CAP_OPENNI_IMAGE_GENERATOR + }; + +// Properties of cameras available through OpenNI interfaces +enum { CAP_PROP_OPENNI_OUTPUT_MODE = 100, + CAP_PROP_OPENNI_FRAME_MAX_DEPTH = 101, // in mm + CAP_PROP_OPENNI_BASELINE = 102, // in mm + CAP_PROP_OPENNI_FOCAL_LENGTH = 103, // in pixels + CAP_PROP_OPENNI_REGISTRATION = 104, // flag that synchronizes the remapping depth map to image map + // by changing depth generator's view point (if the flag is "on") or + // sets this view point to its normal one (if the flag is "off"). + CAP_PROP_OPENNI_REGISTRATION_ON = CAP_PROP_OPENNI_REGISTRATION, + CAP_PROP_OPENNI_APPROX_FRAME_SYNC = 105, + CAP_PROP_OPENNI_MAX_BUFFER_SIZE = 106, + CAP_PROP_OPENNI_CIRCLE_BUFFER = 107, + CAP_PROP_OPENNI_MAX_TIME_DURATION = 108, + CAP_PROP_OPENNI_GENERATOR_PRESENT = 109 + }; + +// OpenNI shortcats +enum { CAP_OPENNI_IMAGE_GENERATOR_PRESENT = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_GENERATOR_PRESENT, + CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE = CAP_OPENNI_IMAGE_GENERATOR + CAP_PROP_OPENNI_OUTPUT_MODE, + CAP_OPENNI_DEPTH_GENERATOR_BASELINE = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_BASELINE, + CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_FOCAL_LENGTH, + CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION = CAP_OPENNI_DEPTH_GENERATOR + CAP_PROP_OPENNI_REGISTRATION, + CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION_ON = CAP_OPENNI_DEPTH_GENERATOR_REGISTRATION + }; + +// OpenNI data given from depth generator +enum { CAP_OPENNI_DEPTH_MAP = 0, // Depth values in mm (CV_16UC1) + CAP_OPENNI_POINT_CLOUD_MAP = 1, // XYZ in meters (CV_32FC3) + CAP_OPENNI_DISPARITY_MAP = 2, // Disparity in pixels (CV_8UC1) + CAP_OPENNI_DISPARITY_MAP_32F = 3, // Disparity in pixels (CV_32FC1) + CAP_OPENNI_VALID_DEPTH_MASK = 4, // CV_8UC1 + + // Data given from RGB image generator + CAP_OPENNI_BGR_IMAGE = 5, + CAP_OPENNI_GRAY_IMAGE = 6 + }; + +// Supported output modes of OpenNI image generator +enum { CAP_OPENNI_VGA_30HZ = 0, + CAP_OPENNI_SXGA_15HZ = 1, + CAP_OPENNI_SXGA_30HZ = 2, + CAP_OPENNI_QVGA_30HZ = 3, + CAP_OPENNI_QVGA_60HZ = 4 + }; + + +// GStreamer +enum { CAP_PROP_GSTREAMER_QUEUE_LENGTH = 200 // default is 1 + }; + + +// PVAPI +enum { CAP_PROP_PVAPI_MULTICASTIP = 300 // ip for anable multicast master mode. 0 for disable multicast + }; + + +// Properties of cameras available through XIMEA SDK interface +enum { CAP_PROP_XI_DOWNSAMPLING = 400, // Change image resolution by binning or skipping. + CAP_PROP_XI_DATA_FORMAT = 401, // Output data format. + CAP_PROP_XI_OFFSET_X = 402, // Horizontal offset from the origin to the area of interest (in pixels). + CAP_PROP_XI_OFFSET_Y = 403, // Vertical offset from the origin to the area of interest (in pixels). + CAP_PROP_XI_TRG_SOURCE = 404, // Defines source of trigger. + CAP_PROP_XI_TRG_SOFTWARE = 405, // Generates an internal trigger. PRM_TRG_SOURCE must be set to TRG_SOFTWARE. 
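These CAP_* / CAP_PROP_* values keep the same numeric values as the old CV_CAP_* defines, so existing VideoCapture code only needs the prefix change. A sketch, not part of the patch, with a hypothetical input file:

    #include "opencv2/highgui.hpp"

    using namespace cv;

    static void capture_props_sketch()
    {
        VideoCapture cap("video.avi");                        // illustrative file name
        if (!cap.isOpened())
            return;

        int frames = (int)cap.get(CAP_PROP_FRAME_COUNT);      // was CV_CAP_PROP_FRAME_COUNT
        double fps = cap.get(CAP_PROP_FPS);
        cap.set(CAP_PROP_POS_FRAMES, frames / 2);             // seek to the middle frame

        Mat frame;
        cap >> frame;
        (void)fps;
    }
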
+ CAP_PROP_XI_GPI_SELECTOR = 406, // Selects general purpose input + CAP_PROP_XI_GPI_MODE = 407, // Set general purpose input mode + CAP_PROP_XI_GPI_LEVEL = 408, // Get general purpose level + CAP_PROP_XI_GPO_SELECTOR = 409, // Selects general purpose output + CAP_PROP_XI_GPO_MODE = 410, // Set general purpose output mode + CAP_PROP_XI_LED_SELECTOR = 411, // Selects camera signalling LED + CAP_PROP_XI_LED_MODE = 412, // Define camera signalling LED functionality + CAP_PROP_XI_MANUAL_WB = 413, // Calculates White Balance(must be called during acquisition) + CAP_PROP_XI_AUTO_WB = 414, // Automatic white balance + CAP_PROP_XI_AEAG = 415, // Automatic exposure/gain + CAP_PROP_XI_EXP_PRIORITY = 416, // Exposure priority (0.5 - exposure 50%, gain 50%). + CAP_PROP_XI_AE_MAX_LIMIT = 417, // Maximum limit of exposure in AEAG procedure + CAP_PROP_XI_AG_MAX_LIMIT = 418, // Maximum limit of gain in AEAG procedure + CAP_PROP_XI_AEAG_LEVEL = 419, // Average intensity of output signal AEAG should achieve(in %) + CAP_PROP_XI_TIMEOUT = 420 // Image capture timeout in milliseconds + }; + + +// Properties for Android cameras +enum { CAP_PROP_ANDROID_AUTOGRAB = 1024, + CAP_PROP_ANDROID_PREVIEW_SIZES_STRING = 1025, // readonly, tricky property, returns const char* indeed + CAP_PROP_ANDROID_PREVIEW_FORMAT = 1026, // readonly, tricky property, returns const char* indeed + CAP_PROP_ANDROID_FLASH_MODE = 8001, + CAP_PROP_ANDROID_FOCUS_MODE = 8002, + CAP_PROP_ANDROID_WHITE_BALANCE = 8003, + CAP_PROP_ANDROID_ANTIBANDING = 8004, + CAP_PROP_ANDROID_FOCAL_LENGTH = 8005, + CAP_PROP_ANDROID_FOCUS_DISTANCE_NEAR = 8006, + CAP_PROP_ANDROID_FOCUS_DISTANCE_OPTIMAL = 8007, + CAP_PROP_ANDROID_FOCUS_DISTANCE_FAR = 8008 + }; + + +// Android camera output formats +enum { CAP_ANDROID_COLOR_FRAME_BGR = 0, //BGR + CAP_ANDROID_COLOR_FRAME = CAP_ANDROID_COLOR_FRAME_BGR, + CAP_ANDROID_GREY_FRAME = 1, //Y + CAP_ANDROID_COLOR_FRAME_RGB = 2, + CAP_ANDROID_COLOR_FRAME_BGRA = 3, + CAP_ANDROID_COLOR_FRAME_RGBA = 4 + }; + + +// Android camera flash modes +enum { CAP_ANDROID_FLASH_MODE_AUTO = 0, + CAP_ANDROID_FLASH_MODE_OFF = 1, + CAP_ANDROID_FLASH_MODE_ON = 2, + CAP_ANDROID_FLASH_MODE_RED_EYE = 3, + CAP_ANDROID_FLASH_MODE_TORCH = 4 + }; + + +// Android camera focus modes +enum { CAP_ANDROID_FOCUS_MODE_AUTO = 0, + CAP_ANDROID_FOCUS_MODE_CONTINUOUS_VIDEO = 1, + CAP_ANDROID_FOCUS_MODE_EDOF = 2, + CAP_ANDROID_FOCUS_MODE_FIXED = 3, + CAP_ANDROID_FOCUS_MODE_INFINITY = 4, + CAP_ANDROID_FOCUS_MODE_MACRO = 5 + }; + + +// Android camera white balance modes +enum { CAP_ANDROID_WHITE_BALANCE_AUTO = 0, + CAP_ANDROID_WHITE_BALANCE_CLOUDY_DAYLIGHT = 1, + CAP_ANDROID_WHITE_BALANCE_DAYLIGHT = 2, + CAP_ANDROID_WHITE_BALANCE_FLUORESCENT = 3, + CAP_ANDROID_WHITE_BALANCE_INCANDESCENT = 4, + CAP_ANDROID_WHITE_BALANCE_SHADE = 5, + CAP_ANDROID_WHITE_BALANCE_TWILIGHT = 6, + CAP_ANDROID_WHITE_BALANCE_WARM_FLUORESCENT = 7 + }; + + +// Android camera antibanding modes +enum { CAP_ANDROID_ANTIBANDING_50HZ = 0, + CAP_ANDROID_ANTIBANDING_60HZ = 1, + CAP_ANDROID_ANTIBANDING_AUTO = 2, + CAP_ANDROID_ANTIBANDING_OFF = 3 + }; + + +// Properties of cameras available through AVFOUNDATION interface +enum { CAP_PROP_IOS_DEVICE_FOCUS = 9001, + CAP_PROP_IOS_DEVICE_EXPOSURE = 9002, + CAP_PROP_IOS_DEVICE_FLASH = 9003, + CAP_PROP_IOS_DEVICE_WHITEBALANCE = 9004, + CAP_PROP_IOS_DEVICE_TORCH = 9005 + }; + + +// Properties of cameras available through Smartek Giganetix Ethernet Vision interface +/* --- Vladimir Litvinenko (litvinenko.vladimir@gmail.com) --- */ +enum { 
CAP_PROP_GIGA_FRAME_OFFSET_X = 10001, + CAP_PROP_GIGA_FRAME_OFFSET_Y = 10002, + CAP_PROP_GIGA_FRAME_WIDTH_MAX = 10003, + CAP_PROP_GIGA_FRAME_HEIGH_MAX = 10004, + CAP_PROP_GIGA_FRAME_SENS_WIDTH = 10005, + CAP_PROP_GIGA_FRAME_SENS_HEIGH = 10006 + }; -template<> void CV_EXPORTS Ptr::delete_obj(); -template<> void CV_EXPORTS Ptr::delete_obj(); class CV_EXPORTS_W VideoCapture { @@ -210,7 +509,7 @@ public: CV_WRAP virtual void release(); CV_WRAP virtual bool grab(); - CV_WRAP virtual bool retrieve(CV_OUT Mat& image, int channel=0); + CV_WRAP virtual bool retrieve(CV_OUT Mat& image, int flag = 0); virtual VideoCapture& operator >> (CV_OUT Mat& image); CV_WRAP virtual bool read(CV_OUT Mat& image); @@ -227,24 +526,25 @@ class CV_EXPORTS_W VideoWriter public: CV_WRAP VideoWriter(); CV_WRAP VideoWriter(const String& filename, int fourcc, double fps, - Size frameSize, bool isColor=true); + Size frameSize, bool isColor = true); virtual ~VideoWriter(); CV_WRAP virtual bool open(const String& filename, int fourcc, double fps, - Size frameSize, bool isColor=true); + Size frameSize, bool isColor = true); CV_WRAP virtual bool isOpened() const; CV_WRAP virtual void release(); virtual VideoWriter& operator << (const Mat& image); CV_WRAP virtual void write(const Mat& image); + CV_WRAP static int fourcc(char c1, char c2, char c3, char c4); + protected: Ptr writer; }; -#endif +template<> void Ptr::delete_obj(); +template<> void Ptr::delete_obj(); -} - -#endif +} // cv #endif diff --git a/modules/highgui/include/opencv2/highgui/highgui_c.h b/modules/highgui/include/opencv2/highgui/highgui_c.h index c29a1dca70..66f8bd0e9a 100644 --- a/modules/highgui/include/opencv2/highgui/highgui_c.h +++ b/modules/highgui/include/opencv2/highgui/highgui_c.h @@ -570,9 +570,6 @@ CVAPI(CvVideoWriter*) cvCreateVideoWriter( const char* filename, int fourcc, double fps, CvSize frame_size, int is_color CV_DEFAULT(1)); -//CVAPI(CvVideoWriter*) cvCreateImageSequenceWriter( const char* filename, -// int is_color CV_DEFAULT(1)); - /* write frame to video file */ CVAPI(int) cvWriteFrame( CvVideoWriter* writer, const IplImage* image ); diff --git a/modules/highgui/perf/perf_output.cpp b/modules/highgui/perf/perf_output.cpp index 50d86adf78..ee26f3a425 100644 --- a/modules/highgui/perf/perf_output.cpp +++ b/modules/highgui/perf/perf_output.cpp @@ -23,7 +23,7 @@ PERF_TEST_P(VideoWriter_Writing, WriteFrame, string filename = getDataPath(get<0>(GetParam())); bool isColor = get<1>(GetParam()); - VideoWriter writer(cv::tempfile(".avi"), CV_FOURCC('X', 'V', 'I', 'D'), 25, cv::Size(640, 480), isColor); + VideoWriter writer(cv::tempfile(".avi"), VideoWriter::fourcc('X', 'V', 'I', 'D'), 25, cv::Size(640, 480), isColor); TEST_CYCLE() { Mat image = imread(filename, 1); writer << image; } diff --git a/modules/highgui/src/cap.cpp b/modules/highgui/src/cap.cpp index 0e33a81a81..8eb9d0d6ee 100644 --- a/modules/highgui/src/cap.cpp +++ b/modules/highgui/src/cap.cpp @@ -540,9 +540,9 @@ double VideoCapture::get(int propId) VideoWriter::VideoWriter() {} -VideoWriter::VideoWriter(const String& filename, int fourcc, double fps, Size frameSize, bool isColor) +VideoWriter::VideoWriter(const String& filename, int _fourcc, double fps, Size frameSize, bool isColor) { - open(filename, fourcc, fps, frameSize, isColor); + open(filename, _fourcc, fps, frameSize, isColor); } void VideoWriter::release() @@ -555,9 +555,9 @@ VideoWriter::~VideoWriter() release(); } -bool VideoWriter::open(const String& filename, int fourcc, double fps, Size frameSize, bool isColor) +bool 
VideoWriter::open(const String& filename, int _fourcc, double fps, Size frameSize, bool isColor) { - writer = cvCreateVideoWriter(filename.c_str(), fourcc, fps, frameSize, isColor); + writer = cvCreateVideoWriter(filename.c_str(), _fourcc, fps, frameSize, isColor); return isOpened(); } @@ -578,4 +578,9 @@ VideoWriter& VideoWriter::operator << (const Mat& image) return *this; } +int VideoWriter::fourcc(char c1, char c2, char c3, char c4) +{ + return (c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24); +} + } diff --git a/modules/highgui/src/cap_ffmpeg.cpp b/modules/highgui/src/cap_ffmpeg.cpp index 11bb80c8f1..8a370147f2 100644 --- a/modules/highgui/src/cap_ffmpeg.cpp +++ b/modules/highgui/src/cap_ffmpeg.cpp @@ -41,7 +41,7 @@ #include "precomp.hpp" -#ifdef HAVE_FFMPEG +#ifndef WIN32 #include "cap_ffmpeg_impl.hpp" #else #include "cap_ffmpeg_api.hpp" diff --git a/modules/highgui/src/ffmpeg_codecs.hpp b/modules/highgui/src/ffmpeg_codecs.hpp index ca7f55c656..02430ef114 100644 --- a/modules/highgui/src/ffmpeg_codecs.hpp +++ b/modules/highgui/src/ffmpeg_codecs.hpp @@ -61,7 +61,9 @@ extern "C" { #endif #ifdef WIN32 - #include +# define AVUTIL_COMMON_H +# define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) +# include #else // if the header path is not specified explicitly, let's deduce it diff --git a/modules/highgui/src/precomp.hpp b/modules/highgui/src/precomp.hpp index 5062be73c5..9c13ef1281 100644 --- a/modules/highgui/src/precomp.hpp +++ b/modules/highgui/src/precomp.hpp @@ -199,41 +199,4 @@ void cvSetRatioWindow_QT(const char* name,double prop_value); double cvGetOpenGlProp_QT(const char* name); #endif - - -/*namespace cv -{ - -class CV_EXPORTS BaseWindow -{ -public: - BaseWindow(const String& name, int flags=0); - virtual ~BaseWindow(); - virtual void close(); - virtual void show(const Mat& mat); - virtual void resize(Size size); - virtual void move(Point topleft); - virtual Size size() const; - virtual Point topLeft() const; - virtual void setGeometry(Point topLeft, Size size); - virtual void getGeometry(Point& topLeft, Size& size) const; - virtual String getTitle() const; - virtual void setTitle(const String& str); - virtual String getName() const; - virtual void setScaleMode(int mode); - virtual int getScaleMode(); - virtual void setScrollPos(double pos); - virtual double getScrollPos() const; - virtual void setScale(double scale); - virtual double getScale() const; - virtual Point getImageCoords(Point pos) const; - virtual Scalar getPixelValue(Point pos, const String& colorspace=String()) const; - - virtual void addTrackbar( const String& trackbar, int low, int high, int step ); -}; - -typedef Ptr Window; - -}*/ - #endif /* __HIGHGUI_H_ */ diff --git a/modules/highgui/src/window.cpp b/modules/highgui/src/window.cpp index 8de6dbfdef..fdaadc7cb6 100644 --- a/modules/highgui/src/window.cpp +++ b/modules/highgui/src/window.cpp @@ -342,15 +342,16 @@ CV_IMPL void cvUpdateWindow(const char*) #if defined (HAVE_QT) -CvFont cv::fontQt(const String& nameFont, int pointSize, Scalar color, int weight, int style, int /*spacing*/) +cv::QtFont cv::fontQt(const String& nameFont, int pointSize, Scalar color, int weight, int style, int /*spacing*/) { -return cvFontQt(nameFont.c_str(), pointSize,color,weight, style); + CvFont f = cvFontQt(nameFont.c_str(), pointSize,color,weight, style); + return *(cv::QtFont*)(&f); } -void cv::addText( const Mat& img, const String& text, Point org, CvFont font) +void cv::addText( const Mat& img, const String& text, Point 
org, const QtFont& font) { CvMat _img = img; - cvAddText( &_img, text.c_str(), org,&font); + cvAddText( &_img, text.c_str(), org, (CvFont*)&font); } void cv::displayStatusBar(const String& name, const String& text, int delayms) @@ -390,13 +391,13 @@ int cv::createButton(const String& button_name, ButtonCallback on_change, void* #else -CvFont cv::fontQt(const String&, int, Scalar, int, int, int) +cv::QtFont cv::fontQt(const String&, int, Scalar, int, int, int) { CV_Error(CV_StsNotImplemented, "The library is compiled without QT support"); - return CvFont(); + return QtFont(); } -void cv::addText( const Mat&, const String&, Point, CvFont) +void cv::addText( const Mat&, const String&, Point, const QtFont&) { CV_Error(CV_StsNotImplemented, "The library is compiled without QT support"); } diff --git a/modules/highgui/test/test_ffmpeg.cpp b/modules/highgui/test/test_ffmpeg.cpp index cd8356c517..20ba7c5cbb 100644 --- a/modules/highgui/test/test_ffmpeg.cpp +++ b/modules/highgui/test/test_ffmpeg.cpp @@ -95,15 +95,15 @@ public: double fps = fps0; Size frame_s = Size(img_c, img_r); - if( tag == CV_FOURCC('H', '2', '6', '1') ) + if( tag == VideoWriter::fourcc('H', '2', '6', '1') ) frame_s = Size(352, 288); - else if( tag == CV_FOURCC('H', '2', '6', '3') ) + else if( tag == VideoWriter::fourcc('H', '2', '6', '3') ) frame_s = Size(704, 576); /*else if( tag == CV_FOURCC('M', 'J', 'P', 'G') || tag == CV_FOURCC('j', 'p', 'e', 'g') ) frame_s = Size(1920, 1080);*/ - if( tag == CV_FOURCC('M', 'P', 'E', 'G') ) + if( tag == VideoWriter::fourcc('M', 'P', 'E', 'G') ) fps = 25; VideoWriter writer(filename, tag, fps, frame_s); @@ -201,7 +201,7 @@ public: std::string fileName = tempfile(stream.str().c_str()); files->operator[](i) = fileName; - writers->operator[](i) = new VideoWriter(fileName, CV_FOURCC('X','V','I','D'), 25.0f, FrameSize); + writers->operator[](i) = new VideoWriter(fileName, VideoWriter::fourcc('X','V','I','D'), 25.0f, FrameSize); CV_Assert(writers->operator[](i)->isOpened()); } @@ -311,7 +311,7 @@ public: CV_Assert(capture->isOpened()); const static double eps = 23.0; - unsigned int frameCount = static_cast(capture->get(CV_CAP_PROP_FRAME_COUNT)); + unsigned int frameCount = static_cast(capture->get(CAP_PROP_FRAME_COUNT)); CV_Assert(frameCount == WriteVideo_Invoker::FrameCount); Mat reference(CreateVideoWriterInvoker::FrameSize, CV_8UC3); diff --git a/modules/highgui/test/test_framecount.cpp b/modules/highgui/test/test_framecount.cpp index 69ebf5cf83..875ce6aca4 100644 --- a/modules/highgui/test/test_framecount.cpp +++ b/modules/highgui/test/test_framecount.cpp @@ -41,7 +41,7 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/highgui/highgui_c.h" #include using namespace cv; @@ -91,7 +91,7 @@ void CV_FramecountTest::run(int) FrameCount++; } - int framecount = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_COUNT); + int framecount = (int)cvGetCaptureProperty(cap, CAP_PROP_FRAME_COUNT); ts->printf(cvtest::TS::LOG, "\nFile information (video %d): \n"\ "\nName: big_buck_bunny.%s\nActual frame count: %d\n"\ diff --git a/modules/highgui/test/test_grfmt.cpp b/modules/highgui/test/test_grfmt.cpp index d050c46891..7fc80b2c17 100644 --- a/modules/highgui/test/test_grfmt.cpp +++ b/modules/highgui/test/test_grfmt.cpp @@ -113,7 +113,7 @@ public: imwrite(img_path, img); ts->printf(ts->LOG, "reading test image : %s\n", img_path.c_str()); - Mat img_test = imread(img_path, CV_LOAD_IMAGE_UNCHANGED); + Mat img_test = imread(img_path, IMREAD_UNCHANGED); if (img_test.empty()) 
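The test changes above and below simply swap the CV_LOAD_IMAGE_* / CV_IMWRITE_* macros for the new IMREAD_* / IMWRITE_* enums. For reference, a sketch of the updated codec API outside the tests (file names are illustrative, not from the patch):

    #include <vector>
    #include "opencv2/highgui.hpp"

    using namespace cv;

    static void codec_flags_sketch()
    {
        Mat img = imread("input.png", IMREAD_GRAYSCALE);    // was CV_LOAD_IMAGE_GRAYSCALE
        if (img.empty())
            return;

        std::vector<int> params;
        params.push_back(IMWRITE_PNG_COMPRESSION);          // was CV_IMWRITE_PNG_COMPRESSION
        params.push_back(3);                                // compression level 0-9
        imwrite("output.png", img, params);

        std::vector<uchar> buf;
        imencode(".png", img, buf, params);
        Mat decoded = imdecode(buf, IMREAD_UNCHANGED);      // was CV_LOAD_IMAGE_UNCHANGED
    }
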
ts->set_failed_test_info(ts->FAIL_MISMATCH); @@ -140,11 +140,11 @@ public: string filename = cv::tempfile(".jpg"); imwrite(filename, img); - img = imread(filename, CV_LOAD_IMAGE_UNCHANGED); + img = imread(filename, IMREAD_UNCHANGED); filename = string(ts->get_data_path() + "readwrite/test_" + char(k + 48) + "_c" + char(num_channels + 48) + ".jpg"); ts->printf(ts->LOG, "reading test image : %s\n", filename.c_str()); - Mat img_test = imread(filename, CV_LOAD_IMAGE_UNCHANGED); + Mat img_test = imread(filename, IMREAD_UNCHANGED); if (img_test.empty()) ts->set_failed_test_info(ts->FAIL_MISMATCH); @@ -171,7 +171,7 @@ public: string filename = cv::tempfile(".tiff"); imwrite(filename, img); ts->printf(ts->LOG, "reading test image : %s\n", filename.c_str()); - Mat img_test = imread(filename, CV_LOAD_IMAGE_UNCHANGED); + Mat img_test = imread(filename, IMREAD_UNCHANGED); if (img_test.empty()) ts->set_failed_test_info(ts->FAIL_MISMATCH); @@ -242,12 +242,12 @@ public: Mat im = Mat::zeros(1000,1000, CV_8U); //randu(im, 0, 256); vector param; - param.push_back(CV_IMWRITE_PNG_COMPRESSION); + param.push_back(IMWRITE_PNG_COMPRESSION); param.push_back(3); //default(3) 0-9. cv::imencode(".png" ,im ,buff, param); // hangs - Mat im2 = imdecode(buff,CV_LOAD_IMAGE_ANYDEPTH); + Mat im2 = imdecode(buff,IMREAD_ANYDEPTH); } catch(...) { @@ -375,7 +375,7 @@ TEST(Highgui_WebP, encode_decode_lossless_webp) remove(output.c_str()); - cv::Mat decode = cv::imdecode(buf, CV_LOAD_IMAGE_COLOR); + cv::Mat decode = cv::imdecode(buf, IMREAD_COLOR); ASSERT_FALSE(decode.empty()); EXPECT_TRUE(cv::norm(decode, img_webp, NORM_INF) == 0); @@ -394,7 +394,7 @@ TEST(Highgui_WebP, encode_decode_lossy_webp) for(int q = 100; q>=0; q-=10) { std::vector params; - params.push_back(CV_IMWRITE_WEBP_QUALITY); + params.push_back(IMWRITE_WEBP_QUALITY); params.push_back(q); string output = cv::tempfile(".webp"); diff --git a/modules/highgui/test/test_gui.cpp b/modules/highgui/test/test_gui.cpp index 106a64b873..e53accfe71 100644 --- a/modules/highgui/test/test_gui.cpp +++ b/modules/highgui/test/test_gui.cpp @@ -59,7 +59,7 @@ void Foo(int /*k*/, void* /*z*/) {} void CV_HighGuiOnlyGuiTest::run( int /*start_from */) { ts->printf(ts->LOG, "GUI 0\n"); - cvDestroyAllWindows(); + destroyAllWindows(); ts->printf(ts->LOG, "GUI 1\n"); namedWindow("Win"); @@ -84,7 +84,7 @@ void CV_HighGuiOnlyGuiTest::run( int /*start_from */) waitKey(500); ts->printf(ts->LOG, "GUI 8\n"); - cvDestroyAllWindows(); + destroyAllWindows(); ts->set_failed_test_info(cvtest::TS::OK); } diff --git a/modules/highgui/test/test_positioning.cpp b/modules/highgui/test/test_positioning.cpp index a13aab0f54..3968393536 100644 --- a/modules/highgui/test/test_positioning.cpp +++ b/modules/highgui/test/test_positioning.cpp @@ -41,7 +41,7 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/highgui/highgui_c.h" #include using namespace cv; @@ -88,7 +88,7 @@ CV_VideoRandomPositioningTest::~CV_VideoRandomPositioningTest() {} void CV_VideoPositioningTest::generate_idx_seq(CvCapture* cap, int method) { idx.clear(); - int N = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_FRAME_COUNT); + int N = (int)cvGetCaptureProperty(cap, CAP_PROP_FRAME_COUNT); switch(method) { case PROGRESSIVE: @@ -147,7 +147,7 @@ void CV_VideoPositioningTest::run_test(int method) failed_videos++; continue; } - cvSetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES, 0); + cvSetCaptureProperty(cap, CAP_PROP_POS_FRAMES, 0); generate_idx_seq(cap, method); @@ -157,7 +157,7 @@ void 
CV_VideoPositioningTest::run_test(int method) { bool flag = false; - cvSetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES, idx.at(j)); + cvSetCaptureProperty(cap, CAP_PROP_POS_FRAMES, idx.at(j)); /* IplImage* frame = cvRetrieveFrame(cap); @@ -173,7 +173,7 @@ void CV_VideoPositioningTest::run_test(int method) flag = !flag; } */ - int val = (int)cvGetCaptureProperty(cap, CV_CAP_PROP_POS_FRAMES); + int val = (int)cvGetCaptureProperty(cap, CAP_PROP_POS_FRAMES); if (idx.at(j) != val) { diff --git a/modules/highgui/test/test_precomp.hpp b/modules/highgui/test/test_precomp.hpp index 863cddcaf9..d904b4cb45 100644 --- a/modules/highgui/test/test_precomp.hpp +++ b/modules/highgui/test/test_precomp.hpp @@ -12,6 +12,7 @@ #include #include "opencv2/ts.hpp" #include "opencv2/imgproc.hpp" +#include "opencv2/highgui.hpp" #include "opencv2/imgproc/imgproc_c.h" #include "opencv2/core/private.hpp" diff --git a/modules/highgui/test/test_video_io.cpp b/modules/highgui/test/test_video_io.cpp index bdc032f0d2..8738cef3ee 100644 --- a/modules/highgui/test/test_video_io.cpp +++ b/modules/highgui/test/test_video_io.cpp @@ -41,7 +41,7 @@ //M*/ #include "test_precomp.hpp" -#include "opencv2/highgui.hpp" +#include "opencv2/highgui/highgui_c.h" using namespace cv; using namespace std; @@ -56,15 +56,15 @@ string fourccToString(int fourcc) const VideoFormat g_specific_fmt_list[] = { - VideoFormat("avi", CV_FOURCC('X', 'V', 'I', 'D')), - VideoFormat("avi", CV_FOURCC('M', 'P', 'E', 'G')), - VideoFormat("avi", CV_FOURCC('M', 'J', 'P', 'G')), - //VideoFormat("avi", CV_FOURCC('I', 'Y', 'U', 'V')), - VideoFormat("mkv", CV_FOURCC('X', 'V', 'I', 'D')), - VideoFormat("mkv", CV_FOURCC('M', 'P', 'E', 'G')), - VideoFormat("mkv", CV_FOURCC('M', 'J', 'P', 'G')), + VideoFormat("avi", VideoWriter::fourcc('X', 'V', 'I', 'D')), + VideoFormat("avi", VideoWriter::fourcc('M', 'P', 'E', 'G')), + VideoFormat("avi", VideoWriter::fourcc('M', 'J', 'P', 'G')), + //VideoFormat("avi", VideoWriter::fourcc('I', 'Y', 'U', 'V')), + VideoFormat("mkv", VideoWriter::fourcc('X', 'V', 'I', 'D')), + VideoFormat("mkv", VideoWriter::fourcc('M', 'P', 'E', 'G')), + VideoFormat("mkv", VideoWriter::fourcc('M', 'J', 'P', 'G')), - VideoFormat("mov", CV_FOURCC('m', 'p', '4', 'v')), + VideoFormat("mov", VideoWriter::fourcc('m', 'p', '4', 'v')), VideoFormat() }; @@ -416,7 +416,7 @@ void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFor for( size_t i = 0; i < IMAGE_COUNT; ++i ) { string file_path = format("%s../python/images/QCIF_%02d.bmp", dir.c_str(), i); - Mat img = imread(file_path, CV_LOAD_IMAGE_COLOR); + Mat img = imread(file_path, IMREAD_COLOR); if (img.empty()) { @@ -442,7 +442,7 @@ void CV_HighGuiTest::SpecificVideoTest(const string& dir, const cvtest::VideoFor writer.release(); VideoCapture cap(video_file); - size_t FRAME_COUNT = (size_t)cap.get(CV_CAP_PROP_FRAME_COUNT); + size_t FRAME_COUNT = (size_t)cap.get(CAP_PROP_FRAME_COUNT); if (FRAME_COUNT != IMAGE_COUNT ) { diff --git a/modules/highgui/test/test_video_pos.cpp b/modules/highgui/test/test_video_pos.cpp index a9fa36c250..37cf8039e0 100644 --- a/modules/highgui/test/test_video_pos.cpp +++ b/modules/highgui/test/test_video_pos.cpp @@ -110,9 +110,9 @@ public: return; } - int N0 = (int)cap.get(CV_CAP_PROP_FRAME_COUNT); - cap.set(CV_CAP_PROP_POS_FRAMES, 0); - int N = (int)cap.get(CV_CAP_PROP_FRAME_COUNT); + int N0 = (int)cap.get(CAP_PROP_FRAME_COUNT); + cap.set(CAP_PROP_POS_FRAMES, 0); + int N = (int)cap.get(CAP_PROP_FRAME_COUNT); if (N != n_frames || N != N0) { @@ -125,14 +125,14 @@ 
public: { int idx = theRNG().uniform(0, N); - if( !cap.set(CV_CAP_PROP_POS_FRAMES, idx) ) + if( !cap.set(CAP_PROP_POS_FRAMES, idx) ) { ts->printf(ts->LOG, "\nError: cannot seek to frame %d.\n", idx); ts->set_failed_test_info(ts->FAIL_INVALID_OUTPUT); return; } - int idx1 = (int)cap.get(CV_CAP_PROP_POS_FRAMES); + int idx1 = (int)cap.get(CAP_PROP_POS_FRAMES); Mat img; cap >> img; Mat img0 = drawFrame(idx); diff --git a/modules/imgproc/test/test_color.cpp b/modules/imgproc/test/test_color.cpp index 86f9c67564..0434c6c13b 100644 --- a/modules/imgproc/test/test_color.cpp +++ b/modules/imgproc/test/test_color.cpp @@ -1687,8 +1687,8 @@ TEST(Imgproc_ColorBayer, regression) { cvtest::TS* ts = cvtest::TS::ptr(); - Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", CV_LOAD_IMAGE_GRAYSCALE); - Mat gold = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_gold.png", CV_LOAD_IMAGE_UNCHANGED); + Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", IMREAD_GRAYSCALE); + Mat gold = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_gold.png", IMREAD_UNCHANGED); Mat result; CV_Assert(given.data != NULL && gold.data != NULL); @@ -1709,9 +1709,9 @@ TEST(Imgproc_ColorBayerVNG, regression) { cvtest::TS* ts = cvtest::TS::ptr(); - Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", CV_LOAD_IMAGE_GRAYSCALE); + Mat given = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", IMREAD_GRAYSCALE); string goldfname = string(ts->get_data_path()) + "/cvtcolor/bayerVNG_gold.png"; - Mat gold = imread(goldfname, CV_LOAD_IMAGE_UNCHANGED); + Mat gold = imread(goldfname, IMREAD_UNCHANGED); Mat result; CV_Assert(given.data != NULL); @@ -1804,7 +1804,7 @@ TEST(Imgproc_ColorBayerVNG_Strict, regression) Mat src, dst, bayer, reference; std::string full_path = parent_path + image_name; - src = imread(full_path, CV_LOAD_IMAGE_UNCHANGED); + src = imread(full_path, IMREAD_UNCHANGED); if (src.data == NULL) { @@ -1824,7 +1824,7 @@ TEST(Imgproc_ColorBayerVNG_Strict, regression) // reading a reference image full_path = parent_path + pattern[i] + image_name; - reference = imread(full_path, CV_LOAD_IMAGE_UNCHANGED); + reference = imread(full_path, IMREAD_UNCHANGED); if (reference.data == NULL) { imwrite(full_path, dst); @@ -2091,7 +2091,7 @@ TEST(ImgProc_BayerEdgeAwareDemosaicing, accuracy) Mat src, bayer; std::string full_path = parent_path + image_name; - src = imread(full_path, CV_LOAD_IMAGE_UNCHANGED); + src = imread(full_path, IMREAD_UNCHANGED); if (src.data == NULL) { @@ -2141,7 +2141,7 @@ TEST(ImgProc_BayerEdgeAwareDemosaicing, accuracy) TEST(ImgProc_Bayer2RGBA, accuracy) { cvtest::TS* ts = cvtest::TS::ptr(); - Mat raw = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", CV_LOAD_IMAGE_GRAYSCALE); + Mat raw = imread(string(ts->get_data_path()) + "/cvtcolor/bayer_input.png", IMREAD_GRAYSCALE); Mat rgb, reference; CV_Assert(raw.channels() == 1); diff --git a/modules/imgproc/test/test_grabcut.cpp b/modules/imgproc/test/test_grabcut.cpp index f1b1c080df..cfc17dc946 100644 --- a/modules/imgproc/test/test_grabcut.cpp +++ b/modules/imgproc/test/test_grabcut.cpp @@ -143,8 +143,8 @@ TEST(Imgproc_GrabCut, repeatability) { cvtest::TS& ts = *cvtest::TS::ptr(); - Mat image_1 = imread(string(ts.get_data_path()) + "grabcut/image1652.ppm", CV_LOAD_IMAGE_COLOR); - Mat mask_1 = imread(string(ts.get_data_path()) + "grabcut/mask1652.ppm", CV_LOAD_IMAGE_GRAYSCALE); + Mat image_1 = imread(string(ts.get_data_path()) + "grabcut/image1652.ppm", 
IMREAD_COLOR); + Mat mask_1 = imread(string(ts.get_data_path()) + "grabcut/mask1652.ppm", IMREAD_GRAYSCALE); Rect roi_1(0, 0, 150, 150); Mat image_2 = image_1.clone(); diff --git a/modules/imgproc/test/test_precomp.hpp b/modules/imgproc/test/test_precomp.hpp index 43866800a5..9650b7fcc8 100644 --- a/modules/imgproc/test/test_precomp.hpp +++ b/modules/imgproc/test/test_precomp.hpp @@ -12,8 +12,8 @@ #include #include "opencv2/ts.hpp" #include "opencv2/imgproc.hpp" -#include "opencv2/imgproc/imgproc_c.h" #include "opencv2/highgui.hpp" -#include "opencv2/highgui/highgui_c.h" + +#include "opencv2/imgproc/imgproc_c.h" #endif diff --git a/modules/java/generator/src/cpp/VideoCapture.cpp b/modules/java/generator/src/cpp/VideoCapture.cpp index f0e5c233b6..312d710202 100644 --- a/modules/java/generator/src/cpp/VideoCapture.cpp +++ b/modules/java/generator/src/cpp/VideoCapture.cpp @@ -4,7 +4,6 @@ #include "opencv2/opencv_modules.hpp" #ifdef HAVE_OPENCV_HIGHGUI -#include "opencv2/highgui/highgui_c.h" #include "opencv2/highgui.hpp" using namespace cv; @@ -394,7 +393,7 @@ JNIEXPORT jstring JNICALL Java_org_opencv_highgui_VideoCapture_n_1getSupportedPr VideoCapture* me = (VideoCapture*) self; //TODO: check for NULL union {double prop; const char* name;} u; - u.prop = me->get(CV_CAP_PROP_SUPPORTED_PREVIEW_SIZES_STRING); + u.prop = me->get(CAP_PROP_ANDROID_PREVIEW_SIZES_STRING); return env->NewStringUTF(u.name); } catch(cv::Exception e) { @@ -432,4 +431,4 @@ JNIEXPORT void JNICALL Java_org_opencv_highgui_VideoCapture_n_1delete } // extern "C" -#endif // HAVE_OPENCV_HIGHGUI \ No newline at end of file +#endif // HAVE_OPENCV_HIGHGUI diff --git a/modules/legacy/src/oneway.cpp b/modules/legacy/src/oneway.cpp index acb5e3cda9..7fa4bee33e 100644 --- a/modules/legacy/src/oneway.cpp +++ b/modules/legacy/src/oneway.cpp @@ -669,7 +669,7 @@ namespace cv{ cvConvertScale(m_samples[i], patch, 255/maxval); #ifdef HAVE_OPENCV_HIGHGUI - cvSaveImage(buf, patch); + cv::imwrite(buf, cv::cvarrToMat(patch)); #else CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without image I/O support"); #endif @@ -1801,17 +1801,16 @@ namespace cv{ sprintf(filename, "%s/%s", path, imagename); //printf("Reading image %s...", filename); - IplImage* img = 0; + IplImage img; #ifdef HAVE_OPENCV_HIGHGUI - img = cvLoadImage(filename, CV_LOAD_IMAGE_GRAYSCALE); + Mat img2 = cv::imread(filename, IMREAD_GRAYSCALE); + img = img2; #else CV_Error(CV_StsNotImplemented, "OpenCV has been compiled without image I/O support"); #endif //printf("done\n"); - extractPatches (img, patches, patch_size); - - cvReleaseImage(&img); + extractPatches (&img, patches, patch_size); } fclose(pFile); } diff --git a/modules/objdetect/src/latentsvm.cpp b/modules/objdetect/src/latentsvm.cpp index 521f0fdf56..641e634a6b 100644 --- a/modules/objdetect/src/latentsvm.cpp +++ b/modules/objdetect/src/latentsvm.cpp @@ -389,7 +389,7 @@ int showRootFilterBoxes(IplImage *image, color, thickness, line_type, shift); } #ifdef HAVE_OPENCV_HIGHGUI - cvShowImage("Initial image", image); + cv::imshow("Initial image", cv::cvarrToMat(image)); #endif return LATENT_SVM_OK; } @@ -445,7 +445,7 @@ int showPartFilterBoxes(IplImage *image, } } #ifdef HAVE_OPENCV_HIGHGUI - cvShowImage("Initial image", image); + cv::imshow("Initial image", cv::cvarrToMat(image)); #endif return LATENT_SVM_OK; } @@ -481,7 +481,7 @@ int showBoxes(IplImage *img, color, thickness, line_type, shift); } #ifdef HAVE_OPENCV_HIGHGUI - cvShowImage("Initial image", img); + cv::imshow("Initial image", cv::cvarrToMat(img)); 
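Where legacy code still holds an IplImage*, the ports above rely on cv::cvarrToMat to hand the data to the C++ highgui calls without a copy. A sketch of that bridge, not part of the patch (the output file name is illustrative):

    #include "opencv2/core.hpp"
    #include "opencv2/core/core_c.h"
    #include "opencv2/highgui.hpp"

    static void show_legacy_image_sketch(IplImage* image)
    {
        // cvarrToMat builds a Mat header over the IplImage data; nothing is copied,
        // so the IplImage must stay valid while the Mat is in use.
        cv::Mat view = cv::cvarrToMat(image);
        cv::imshow("Initial image", view);     // replaces cvShowImage(...)
        cv::imwrite("dump.png", view);         // replaces cvSaveImage(...)
        cv::waitKey(0);
    }
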
#endif return LATENT_SVM_OK; } diff --git a/modules/objdetect/test/test_latentsvmdetector.cpp b/modules/objdetect/test/test_latentsvmdetector.cpp index 52bc36d7df..9c4ed25340 100644 --- a/modules/objdetect/test/test_latentsvmdetector.cpp +++ b/modules/objdetect/test/test_latentsvmdetector.cpp @@ -82,8 +82,9 @@ void CV_LatentSVMDetectorTest::run( int /* start_from */) init.initialize(numThreads); #endif - IplImage* image = cvLoadImage(img_path.c_str()); - if (!image) + Mat image2 = cv::imread(img_path.c_str()); + IplImage image = image2; + if (image2.empty()) { ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); return; @@ -93,13 +94,12 @@ void CV_LatentSVMDetectorTest::run( int /* start_from */) if (!detector) { ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_TEST_DATA ); - cvReleaseImage(&image); return; } CvMemStorage* storage = cvCreateMemStorage(0); CvSeq* detections = 0; - detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads); + detections = cvLatentSvmDetectObjects(&image, detector, storage, 0.5f, numThreads); if (detections->total != num_detections) { ts->set_failed_test_info( cvtest::TS::FAIL_MISMATCH ); @@ -124,7 +124,6 @@ void CV_LatentSVMDetectorTest::run( int /* start_from */) #endif cvReleaseMemStorage( &storage ); cvReleaseLatentSvmDetector( &detector ); - cvReleaseImage( &image ); } // Test for c++ version of Latent SVM diff --git a/modules/ocl/perf/perf_canny.cpp b/modules/ocl/perf/perf_canny.cpp index 428e036d0c..99f74821d0 100644 --- a/modules/ocl/perf/perf_canny.cpp +++ b/modules/ocl/perf/perf_canny.cpp @@ -47,7 +47,7 @@ ///////////// Canny //////////////////////// TEST(Canny) { - Mat img = imread(abspath("aloeL.jpg"), CV_LOAD_IMAGE_GRAYSCALE); + Mat img = imread(abspath("aloeL.jpg"), IMREAD_GRAYSCALE); if (img.empty()) { diff --git a/modules/ocl/perf/perf_haar.cpp b/modules/ocl/perf/perf_haar.cpp index 5a909ace4e..671cf0a329 100644 --- a/modules/ocl/perf/perf_haar.cpp +++ b/modules/ocl/perf/perf_haar.cpp @@ -84,7 +84,7 @@ public: } TEST(Haar) { - Mat img = imread(abspath("basketball1.png"), CV_LOAD_IMAGE_GRAYSCALE); + Mat img = imread(abspath("basketball1.png"), IMREAD_GRAYSCALE); if (img.empty()) { diff --git a/modules/photo/test/test_denoising.cpp b/modules/photo/test/test_denoising.cpp index 57248c7728..ca4f63f222 100644 --- a/modules/photo/test/test_denoising.cpp +++ b/modules/photo/test/test_denoising.cpp @@ -62,8 +62,8 @@ TEST(Photo_DenoisingGrayscale, regression) string original_path = folder + "lena_noised_gaussian_sigma=10.png"; string expected_path = folder + "lena_noised_denoised_grayscale_tw=7_sw=21_h=10.png"; - Mat original = imread(original_path, CV_LOAD_IMAGE_GRAYSCALE); - Mat expected = imread(expected_path, CV_LOAD_IMAGE_GRAYSCALE); + Mat original = imread(original_path, IMREAD_GRAYSCALE); + Mat expected = imread(expected_path, IMREAD_GRAYSCALE); ASSERT_FALSE(original.empty()) << "Could not load input image " << original_path; ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path; @@ -82,8 +82,8 @@ TEST(Photo_DenoisingColored, regression) string original_path = folder + "lena_noised_gaussian_sigma=10.png"; string expected_path = folder + "lena_noised_denoised_lab12_tw=7_sw=21_h=10_h2=10.png"; - Mat original = imread(original_path, CV_LOAD_IMAGE_COLOR); - Mat expected = imread(expected_path, CV_LOAD_IMAGE_COLOR); + Mat original = imread(original_path, IMREAD_COLOR); + Mat expected = imread(expected_path, IMREAD_COLOR); ASSERT_FALSE(original.empty()) << "Could not load input image " 
<< original_path; ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path; @@ -102,14 +102,14 @@ TEST(Photo_DenoisingGrayscaleMulti, regression) string folder = string(cvtest::TS::ptr()->get_data_path()) + "denoising/"; string expected_path = folder + "lena_noised_denoised_multi_tw=7_sw=21_h=15.png"; - Mat expected = imread(expected_path, CV_LOAD_IMAGE_GRAYSCALE); + Mat expected = imread(expected_path, IMREAD_GRAYSCALE); ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path; vector original(imgs_count); for (int i = 0; i < imgs_count; i++) { string original_path = format("%slena_noised_gaussian_sigma=20_multi_%d.png", folder.c_str(), i); - original[i] = imread(original_path, CV_LOAD_IMAGE_GRAYSCALE); + original[i] = imread(original_path, IMREAD_GRAYSCALE); ASSERT_FALSE(original[i].empty()) << "Could not load input image " << original_path; } @@ -127,14 +127,14 @@ TEST(Photo_DenoisingColoredMulti, regression) string folder = string(cvtest::TS::ptr()->get_data_path()) + "denoising/"; string expected_path = folder + "lena_noised_denoised_multi_lab12_tw=7_sw=21_h=10_h2=15.png"; - Mat expected = imread(expected_path, CV_LOAD_IMAGE_COLOR); + Mat expected = imread(expected_path, IMREAD_COLOR); ASSERT_FALSE(expected.empty()) << "Could not load reference image " << expected_path; vector original(imgs_count); for (int i = 0; i < imgs_count; i++) { string original_path = format("%slena_noised_gaussian_sigma=20_multi_%d.png", folder.c_str(), i); - original[i] = imread(original_path, CV_LOAD_IMAGE_COLOR); + original[i] = imread(original_path, IMREAD_COLOR); ASSERT_FALSE(original[i].empty()) << "Could not load input image " << original_path; } diff --git a/modules/python/src2/cv2.cpp b/modules/python/src2/cv2.cpp index 851309c033..7455c5c6bb 100644 --- a/modules/python/src2/cv2.cpp +++ b/modules/python/src2/cv2.cpp @@ -23,6 +23,8 @@ #include "opencv2/photo.hpp" #include "opencv2/highgui.hpp" +#include "opencv2/highgui/highgui_c.h" + #include "opencv2/opencv_modules.hpp" #ifdef HAVE_OPENCV_NONFREE diff --git a/modules/python/src2/cv2.cv.hpp b/modules/python/src2/cv2.cv.hpp index 76b7d3c084..eb406af62b 100644 --- a/modules/python/src2/cv2.cv.hpp +++ b/modules/python/src2/cv2.cv.hpp @@ -1,5 +1,6 @@ #include "opencv2/legacy.hpp" #include "opencv2/legacy/compat.hpp" +#include "opencv2/highgui/highgui_c.h" #define OLD_MODULESTR "cv2.cv" diff --git a/modules/python/src2/gen2.py b/modules/python/src2/gen2.py index 8669fbd85f..680be2fd35 100755 --- a/modules/python/src2/gen2.py +++ b/modules/python/src2/gen2.py @@ -211,6 +211,7 @@ gen_template_rw_prop_init = Template(""" simple_argtype_mapping = { "bool": ("bool", "b", "0"), + "char": ("char", "b", "0"), "int": ("int", "i", "0"), "float": ("float", "f", "0.f"), "double": ("double", "d", "0"), diff --git a/modules/video/test/test_camshift.cpp b/modules/video/test/test_camshift.cpp index 7dcfb19de2..4a2ddc734b 100644 --- a/modules/video/test/test_camshift.cpp +++ b/modules/video/test/test_camshift.cpp @@ -345,7 +345,7 @@ _exit_: if( code < 0 ) { -#if defined _DEBUG && defined WIN32 +#if 0 //defined _DEBUG && defined WIN32 IplImage* dst = cvCreateImage( img_size, 8, 3 ); cvNamedWindow( "test", 1 ); cvCmpS( img, 0, img, CV_CMP_GT ); @@ -484,7 +484,7 @@ _exit_: if( code < 0 ) { -#if defined _DEBUG && defined WIN32 +#if 0// defined _DEBUG && defined WIN32 IplImage* dst = cvCreateImage( img_size, 8, 3 ); cvNamedWindow( "test", 1 ); cvCmpS( img, 0, img, CV_CMP_GT ); diff --git 
a/modules/video/test/test_optflowpyrlk.cpp b/modules/video/test/test_optflowpyrlk.cpp index 9a9035b98d..309bf5e307 100644 --- a/modules/video/test/test_optflowpyrlk.cpp +++ b/modules/video/test/test_optflowpyrlk.cpp @@ -72,8 +72,9 @@ void CV_OptFlowPyrLKTest::run( int ) CvMat *_u = 0, *_v = 0, *_v2 = 0; char* status = 0; - IplImage* imgI = 0; - IplImage* imgJ = 0; + IplImage imgI; + IplImage imgJ; + cv::Mat imgI2, imgJ2; int n = 0, i = 0; @@ -115,9 +116,10 @@ void CV_OptFlowPyrLKTest::run( int ) /* read first image */ sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_1.bmp" ); - imgI = cvLoadImage( filename, -1 ); + imgI2 = cv::imread( filename, cv::IMREAD_UNCHANGED ); + imgI = imgI2; - if( !imgI ) + if( imgI2.empty() ) { ts->printf( cvtest::TS::LOG, "could not read %s\n", filename ); code = cvtest::TS::FAIL_MISSING_TEST_DATA; @@ -126,9 +128,10 @@ void CV_OptFlowPyrLKTest::run( int ) /* read second image */ sprintf( filename, "%soptflow/%s", ts->get_data_path().c_str(), "rock_2.bmp" ); - imgJ = cvLoadImage( filename, -1 ); + imgJ2 = cv::imread( filename, cv::IMREAD_UNCHANGED ); + imgJ = imgJ2; - if( !imgJ ) + if( imgJ2.empty() ) { ts->printf( cvtest::TS::LOG, "could not read %s\n", filename ); code = cvtest::TS::FAIL_MISSING_TEST_DATA; @@ -139,7 +142,7 @@ void CV_OptFlowPyrLKTest::run( int ) status = (char*)cvAlloc(n*sizeof(status[0])); /* calculate flow */ - cvCalcOpticalFlowPyrLK( imgI, imgJ, 0, 0, u, v2, n, cvSize( 41, 41 ), + cvCalcOpticalFlowPyrLK( &imgI, &imgJ, 0, 0, u, v2, n, cvSize( 41, 41 ), 4, status, 0, cvTermCriteria( CV_TERMCRIT_ITER| CV_TERMCRIT_EPS, 30, 0.01f ), 0 ); @@ -201,9 +204,6 @@ _exit_: cvReleaseMat( &_v ); cvReleaseMat( &_v2 ); - cvReleaseImage( &imgI ); - cvReleaseImage( &imgJ ); - if( code < 0 ) ts->set_failed_test_info( code ); } diff --git a/modules/videostab/src/frame_source.cpp b/modules/videostab/src/frame_source.cpp index 14f57283d6..0032202115 100644 --- a/modules/videostab/src/frame_source.cpp +++ b/modules/videostab/src/frame_source.cpp @@ -84,10 +84,10 @@ public: } #ifdef HAVE_OPENCV_HIGHGUI - int width() {return static_cast(vc.get(CV_CAP_PROP_FRAME_WIDTH));} - int height() {return static_cast(vc.get(CV_CAP_PROP_FRAME_HEIGHT));} - int count() {return static_cast(vc.get(CV_CAP_PROP_FRAME_COUNT));} - double fps() {return vc.get(CV_CAP_PROP_FPS);} + int width() {return static_cast(vc.get(CAP_PROP_FRAME_WIDTH));} + int height() {return static_cast(vc.get(CAP_PROP_FRAME_HEIGHT));} + int count() {return static_cast(vc.get(CAP_PROP_FRAME_COUNT));} + double fps() {return vc.get(CAP_PROP_FPS);} #else int width() {return 0;} int height() {return 0;} diff --git a/samples/c/adaptiveskindetector.cpp b/samples/c/adaptiveskindetector.cpp index b81bff92d2..a561440af0 100644 --- a/samples/c/adaptiveskindetector.cpp +++ b/samples/c/adaptiveskindetector.cpp @@ -40,7 +40,7 @@ #include #include #include "opencv2/contrib/contrib.hpp" -#include "opencv2/highgui/highgui.hpp" +#include "opencv2/highgui/highgui_c.h" static void help(char **argv) { diff --git a/samples/c/bgfg_codebook.cpp b/samples/c/bgfg_codebook.cpp index 569a2788d3..eba0b71bea 100644 --- a/samples/c/bgfg_codebook.cpp +++ b/samples/c/bgfg_codebook.cpp @@ -24,7 +24,7 @@ #include "opencv2/core/utility.hpp" #include "opencv2/video/background_segm.hpp" #include "opencv2/imgproc/imgproc_c.h" -#include "opencv2/highgui.hpp" +#include "opencv2/highgui/highgui_c.h" #include "opencv2/legacy.hpp" #include diff --git a/samples/c/blobtrack_sample.cpp b/samples/c/blobtrack_sample.cpp index 
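The opposite direction appears in the test ports above: load with cv::imread, then create an IplImage header that shares the Mat's data so the remaining C calls keep working. A minimal sketch of that pattern, not part of the patch:

    #include "opencv2/core/core_c.h"
    #include "opencv2/highgui.hpp"

    static void legacy_header_sketch(const char* filename)
    {
        cv::Mat img2 = cv::imread(filename, cv::IMREAD_UNCHANGED);  // was cvLoadImage(filename, -1)
        if (img2.empty())
            return;

        IplImage img = img2;      // header only; img2 owns the data and must outlive img
        cvNot(&img, &img);        // any legacy C call taking CvArr*, shown for illustration
    }
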
d8209fa305..5fcd1a888d 100644 --- a/samples/c/blobtrack_sample.cpp +++ b/samples/c/blobtrack_sample.cpp @@ -1,7 +1,7 @@ #include "opencv2/video/background_segm.hpp" #include "opencv2/legacy/blobtrack.hpp" #include "opencv2/legacy/legacy.hpp" -#include "opencv2/highgui/highgui.hpp" +#include #include #include diff --git a/samples/c/convert_cascade.c b/samples/c/convert_cascade.c index 16c0fa6b19..a0dc064cbd 100644 --- a/samples/c/convert_cascade.c +++ b/samples/c/convert_cascade.c @@ -1,5 +1,5 @@ #include "opencv2/objdetect/objdetect.hpp" -#include "opencv2/highgui/highgui.hpp" +#include "opencv2/highgui/highgui_c.h" #include #include diff --git a/samples/c/facedetect.cpp b/samples/c/facedetect.cpp index 7d02ac95dd..ceb90822c1 100644 --- a/samples/c/facedetect.cpp +++ b/samples/c/facedetect.cpp @@ -1,8 +1,10 @@ -#include "opencv2/objdetect/objdetect.hpp" -#include "opencv2/highgui/highgui.hpp" -#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/objdetect.hpp" +#include "opencv2/highgui.hpp" +#include "opencv2/imgproc.hpp" #include "opencv2/core/utility.hpp" +#include "opencv2/highgui/highgui_c.h" + #include #include #include diff --git a/samples/c/fback_c.c b/samples/c/fback_c.c index af613d25e4..0fd85688d8 100644 --- a/samples/c/fback_c.c +++ b/samples/c/fback_c.c @@ -1,5 +1,5 @@ #include "opencv2/video/tracking.hpp" -#include "opencv2/highgui/highgui.hpp" +#include "opencv2/highgui/highgui_c.h" #include "opencv2/imgproc/imgproc_c.h" #include diff --git a/samples/c/find_obj.cpp b/samples/c/find_obj.cpp index 44e14ef267..651f439245 100644 --- a/samples/c/find_obj.cpp +++ b/samples/c/find_obj.cpp @@ -6,10 +6,10 @@ */ #include "opencv2/objdetect/objdetect.hpp" #include "opencv2/features2d/features2d.hpp" -#include "opencv2/highgui/highgui.hpp" #include "opencv2/calib3d/calib3d.hpp" #include "opencv2/nonfree/nonfree.hpp" #include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/highgui/highgui_c.h" #include "opencv2/legacy/legacy.hpp" #include "opencv2/legacy/compat.hpp" diff --git a/samples/c/find_obj_calonder.cpp b/samples/c/find_obj_calonder.cpp index 02cd266526..cb6551778f 100644 --- a/samples/c/find_obj_calonder.cpp +++ b/samples/c/find_obj_calonder.cpp @@ -61,7 +61,7 @@ static void trainCalonderClassifier( const string& classifierFilename, const str string str; getline( is, str ); if (str.empty()) break; - Mat img = imread( str, CV_LOAD_IMAGE_GRAYSCALE ); + Mat img = imread( str, IMREAD_GRAYSCALE ); if( !img.empty() ) trainImgs.push_back( img ); } @@ -106,7 +106,7 @@ static void trainCalonderClassifier( const string& classifierFilename, const str */ static void testCalonderClassifier( const string& classifierFilename, const string& imgFilename ) { - Mat img1 = imread( imgFilename, CV_LOAD_IMAGE_GRAYSCALE ), img2, H12; + Mat img1 = imread( imgFilename, IMREAD_GRAYSCALE ), img2, H12; if( img1.empty() ) { cout << "Test image can not be read." 
<< endl; diff --git a/samples/c/find_obj_ferns.cpp b/samples/c/find_obj_ferns.cpp index 093d08f368..cd48684f97 100644 --- a/samples/c/find_obj_ferns.cpp +++ b/samples/c/find_obj_ferns.cpp @@ -32,8 +32,8 @@ int main(int argc, char** argv) help(); - Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE ); - Mat scene = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE ); + Mat object = imread( object_filename, IMREAD_GRAYSCALE ); + Mat scene = imread( scene_filename, IMREAD_GRAYSCALE ); if( !object.data || !scene.data ) { @@ -47,9 +47,9 @@ int main(int argc, char** argv) resize(scene, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC); - cvNamedWindow("Object", 1); - cvNamedWindow("Image", 1); - cvNamedWindow("Object Correspondence", 1); + namedWindow("Object", 1); + namedWindow("Image", 1); + namedWindow("Object Correspondence", 1); Size patchSize(32, 32); LDetector ldetector(7, 20, 2, 2000, patchSize.width, 2); diff --git a/samples/c/latentsvmdetect.cpp b/samples/c/latentsvmdetect.cpp index e74b227a62..1b8770f878 100644 --- a/samples/c/latentsvmdetect.cpp +++ b/samples/c/latentsvmdetect.cpp @@ -1,5 +1,5 @@ -#include "opencv2/objdetect/objdetect.hpp" -#include "opencv2/highgui/highgui.hpp" +#include "opencv2/objdetect.hpp" +#include "opencv2/highgui/highgui_c.h" #include #ifdef HAVE_CVCONFIG_H diff --git a/samples/c/morphology.c b/samples/c/morphology.c index b71046eea0..fc2e2f195c 100644 --- a/samples/c/morphology.c +++ b/samples/c/morphology.c @@ -1,4 +1,4 @@ -#include "opencv2/highgui/highgui.hpp" +#include "opencv2/highgui/highgui_c.h" #include "opencv2/imgproc/imgproc_c.h" #include diff --git a/samples/c/motempl.c b/samples/c/motempl.c index df089f590f..56ec2f1040 100644 --- a/samples/c/motempl.c +++ b/samples/c/motempl.c @@ -1,5 +1,5 @@ #include "opencv2/video/tracking.hpp" -#include "opencv2/highgui/highgui.hpp" +#include "opencv2/highgui/highgui_c.h" #include "opencv2/imgproc/imgproc_c.h" #include #include diff --git a/samples/c/one_way_sample.cpp b/samples/c/one_way_sample.cpp index c017976b06..d2ca7f4009 100644 --- a/samples/c/one_way_sample.cpp +++ b/samples/c/one_way_sample.cpp @@ -49,8 +49,8 @@ int main(int argc, char** argv) std::string img2_name = path_name + "/" + std::string(argv[3]); printf("Reading the images...\n"); - Mat img1 = imread(img1_name, CV_LOAD_IMAGE_GRAYSCALE); - Mat img2 = imread(img2_name, CV_LOAD_IMAGE_GRAYSCALE); + Mat img1 = imread(img1_name, IMREAD_GRAYSCALE); + Mat img2 = imread(img2_name, IMREAD_GRAYSCALE); // extract keypoints from the first image SURF surf_extractor(5.0e3); diff --git a/samples/c/smiledetect.cpp b/samples/c/smiledetect.cpp index 07c482147f..aeb8773894 100644 --- a/samples/c/smiledetect.cpp +++ b/samples/c/smiledetect.cpp @@ -1,8 +1,10 @@ -#include "opencv2/objdetect/objdetect.hpp" -#include "opencv2/highgui/highgui.hpp" -#include "opencv2/imgproc/imgproc.hpp" +#include "opencv2/objdetect.hpp" +#include "opencv2/highgui.hpp" +#include "opencv2/imgproc.hpp" #include "opencv2/core/utility.hpp" +#include "opencv2/highgui/highgui_c.h" + #include #include #include diff --git a/samples/cpp/Qt_sample/main.cpp b/samples/cpp/Qt_sample/main.cpp index e40d72bd4e..c884a330ac 100644 --- a/samples/cpp/Qt_sample/main.cpp +++ b/samples/cpp/Qt_sample/main.cpp @@ -4,7 +4,13 @@ #include #include -#include + +#include +#include +#include +#include + +#include #if defined WIN32 || defined _WIN32 || defined WINCE #include @@ -20,9 +26,6 @@ #include #endif -#include -#include - using namespace std; using namespace cv; @@ -224,19 +227,20 @@ static 
void createOpenGLMatrixFrom(float *posePOSIT,const CvMatr32f &rotationMat
 int main(void)
 {
     help();
-    CvCapture* video = cvCaptureFromFile("cube4.avi");
-    CV_Assert(video);
+    VideoCapture video("cube4.avi");
+    CV_Assert(video.isOpened());

-    IplImage* source = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,3);
-    IplImage* grayImage = cvCreateImage(cvGetSize(cvQueryFrame(video)),8,1);
+    Mat frame; video >> frame;

-    cvNamedWindow("original",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
-    cvNamedWindow("POSIT",CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);
+    IplImage* grayImage = cvCreateImage(frame.size(),8,1);
+
+    namedWindow("original", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
+    namedWindow("POSIT", WINDOW_AUTOSIZE | WINDOW_FREERATIO);
     displayOverlay("POSIT", "We lost the 4 corners' detection quite often (the red circles disappear). This demo is only to illustrate how to use OpenGL callback.\n -- Press ESC to exit.", 10000);
     //For debug
     //cvNamedWindow("tempGray",CV_WINDOW_AUTOSIZE);
     float OpenGLMatrix[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-    cvSetOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);
+    setOpenGlDrawCallback("POSIT",on_opengl,OpenGLMatrix);

     vector modelPoints;
     initPOSIT(&modelPoints);
@@ -251,26 +255,27 @@ int main(void)
     vector<CvPoint2D32f> srcImagePoints(4,cvPoint2D32f(0,0));

-    while(cvWaitKey(33) != 27)
+    while(waitKey(33) != 27)
     {
-        source=cvQueryFrame(video);
-        cvShowImage("original",source);
+        video >> frame;
+        imshow("original", frame);

-        foundCorners(&srcImagePoints,source,grayImage);
+        IplImage source = frame;
+        foundCorners(&srcImagePoints, &source, grayImage);
         cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );
         createOpenGLMatrixFrom(OpenGLMatrix,rotation_matrix,translation_vector);

-        cvShowImage("POSIT",source);
+        imshow("POSIT", frame);

         //For debug
         //cvShowImage("tempGray",grayImage);

-        if (cvGetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO)>0.99)
-            cvSetCaptureProperty(video,CV_CAP_PROP_POS_AVI_RATIO,0);
+        if (video.get(CAP_PROP_POS_AVI_RATIO) > 0.99)
+            video.set(CAP_PROP_POS_AVI_RATIO, 0);
     }

-    cvDestroyAllWindows();
+    destroyAllWindows();
     cvReleaseImage(&grayImage);
-    cvReleaseCapture(&video);
+    video.release();
     cvReleasePOSITObject(&positObject);

     return 0;
diff --git a/samples/cpp/bgfg_segm.cpp b/samples/cpp/bgfg_segm.cpp
index 6542f614c4..a3d02009a7 100644
--- a/samples/cpp/bgfg_segm.cpp
+++ b/samples/cpp/bgfg_segm.cpp
@@ -47,10 +47,10 @@ int main(int argc, const char** argv)
         return -1;
     }

-    namedWindow("image", CV_WINDOW_NORMAL);
-    namedWindow("foreground mask", CV_WINDOW_NORMAL);
-    namedWindow("foreground image", CV_WINDOW_NORMAL);
-    namedWindow("mean background image", CV_WINDOW_NORMAL);
+    namedWindow("image", WINDOW_NORMAL);
+    namedWindow("foreground mask", WINDOW_NORMAL);
+    namedWindow("foreground image", WINDOW_NORMAL);
+    namedWindow("mean background image", WINDOW_NORMAL);

     Ptr bg_model = createBackgroundSubtractorMOG2();

diff --git a/samples/cpp/brief_match_test.cpp b/samples/cpp/brief_match_test.cpp
index 13902206a5..1f121327f3 100644
--- a/samples/cpp/brief_match_test.cpp
+++ b/samples/cpp/brief_match_test.cpp
@@ -66,8 +66,8 @@ int main(int argc, const char ** argv)
   string im1_name = parser.get<string>(0);
   string im2_name = parser.get<string>(1);

-  Mat im1 = imread(im1_name, CV_LOAD_IMAGE_GRAYSCALE);
-  Mat im2 = imread(im2_name, CV_LOAD_IMAGE_GRAYSCALE);
+  Mat im1 = imread(im1_name, IMREAD_GRAYSCALE);
+  Mat im2 = imread(im2_name, IMREAD_GRAYSCALE);

   if (im1.empty() || im2.empty())
   {
diff --git a/samples/cpp/calibration_artificial.cpp
b/samples/cpp/calibration_artificial.cpp index b4496615ce..f2a8f1b707 100644 --- a/samples/cpp/calibration_artificial.cpp +++ b/samples/cpp/calibration_artificial.cpp @@ -120,7 +120,7 @@ int main() imshow("Current chessboard", boards[i]); waitKey(1000); } cout << "Done" << endl; - cvDestroyAllWindows(); + destroyAllWindows(); Mat camMat_est; Mat distCoeffs_est; diff --git a/samples/cpp/camshiftdemo.cpp b/samples/cpp/camshiftdemo.cpp index 44bce7637e..a61996e487 100644 --- a/samples/cpp/camshiftdemo.cpp +++ b/samples/cpp/camshiftdemo.cpp @@ -33,12 +33,12 @@ static void onMouse( int event, int x, int y, int, void* ) switch( event ) { - case CV_EVENT_LBUTTONDOWN: + case EVENT_LBUTTONDOWN: origin = Point(x,y); selection = Rect(x,y,0,0); selectObject = true; break; - case CV_EVENT_LBUTTONUP: + case EVENT_LBUTTONUP: selectObject = false; if( selection.width > 0 && selection.height > 0 ) trackObject = -1; diff --git a/samples/cpp/contours2.cpp b/samples/cpp/contours2.cpp index 7d728ec779..0b488959ef 100644 --- a/samples/cpp/contours2.cpp +++ b/samples/cpp/contours2.cpp @@ -28,7 +28,7 @@ static void on_trackbar(int, void*) Mat cnt_img = Mat::zeros(w, w, CV_8UC3); int _levels = levels - 3; drawContours( cnt_img, contours, _levels <= 0 ? 3 : -1, Scalar(128,255,255), - 3, CV_AA, hierarchy, std::abs(_levels) ); + 3, LINE_AA, hierarchy, std::abs(_levels) ); imshow("contours", cnt_img); } diff --git a/samples/cpp/convexhull.cpp b/samples/cpp/convexhull.cpp index ce1503c9a6..2aef70fe7d 100644 --- a/samples/cpp/convexhull.cpp +++ b/samples/cpp/convexhull.cpp @@ -41,7 +41,7 @@ int main( int /*argc*/, char** /*argv*/ ) img = Scalar::all(0); for( i = 0; i < count; i++ ) - circle(img, points[i], 3, Scalar(0, 0, 255), CV_FILLED, CV_AA); + circle(img, points[i], 3, Scalar(0, 0, 255), FILLED, LINE_AA); int hullcount = (int)hull.size(); Point pt0 = points[hull[hullcount-1]]; @@ -49,7 +49,7 @@ int main( int /*argc*/, char** /*argv*/ ) for( i = 0; i < hullcount; i++ ) { Point pt = points[hull[i]]; - line(img, pt0, pt, Scalar(0, 255, 0), 1, CV_AA); + line(img, pt0, pt, Scalar(0, 255, 0), 1,LINE_AA); pt0 = pt; } diff --git a/samples/cpp/dbt_face_detection.cpp b/samples/cpp/dbt_face_detection.cpp index 35386a78d0..447e6509c7 100644 --- a/samples/cpp/dbt_face_detection.cpp +++ b/samples/cpp/dbt_face_detection.cpp @@ -84,7 +84,7 @@ int main(int , char** ) imshow(WindowName, ReferenceFrame); - if (cvWaitKey(30) >= 0) break; + if (waitKey(30) >= 0) break; } Detector.stop(); diff --git a/samples/cpp/delaunay2.cpp b/samples/cpp/delaunay2.cpp index 87e6664b67..a00e31a988 100644 --- a/samples/cpp/delaunay2.cpp +++ b/samples/cpp/delaunay2.cpp @@ -18,7 +18,7 @@ static void help() static void draw_subdiv_point( Mat& img, Point2f fp, Scalar color ) { - circle( img, fp, 3, color, CV_FILLED, 8, 0 ); + circle( img, fp, 3, color, FILLED, LINE_8, 0 ); } static void draw_subdiv( Mat& img, Subdiv2D& subdiv, Scalar delaunay_color ) @@ -34,9 +34,9 @@ static void draw_subdiv( Mat& img, Subdiv2D& subdiv, Scalar delaunay_color ) pt[0] = Point(cvRound(t[0]), cvRound(t[1])); pt[1] = Point(cvRound(t[2]), cvRound(t[3])); pt[2] = Point(cvRound(t[4]), cvRound(t[5])); - line(img, pt[0], pt[1], delaunay_color, 1, CV_AA, 0); - line(img, pt[1], pt[2], delaunay_color, 1, CV_AA, 0); - line(img, pt[2], pt[0], delaunay_color, 1, CV_AA, 0); + line(img, pt[0], pt[1], delaunay_color, 1, LINE_AA, 0); + line(img, pt[1], pt[2], delaunay_color, 1, LINE_AA, 0); + line(img, pt[2], pt[0], delaunay_color, 1, LINE_AA, 0); } #else vector edgeList; @@ -46,7 +46,7 @@ 
static void draw_subdiv( Mat& img, Subdiv2D& subdiv, Scalar delaunay_color ) Vec4f e = edgeList[i]; Point pt0 = Point(cvRound(e[0]), cvRound(e[1])); Point pt1 = Point(cvRound(e[2]), cvRound(e[3])); - line(img, pt0, pt1, delaunay_color, 1, CV_AA, 0); + line(img, pt0, pt1, delaunay_color, 1, LINE_AA, 0); } #endif } @@ -64,7 +64,7 @@ static void locate_point( Mat& img, Subdiv2D& subdiv, Point2f fp, Scalar active_ { Point2f org, dst; if( subdiv.edgeOrg(e, &org) > 0 && subdiv.edgeDst(e, &dst) > 0 ) - line( img, org, dst, active_color, 3, CV_AA, 0 ); + line( img, org, dst, active_color, 3, LINE_AA, 0 ); e = subdiv.getEdge(e, Subdiv2D::NEXT_AROUND_LEFT); } @@ -97,8 +97,8 @@ static void paint_voronoi( Mat& img, Subdiv2D& subdiv ) fillConvexPoly(img, ifacet, color, 8, 0); ifacets[0] = ifacet; - polylines(img, ifacets, true, Scalar(), 1, CV_AA, 0); - circle(img, centers[i], 3, Scalar(), -1, CV_AA, 0); + polylines(img, ifacets, true, Scalar(), 1, LINE_AA, 0); + circle(img, centers[i], 3, Scalar(), FILLED, LINE_AA, 0); } } diff --git a/samples/cpp/demhist.cpp b/samples/cpp/demhist.cpp index eef25964af..ba60115df0 100644 --- a/samples/cpp/demhist.cpp +++ b/samples/cpp/demhist.cpp @@ -44,7 +44,7 @@ static void updateBrightnessContrast( int /*arg*/, void* ) calcHist(&dst, 1, 0, Mat(), hist, 1, &histSize, 0); Mat histImage = Mat::ones(200, 320, CV_8U)*255; - normalize(hist, hist, 0, histImage.rows, CV_MINMAX, CV_32F); + normalize(hist, hist, 0, histImage.rows, NORM_MINMAX, CV_32F); histImage = Scalar::all(255); int binW = cvRound((double)histImage.cols/histSize); diff --git a/samples/cpp/dft.cpp b/samples/cpp/dft.cpp index 234dc9a4e3..b94387c54f 100644 --- a/samples/cpp/dft.cpp +++ b/samples/cpp/dft.cpp @@ -27,7 +27,7 @@ int main(int argc, const char ** argv) CommandLineParser parser(argc, argv, keys); string filename = parser.get(0); - Mat img = imread(filename.c_str(), CV_LOAD_IMAGE_GRAYSCALE); + Mat img = imread(filename.c_str(), IMREAD_GRAYSCALE); if( img.empty() ) { help(); @@ -74,7 +74,7 @@ int main(int argc, const char ** argv) q2.copyTo(q1); tmp.copyTo(q2); - normalize(mag, mag, 0, 1, CV_MINMAX); + normalize(mag, mag, 0, 1, NORM_MINMAX); imshow("spectrum magnitude", mag); waitKey(); diff --git a/samples/cpp/distrans.cpp b/samples/cpp/distrans.cpp index 1bd85bbc2b..1afc9b37ee 100644 --- a/samples/cpp/distrans.cpp +++ b/samples/cpp/distrans.cpp @@ -130,7 +130,7 @@ int main( int argc, const char** argv ) // Call to update the view onTrackbar(0, 0); - int c = cvWaitKey(0) & 255; + int c = waitKey() & 255; if( c == 27 ) break; diff --git a/samples/cpp/drawing.cpp b/samples/cpp/drawing.cpp index 5467463902..b0848168df 100644 --- a/samples/cpp/drawing.cpp +++ b/samples/cpp/drawing.cpp @@ -21,7 +21,7 @@ int main() char wndname[] = "Drawing Demo"; const int NUMBER = 100; const int DELAY = 5; - int lineType = CV_AA; // change it to 8 to see non-antialiased graphics + int lineType = LINE_AA; // change it to LINE_8 to see non-antialiased graphics int i, width = 1000, height = 700; int x1 = -width/2, x2 = width*3/2, y1 = -height/2, y2 = height*3/2; RNG rng(0xFFFFFFFF); @@ -157,14 +157,14 @@ int main() return 0; } - Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0); + Size textsize = getTextSize("OpenCV forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0); Point org((width - textsize.width)/2, (height - textsize.height)/2); Mat image2; for( i = 0; i < 255; i += 2 ) { image2 = image - Scalar::all(i); - putText(image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3, + putText(image2, 
"OpenCV forever!", org, FONT_HERSHEY_COMPLEX, 3, Scalar(i, i, 255), 5, lineType); imshow(wndname, image2); diff --git a/samples/cpp/ffilldemo.cpp b/samples/cpp/ffilldemo.cpp index bb207c307e..1cdce9bb5c 100644 --- a/samples/cpp/ffilldemo.cpp +++ b/samples/cpp/ffilldemo.cpp @@ -34,7 +34,7 @@ int newMaskVal = 255; static void onMouse( int event, int x, int y, int, void* ) { - if( event != CV_EVENT_LBUTTONDOWN ) + if( event != EVENT_LBUTTONDOWN ) return; Point seed = Point(x,y); diff --git a/samples/cpp/fitellipse.cpp b/samples/cpp/fitellipse.cpp index 2fa62b8e46..c42f8f3c3e 100644 --- a/samples/cpp/fitellipse.cpp +++ b/samples/cpp/fitellipse.cpp @@ -82,12 +82,12 @@ void processImage(int /*h*/, void*) continue; drawContours(cimage, contours, (int)i, Scalar::all(255), 1, 8); - ellipse(cimage, box, Scalar(0,0,255), 1, CV_AA); - ellipse(cimage, box.center, box.size*0.5f, box.angle, 0, 360, Scalar(0,255,255), 1, CV_AA); + ellipse(cimage, box, Scalar(0,0,255), 1, LINE_AA); + ellipse(cimage, box.center, box.size*0.5f, box.angle, 0, 360, Scalar(0,255,255), 1, LINE_AA); Point2f vtx[4]; box.points(vtx); for( int j = 0; j < 4; j++ ) - line(cimage, vtx[j], vtx[(j+1)%4], Scalar(0,255,0), 1, CV_AA); + line(cimage, vtx[j], vtx[(j+1)%4], Scalar(0,255,0), 1, LINE_AA); } imshow("result", cimage); diff --git a/samples/cpp/freak_demo.cpp b/samples/cpp/freak_demo.cpp index d6ea45ac04..b420e455c5 100644 --- a/samples/cpp/freak_demo.cpp +++ b/samples/cpp/freak_demo.cpp @@ -66,13 +66,13 @@ int main( int argc, char** argv ) { } // Load images - Mat imgA = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE ); + Mat imgA = imread(argv[1], IMREAD_GRAYSCALE ); if( !imgA.data ) { std::cout<< " --(!) Error reading image " << argv[1] << std::endl; return -1; } - Mat imgB = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE ); + Mat imgB = imread(argv[2], IMREAD_GRAYSCALE ); if( !imgA.data ) { std::cout << " --(!) 
Error reading image " << argv[2] << std::endl; return -1; @@ -123,7 +123,7 @@ int main( int argc, char** argv ) { Mat imgMatch; drawMatches(imgA, keypointsA, imgB, keypointsB, matches, imgMatch); - namedWindow("matches", CV_WINDOW_KEEPRATIO); + namedWindow("matches", WINDOW_KEEPRATIO); imshow("matches", imgMatch); waitKey(0); } diff --git a/samples/cpp/generic_descriptor_match.cpp b/samples/cpp/generic_descriptor_match.cpp index 7c69ccdeda..5d38ddadfe 100644 --- a/samples/cpp/generic_descriptor_match.cpp +++ b/samples/cpp/generic_descriptor_match.cpp @@ -40,8 +40,8 @@ int main(int argc, char** argv) } //printf("Reading the images...\n"); - Mat img1 = imread(img1_name, CV_LOAD_IMAGE_GRAYSCALE); - Mat img2 = imread(img2_name, CV_LOAD_IMAGE_GRAYSCALE); + Mat img1 = imread(img1_name, IMREAD_GRAYSCALE); + Mat img2 = imread(img2_name, IMREAD_GRAYSCALE); // extract keypoints from the first image SURF surf_extractor(5.0e3); diff --git a/samples/cpp/grabcut.cpp b/samples/cpp/grabcut.cpp index 40280e4570..f276e75666 100644 --- a/samples/cpp/grabcut.cpp +++ b/samples/cpp/grabcut.cpp @@ -33,13 +33,13 @@ const Scalar BLUE = Scalar(255,0,0); const Scalar LIGHTBLUE = Scalar(255,255,160); const Scalar GREEN = Scalar(0,255,0); -const int BGD_KEY = CV_EVENT_FLAG_CTRLKEY; -const int FGD_KEY = CV_EVENT_FLAG_SHIFTKEY; +const int BGD_KEY = EVENT_FLAG_CTRLKEY; +const int FGD_KEY = EVENT_FLAG_SHIFTKEY; static void getBinMask( const Mat& comMask, Mat& binMask ) { if( comMask.empty() || comMask.type()!=CV_8UC1 ) - CV_Error( CV_StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" ); + CV_Error( Error::StsBadArg, "comMask is empty or has incorrect type (not CV_8UC1)" ); if( binMask.empty() || binMask.rows!=comMask.rows || binMask.cols!=comMask.cols ) binMask.create( comMask.size(), CV_8UC1 ); binMask = comMask & 1; @@ -132,7 +132,7 @@ void GCApplication::showImage() const void GCApplication::setRectInMask() { - assert( !mask.empty() ); + CV_Assert( !mask.empty() ); mask.setTo( GC_BGD ); rect.x = max(0, rect.x); rect.y = max(0, rect.y); @@ -176,7 +176,7 @@ void GCApplication::mouseClick( int event, int x, int y, int flags, void* ) // TODO add bad args check switch( event ) { - case CV_EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels + case EVENT_LBUTTONDOWN: // set rect or GC_BGD(GC_FGD) labels { bool isb = (flags & BGD_KEY) != 0, isf = (flags & FGD_KEY) != 0; @@ -189,7 +189,7 @@ void GCApplication::mouseClick( int event, int x, int y, int flags, void* ) lblsState = IN_PROCESS; } break; - case CV_EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels + case EVENT_RBUTTONDOWN: // set GC_PR_BGD(GC_PR_FGD) labels { bool isb = (flags & BGD_KEY) != 0, isf = (flags & FGD_KEY) != 0; @@ -197,13 +197,13 @@ void GCApplication::mouseClick( int event, int x, int y, int flags, void* ) prLblsState = IN_PROCESS; } break; - case CV_EVENT_LBUTTONUP: + case EVENT_LBUTTONUP: if( rectState == IN_PROCESS ) { rect = Rect( Point(rect.x, rect.y), Point(x,y) ); rectState = SET; setRectInMask(); - assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() ); + CV_Assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() ); showImage(); } if( lblsState == IN_PROCESS ) @@ -213,7 +213,7 @@ void GCApplication::mouseClick( int event, int x, int y, int flags, void* ) showImage(); } break; - case CV_EVENT_RBUTTONUP: + case EVENT_RBUTTONUP: if( prLblsState == IN_PROCESS ) { setLblsInMask(flags, Point(x,y), true); @@ -221,11 +221,11 @@ void GCApplication::mouseClick( int event, int 
x, int y, int flags, void* ) showImage(); } break; - case CV_EVENT_MOUSEMOVE: + case EVENT_MOUSEMOVE: if( rectState == IN_PROCESS ) { rect = Rect( Point(rect.x, rect.y), Point(x,y) ); - assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() ); + CV_Assert( bgdPxls.empty() && fgdPxls.empty() && prBgdPxls.empty() && prFgdPxls.empty() ); showImage(); } else if( lblsState == IN_PROCESS ) @@ -296,15 +296,15 @@ int main( int argc, char** argv ) help(); const string winName = "image"; - cvNamedWindow( winName.c_str(), CV_WINDOW_AUTOSIZE ); - cvSetMouseCallback( winName.c_str(), on_mouse, 0 ); + namedWindow( winName.c_str(), WINDOW_AUTOSIZE ); + setMouseCallback( winName.c_str(), on_mouse, 0 ); gcapp.setImageAndWinName( image, winName ); gcapp.showImage(); for(;;) { - int c = cvWaitKey(0); + int c = waitKey(); switch( (char) c ) { case '\x1b': @@ -331,6 +331,6 @@ int main( int argc, char** argv ) } exit_main: - cvDestroyWindow( winName.c_str() ); + destroyWindow( winName.c_str() ); return 0; } diff --git a/samples/cpp/houghcircles.cpp b/samples/cpp/houghcircles.cpp index b51faf53a5..12f1c57677 100644 --- a/samples/cpp/houghcircles.cpp +++ b/samples/cpp/houghcircles.cpp @@ -37,8 +37,8 @@ int main(int argc, char** argv) for( size_t i = 0; i < circles.size(); i++ ) { Vec3i c = circles[i]; - circle( cimg, Point(c[0], c[1]), c[2], Scalar(0,0,255), 3, CV_AA); - circle( cimg, Point(c[0], c[1]), 2, Scalar(0,255,0), 3, CV_AA); + circle( cimg, Point(c[0], c[1]), c[2], Scalar(0,0,255), 3, LINE_AA); + circle( cimg, Point(c[0], c[1]), 2, Scalar(0,255,0), 3, LINE_AA); } imshow("detected circles", cimg); diff --git a/samples/cpp/houghlines.cpp b/samples/cpp/houghlines.cpp index 7efb990e63..8dea5c15d0 100644 --- a/samples/cpp/houghlines.cpp +++ b/samples/cpp/houghlines.cpp @@ -51,7 +51,7 @@ int main(int argc, char** argv) for( size_t i = 0; i < lines.size(); i++ ) { Vec4i l = lines[i]; - line( cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, CV_AA); + line( cdst, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(0,0,255), 3, LINE_AA); } #endif imshow("source", src); diff --git a/samples/cpp/hybridtrackingsample.cpp b/samples/cpp/hybridtrackingsample.cpp index 8c925ac9d5..09ea76dc1b 100644 --- a/samples/cpp/hybridtrackingsample.cpp +++ b/samples/cpp/hybridtrackingsample.cpp @@ -47,12 +47,12 @@ static void onMouse(int event, int x, int y, int, void*) { } switch (event) { - case CV_EVENT_LBUTTONDOWN: + case EVENT_LBUTTONDOWN: origin = Point(x, y); selection = Rect(x, y, 0, 0); selectObject = true; break; - case CV_EVENT_LBUTTONUP: + case EVENT_LBUTTONUP: selectObject = false; trackObject = -1; break; @@ -96,8 +96,8 @@ int main(int argc, char** argv) return 0; } cout << "Opened camera" << endl; - cap.set(CV_CAP_PROP_FRAME_WIDTH, 640); - cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480); + cap.set(CAP_PROP_FRAME_WIDTH, 640); + cap.set(CAP_PROP_FRAME_HEIGHT, 480); cap >> frame; } @@ -137,7 +137,7 @@ int main(int argc, char** argv) int values_read = fscanf(f, "%d %f %f %f %f\n", &i, &w[0], &w[1], &w[2], &w[3]); CV_Assert(values_read == 5); sprintf(img_file, "seqG/%04d.png", i); - image = imread(img_file, CV_LOAD_IMAGE_COLOR); + image = imread(img_file, IMREAD_COLOR); if (image.empty()) break; selection = Rect(cvRound(w[0]*image.cols), cvRound(w[1]*image.rows), diff --git a/samples/cpp/image.cpp b/samples/cpp/image.cpp index 0e9ea9cf69..806926b042 100644 --- a/samples/cpp/image.cpp +++ b/samples/cpp/image.cpp @@ -1,9 +1,9 @@ #include #include -#include "opencv2/imgproc/imgproc.hpp" -#include 
"opencv2/highgui/highgui.hpp" -#include "opencv2/flann/miniflann.hpp" -#include "opencv2/core/utility.hpp" +#include +#include +#include +#include using namespace cv; // all the new API is put into "cv" namespace. Export its content using namespace std; @@ -22,6 +22,10 @@ static void help() // enable/disable use of mixed API in the code below. #define DEMO_MIXED_API_USE 1 +#ifdef DEMO_MIXED_API_USE +# include +#endif + int main( int argc, char** argv ) { help(); @@ -110,7 +114,7 @@ int main( int argc, char** argv ) cvtColor(img_yuv, img, COLOR_YCrCb2BGR); // this is counterpart for cvNamedWindow - namedWindow("image with grain", CV_WINDOW_AUTOSIZE); + namedWindow("image with grain", WINDOW_AUTOSIZE); #if DEMO_MIXED_API_USE // this is to demonstrate that img and iplimg really share the data - the result of the above // processing is stored in img and thus in iplimg too. diff --git a/samples/cpp/image_alignment.cpp b/samples/cpp/image_alignment.cpp index 08f9c0dd22..b251e72531 100644 --- a/samples/cpp/image_alignment.cpp +++ b/samples/cpp/image_alignment.cpp @@ -347,10 +347,10 @@ int main (const int argc, const char * argv[]) cout << "The warped image has been saved in the file: " << warpedImFile << endl << flush; - namedWindow ("image", CV_WINDOW_AUTOSIZE); - namedWindow ("template", CV_WINDOW_AUTOSIZE); - namedWindow ("warped image", CV_WINDOW_AUTOSIZE); - namedWindow ("error (black: no error)", CV_WINDOW_AUTOSIZE); + namedWindow ("image", WINDOW_AUTOSIZE); + namedWindow ("template", WINDOW_AUTOSIZE); + namedWindow ("warped image", WINDOW_AUTOSIZE); + namedWindow ("error (black: no error)", WINDOW_AUTOSIZE); moveWindow ("template", 350, 350); moveWindow ("warped image", 600, 300); diff --git a/samples/cpp/inpaint.cpp b/samples/cpp/inpaint.cpp index 223b2de127..1ec00034a9 100644 --- a/samples/cpp/inpaint.cpp +++ b/samples/cpp/inpaint.cpp @@ -27,11 +27,11 @@ Point prevPt(-1,-1); static void onMouse( int event, int x, int y, int flags, void* ) { - if( event == CV_EVENT_LBUTTONUP || !(flags & CV_EVENT_FLAG_LBUTTON) ) + if( event == EVENT_LBUTTONUP || !(flags & EVENT_FLAG_LBUTTON) ) prevPt = Point(-1,-1); - else if( event == CV_EVENT_LBUTTONDOWN ) + else if( event == EVENT_LBUTTONDOWN ) prevPt = Point(x,y); - else if( event == CV_EVENT_MOUSEMOVE && (flags & CV_EVENT_FLAG_LBUTTON) ) + else if( event == EVENT_MOUSEMOVE && (flags & EVENT_FLAG_LBUTTON) ) { Point pt(x,y); if( prevPt.x < 0 ) diff --git a/samples/cpp/kmeans.cpp b/samples/cpp/kmeans.cpp index 97de6a0a48..0cc313f756 100644 --- a/samples/cpp/kmeans.cpp +++ b/samples/cpp/kmeans.cpp @@ -47,13 +47,13 @@ int main( int /*argc*/, char** /*argv*/ ) Mat pointChunk = points.rowRange(k*sampleCount/clusterCount, k == clusterCount - 1 ? 
                                             sampleCount : (k+1)*sampleCount/clusterCount);
-            rng.fill(pointChunk, CV_RAND_NORMAL, Scalar(center.x, center.y), Scalar(img.cols*0.05, img.rows*0.05));
+            rng.fill(pointChunk, RNG::NORMAL, Scalar(center.x, center.y), Scalar(img.cols*0.05, img.rows*0.05));
         }

         randShuffle(points, 1, &rng);

         kmeans(points, clusterCount, labels,
-            TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 10, 1.0),
+            TermCriteria( TermCriteria::EPS+TermCriteria::COUNT, 10, 1.0),
             3, KMEANS_PP_CENTERS, centers);

         img = Scalar::all(0);

@@ -62,7 +62,7 @@ int main( int /*argc*/, char** /*argv*/ )
         {
             int clusterIdx = labels.at<int>(i);
             Point ipt = points.at<Point2f>(i);
-            circle( img, ipt, 2, colorTab[clusterIdx], CV_FILLED, CV_AA );
+            circle( img, ipt, 2, colorTab[clusterIdx], FILLED, LINE_AA );
         }

         imshow("clusters", img);
diff --git a/samples/cpp/laplace.cpp b/samples/cpp/laplace.cpp
index 8bddeb820e..45264e3107 100644
--- a/samples/cpp/laplace.cpp
+++ b/samples/cpp/laplace.cpp
@@ -34,15 +34,15 @@ int main( int argc, char** argv )
         cap.open(argv[1]);
         if( cap.isOpened() )
             cout << "Video " << argv[1] <<
-                ": width=" << cap.get(CV_CAP_PROP_FRAME_WIDTH) <<
-                ", height=" << cap.get(CV_CAP_PROP_FRAME_HEIGHT) <<
-                ", nframes=" << cap.get(CV_CAP_PROP_FRAME_COUNT) << endl;
+                ": width=" << cap.get(CAP_PROP_FRAME_WIDTH) <<
+                ", height=" << cap.get(CAP_PROP_FRAME_HEIGHT) <<
+                ", nframes=" << cap.get(CAP_PROP_FRAME_COUNT) << endl;

         if( argc > 2 && isdigit(argv[2][0]) )
         {
             int pos;
             sscanf(argv[2], "%d", &pos);
             cout << "seeking to frame #" << pos << endl;
-            cap.set(CV_CAP_PROP_POS_FRAMES, pos);
+            cap.set(CAP_PROP_POS_FRAMES, pos);
         }
     }
diff --git a/samples/cpp/linemod.cpp b/samples/cpp/linemod.cpp
index bfa1b7555d..08d2a00354 100644
--- a/samples/cpp/linemod.cpp
+++ b/samples/cpp/linemod.cpp
@@ -31,7 +31,7 @@ class Mouse
 public:
     static void start(const std::string& a_img_name)
     {
-        cvSetMouseCallback(a_img_name.c_str(), Mouse::cv_on_mouse, 0);
+        cv::setMouseCallback(a_img_name.c_str(), Mouse::cv_on_mouse, 0);
     }
     static int event(void)
     {
@@ -190,14 +190,14 @@ int main(int argc, char * argv[])
     int num_modalities = (int)detector->getModalities().size();

     // Open Kinect sensor
-    cv::VideoCapture capture( CV_CAP_OPENNI );
+    cv::VideoCapture capture( cv::CAP_OPENNI );
     if (!capture.isOpened())
     {
         printf("Could not open OpenNI-capable sensor\n");
         return -1;
     }
-    capture.set(CV_CAP_PROP_OPENNI_REGISTRATION, 1);
-    double focal_length = capture.get(CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
+    capture.set(cv::CAP_PROP_OPENNI_REGISTRATION, 1);
+    double focal_length = capture.get(cv::CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH);
     //printf("Focal length = %f\n", focal_length);

     // Main loop
@@ -206,8 +206,8 @@ int main(int argc, char * argv[])
     {
         // Capture next color/depth pair
         capture.grab();
-        capture.retrieve(depth, CV_CAP_OPENNI_DEPTH_MAP);
-        capture.retrieve(color, CV_CAP_OPENNI_BGR_IMAGE);
+        capture.retrieve(depth, cv::CAP_OPENNI_DEPTH_MAP);
+        capture.retrieve(color, cv::CAP_OPENNI_BGR_IMAGE);

         std::vector sources;
         sources.push_back(color);
@@ -224,7 +224,7 @@ int main(int argc, char * argv[])
     cv::Point pt1 = mouse - roi_offset; // top left
     cv::Point pt2 = mouse + roi_offset; // bottom right

-    if (event == CV_EVENT_RBUTTONDOWN)
+    if (event == cv::EVENT_RBUTTONDOWN)
     {
         // Compute object mask by subtracting the plane within the ROI
         std::vector chain(4);
@@ -331,7 +331,7 @@ int main(int argc, char * argv[])
     cv::imshow("normals", quantized_images[1]);

     cv::FileStorage fs;
-    char key = (char)cvWaitKey(10);
+    char key = (char)cv::waitKey(10);
     if( key == 'q' )
         break;

diff --git
a/samples/cpp/lkdemo.cpp b/samples/cpp/lkdemo.cpp index 9ea395c8a3..3b7fb8bcb3 100644 --- a/samples/cpp/lkdemo.cpp +++ b/samples/cpp/lkdemo.cpp @@ -28,7 +28,7 @@ bool addRemovePt = false; static void onMouse( int event, int x, int y, int /*flags*/, void* /*param*/ ) { - if( event == CV_EVENT_LBUTTONDOWN ) + if( event == EVENT_LBUTTONDOWN ) { point = Point2f((float)x,(float)y); addRemovePt = true; diff --git a/samples/cpp/matcher_simple.cpp b/samples/cpp/matcher_simple.cpp index acdf55f700..c1cd6e4c30 100644 --- a/samples/cpp/matcher_simple.cpp +++ b/samples/cpp/matcher_simple.cpp @@ -23,8 +23,8 @@ int main(int argc, char** argv) return -1; } - Mat img1 = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); - Mat img2 = imread(argv[2], CV_LOAD_IMAGE_GRAYSCALE); + Mat img1 = imread(argv[1], IMREAD_GRAYSCALE); + Mat img2 = imread(argv[2], IMREAD_GRAYSCALE); if(img1.empty() || img2.empty()) { printf("Can't read one of the images\n"); diff --git a/samples/cpp/matching_to_many_images.cpp b/samples/cpp/matching_to_many_images.cpp index f6c31b2c48..7a346e3f49 100644 --- a/samples/cpp/matching_to_many_images.cpp +++ b/samples/cpp/matching_to_many_images.cpp @@ -95,7 +95,7 @@ static bool readImages( const string& queryImageName, const string& trainFilenam Mat& queryImage, vector & trainImages, vector& trainImageNames ) { cout << "< Reading the images..." << endl; - queryImage = imread( queryImageName, CV_LOAD_IMAGE_GRAYSCALE); + queryImage = imread( queryImageName, IMREAD_GRAYSCALE); if( queryImage.empty() ) { cout << "Query image can not be read." << endl << ">" << endl; @@ -112,7 +112,7 @@ static bool readImages( const string& queryImageName, const string& trainFilenam for( size_t i = 0; i < trainImageNames.size(); i++ ) { string filename = trainDirName + trainImageNames[i]; - Mat img = imread( filename, CV_LOAD_IMAGE_GRAYSCALE ); + Mat img = imread( filename, IMREAD_GRAYSCALE ); if( img.empty() ) cout << "Train image " << filename << " can not be read." 
<< endl; else diff --git a/samples/cpp/meanshift_segmentation.cpp b/samples/cpp/meanshift_segmentation.cpp index ce34f6ea7f..eb71619dad 100644 --- a/samples/cpp/meanshift_segmentation.cpp +++ b/samples/cpp/meanshift_segmentation.cpp @@ -65,7 +65,7 @@ int main(int argc, char** argv) colorRad = 10; maxPyrLevel = 1; - namedWindow( winName, CV_WINDOW_AUTOSIZE ); + namedWindow( winName, WINDOW_AUTOSIZE ); createTrackbar( "spatialRad", winName, &spatialRad, 80, meanShiftSegmentation ); createTrackbar( "colorRad", winName, &colorRad, 60, meanShiftSegmentation ); diff --git a/samples/cpp/minarea.cpp b/samples/cpp/minarea.cpp index eca5dc1d36..13ac7c6d52 100644 --- a/samples/cpp/minarea.cpp +++ b/samples/cpp/minarea.cpp @@ -45,16 +45,16 @@ int main( int /*argc*/, char** /*argv*/ ) img = Scalar::all(0); for( i = 0; i < count; i++ ) - circle( img, points[i], 3, Scalar(0, 0, 255), CV_FILLED, CV_AA ); + circle( img, points[i], 3, Scalar(0, 0, 255), FILLED, LINE_AA ); for( i = 0; i < 4; i++ ) - line(img, vtx[i], vtx[(i+1)%4], Scalar(0, 255, 0), 1, CV_AA); + line(img, vtx[i], vtx[(i+1)%4], Scalar(0, 255, 0), 1, LINE_AA); - circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, CV_AA); + circle(img, center, cvRound(radius), Scalar(0, 255, 255), 1, LINE_AA); imshow( "rect & circle", img ); - char key = (char)cvWaitKey(); + char key = (char)waitKey(); if( key == 27 || key == 'q' || key == 'Q' ) // 'ESC' break; } diff --git a/samples/cpp/morphology2.cpp b/samples/cpp/morphology2.cpp index 3052f85d9a..5b927cf023 100644 --- a/samples/cpp/morphology2.cpp +++ b/samples/cpp/morphology2.cpp @@ -79,7 +79,7 @@ int main( int argc, char** argv ) OpenClose(open_close_pos, 0); ErodeDilate(erode_dilate_pos, 0); - c = cvWaitKey(0); + c = waitKey(); if( (char)c == 27 ) break; diff --git a/samples/cpp/openni_capture.cpp b/samples/cpp/openni_capture.cpp index 549f16e6f0..802b474207 100644 --- a/samples/cpp/openni_capture.cpp +++ b/samples/cpp/openni_capture.cpp @@ -79,8 +79,8 @@ static void colorizeDisparity( const Mat& gray, Mat& rgb, double maxDisp=-1.f, f static float getMaxDisparity( VideoCapture& capture ) { const int minDistance = 400; // mm - float b = (float)capture.get( CV_CAP_OPENNI_DEPTH_GENERATOR_BASELINE ); // mm - float F = (float)capture.get( CV_CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH ); // pixels + float b = (float)capture.get( CAP_OPENNI_DEPTH_GENERATOR_BASELINE ); // mm + float F = (float)capture.get( CAP_OPENNI_DEPTH_GENERATOR_FOCAL_LENGTH ); // pixels return b * F / minDistance; } @@ -142,7 +142,7 @@ static void parseCommandLine( int argc, char* argv[], bool& isColorizeDisp, bool { string mask( argv[++i] ); if( mask.size() != 5) - CV_Error( CV_StsBadArg, "Incorrect length of -m argument string" ); + CV_Error( Error::StsBadArg, "Incorrect length of -m argument string" ); int val = atoi(mask.c_str()); int l = 100000, r = 10000, sum = 0; @@ -191,7 +191,7 @@ int main( int argc, char* argv[] ) if( isVideoReading ) capture.open( filename ); else - capture.open( CV_CAP_OPENNI ); + capture.open( CAP_OPENNI ); cout << "done." 
<< endl; @@ -207,23 +207,23 @@ int main( int argc, char* argv[] ) switch ( imageMode ) { case 0: - modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ ); + modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_VGA_30HZ ); break; case 1: - modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_15HZ ); + modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_SXGA_15HZ ); break; case 2: - modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_30HZ ); + modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_SXGA_30HZ ); break; //The following modes are only supported by the Xtion Pro Live case 3: - modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_30HZ ); + modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_QVGA_30HZ ); break; case 4: - modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_60HZ ); + modeRes = capture.set( CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CAP_OPENNI_QVGA_60HZ ); break; default: - CV_Error( CV_StsBadArg, "Unsupported image mode property.\n"); + CV_Error( Error::StsBadArg, "Unsupported image mode property.\n"); } if (!modeRes) cout << "\nThis image mode is not supported by the device, the default value (CV_CAP_OPENNI_SXGA_15HZ) will be used.\n" << endl; @@ -231,18 +231,18 @@ int main( int argc, char* argv[] ) // Print some avalible device settings. cout << "\nDepth generator output mode:" << endl << - "FRAME_WIDTH " << capture.get( CV_CAP_PROP_FRAME_WIDTH ) << endl << - "FRAME_HEIGHT " << capture.get( CV_CAP_PROP_FRAME_HEIGHT ) << endl << - "FRAME_MAX_DEPTH " << capture.get( CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl << - "FPS " << capture.get( CV_CAP_PROP_FPS ) << endl << - "REGISTRATION " << capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) << endl; - if( capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) ) + "FRAME_WIDTH " << capture.get( CAP_PROP_FRAME_WIDTH ) << endl << + "FRAME_HEIGHT " << capture.get( CAP_PROP_FRAME_HEIGHT ) << endl << + "FRAME_MAX_DEPTH " << capture.get( CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl << + "FPS " << capture.get( CAP_PROP_FPS ) << endl << + "REGISTRATION " << capture.get( CAP_PROP_OPENNI_REGISTRATION ) << endl; + if( capture.get( CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) ) { cout << "\nImage generator output mode:" << endl << - "FRAME_WIDTH " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_WIDTH ) << endl << - "FRAME_HEIGHT " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_HEIGHT ) << endl << - "FPS " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FPS ) << endl; + "FRAME_WIDTH " << capture.get( CAP_OPENNI_IMAGE_GENERATOR+CAP_PROP_FRAME_WIDTH ) << endl << + "FRAME_HEIGHT " << capture.get( CAP_OPENNI_IMAGE_GENERATOR+CAP_PROP_FRAME_HEIGHT ) << endl << + "FPS " << capture.get( CAP_OPENNI_IMAGE_GENERATOR+CAP_PROP_FPS ) << endl; } else { @@ -266,14 +266,14 @@ int main( int argc, char* argv[] ) } else { - if( retrievedImageFlags[0] && capture.retrieve( depthMap, CV_CAP_OPENNI_DEPTH_MAP ) ) + if( retrievedImageFlags[0] && capture.retrieve( depthMap, CAP_OPENNI_DEPTH_MAP ) ) { const float scaleFactor = 0.05f; Mat show; depthMap.convertTo( show, CV_8UC1, scaleFactor ); imshow( "depth map", show ); } - if( retrievedImageFlags[1] && capture.retrieve( disparityMap, CV_CAP_OPENNI_DISPARITY_MAP ) ) + if( retrievedImageFlags[1] && capture.retrieve( 
disparityMap, CAP_OPENNI_DISPARITY_MAP ) ) { if( isColorizeDisp ) { @@ -289,13 +289,13 @@ int main( int argc, char* argv[] ) } } - if( retrievedImageFlags[2] && capture.retrieve( validDepthMap, CV_CAP_OPENNI_VALID_DEPTH_MASK ) ) + if( retrievedImageFlags[2] && capture.retrieve( validDepthMap, CAP_OPENNI_VALID_DEPTH_MASK ) ) imshow( "valid depth mask", validDepthMap ); - if( retrievedImageFlags[3] && capture.retrieve( bgrImage, CV_CAP_OPENNI_BGR_IMAGE ) ) + if( retrievedImageFlags[3] && capture.retrieve( bgrImage, CAP_OPENNI_BGR_IMAGE ) ) imshow( "rgb image", bgrImage ); - if( retrievedImageFlags[4] && capture.retrieve( grayImage, CV_CAP_OPENNI_GRAY_IMAGE ) ) + if( retrievedImageFlags[4] && capture.retrieve( grayImage, CAP_OPENNI_GRAY_IMAGE ) ) imshow( "gray image", grayImage ); } diff --git a/samples/cpp/pca.cpp b/samples/cpp/pca.cpp index ed23c7622b..d4272736c2 100644 --- a/samples/cpp/pca.cpp +++ b/samples/cpp/pca.cpp @@ -54,7 +54,7 @@ static void read_imgList(const string& filename, vector& images) { std::ifstream file(filename.c_str(), ifstream::in); if (!file) { string error_message = "No valid input file was given, please check the given filename."; - CV_Error(CV_StsBadArg, error_message); + CV_Error(Error::StsBadArg, error_message); } string line; while (getline(file, line)) { @@ -78,7 +78,7 @@ static Mat toGrayscale(InputArray _src) { Mat src = _src.getMat(); // only allow one channel if(src.channels() != 1) { - CV_Error(CV_StsBadArg, "Only Matrices with one channel are supported"); + CV_Error(Error::StsBadArg, "Only Matrices with one channel are supported"); } // create and return normalized image Mat dst; @@ -104,7 +104,7 @@ static void onTrackbar(int pos, void* ptr) struct params *p = (struct params *)ptr; - p->pca = PCA(p->data, cv::Mat(), CV_PCA_DATA_AS_ROW, var); + p->pca = PCA(p->data, cv::Mat(), PCA::DATA_AS_ROW, var); Mat point = p->pca.project(p->data.row(0)); Mat reconstruction = p->pca.backProject(point); @@ -142,14 +142,14 @@ int main(int argc, char** argv) // Quit if there are not enough images for this demo. if(images.size() <= 1) { string error_message = "This demo needs at least 2 images to work. Please add more images to your data set!"; - CV_Error(CV_StsError, error_message); + CV_Error(Error::StsError, error_message); } // Reshape and stack images into a rowMatrix Mat data = formatImagesForPCA(images); // perform PCA - PCA pca(data, cv::Mat(), CV_PCA_DATA_AS_ROW, 0.95); // trackbar is initially set here, also this is a common value for retainedVariance + PCA pca(data, cv::Mat(), PCA::DATA_AS_ROW, 0.95); // trackbar is initially set here, also this is a common value for retainedVariance // Demonstration of the effect of retainedVariance on the first image Mat point = pca.project(data.row(0)); // project into the eigenspace, thus the image becomes a "point" @@ -159,7 +159,7 @@ int main(int argc, char** argv) // init highgui window string winName = "Reconstruction | press 'q' to quit"; - namedWindow(winName, CV_WINDOW_NORMAL); + namedWindow(winName, WINDOW_NORMAL); // params struct to pass to the trackbar handler params p; diff --git a/samples/cpp/phase_corr.cpp b/samples/cpp/phase_corr.cpp index 97172adc38..2b1f2378b3 100644 --- a/samples/cpp/phase_corr.cpp +++ b/samples/cpp/phase_corr.cpp @@ -31,8 +31,8 @@ int main(int, char* []) { // draw a circle and line indicating the shift direction... 
Point center(curr.cols >> 1, curr.rows >> 1); - circle(frame, center, (int)radius, Scalar(0, 255, 0), 3, CV_AA); - line(frame, center, Point(center.x + (int)shift.x, center.y + (int)shift.y), Scalar(0, 255, 0), 3, CV_AA); + circle(frame, center, (int)radius, Scalar(0, 255, 0), 3, LINE_AA); + line(frame, center, Point(center.x + (int)shift.x, center.y + (int)shift.y), Scalar(0, 255, 0), 3, LINE_AA); } imshow("phase shift", frame); diff --git a/samples/cpp/points_classifier.cpp b/samples/cpp/points_classifier.cpp index 8bc35e5541..b197b5be59 100644 --- a/samples/cpp/points_classifier.cpp +++ b/samples/cpp/points_classifier.cpp @@ -36,7 +36,7 @@ static void on_mouse( int event, int x, int y, int /*flags*/, void* ) int updateFlag = 0; - if( event == CV_EVENT_LBUTTONUP ) + if( event == EVENT_LBUTTONUP ) { if( classColors.empty() ) return; @@ -45,7 +45,7 @@ static void on_mouse( int event, int x, int y, int /*flags*/, void* ) trainedPointsMarkers.push_back( (int)(classColors.size()-1) ); updateFlag = true; } - else if( event == CV_EVENT_RBUTTONUP ) + else if( event == EVENT_RBUTTONUP ) { #if _BT_ if( classColors.size() < 2 ) @@ -503,7 +503,7 @@ int main() imgDst.create( 480, 640, CV_8UC3 ); imshow( "points", img ); - cvSetMouseCallback( "points", on_mouse ); + setMouseCallback( "points", on_mouse ); for(;;) { diff --git a/samples/cpp/segment_objects.cpp b/samples/cpp/segment_objects.cpp index 8555a0e602..d44b035342 100644 --- a/samples/cpp/segment_objects.cpp +++ b/samples/cpp/segment_objects.cpp @@ -53,7 +53,7 @@ static void refineSegments(const Mat& img, Mat& mask, Mat& dst) } } Scalar color( 0, 0, 255 ); - drawContours( dst, contours, largestComp, color, CV_FILLED, 8, hierarchy ); + drawContours( dst, contours, largestComp, color, FILLED, LINE_8, hierarchy ); } diff --git a/samples/cpp/select3dobj.cpp b/samples/cpp/select3dobj.cpp index 8652f7ebfe..a6aa2a7f4e 100644 --- a/samples/cpp/select3dobj.cpp +++ b/samples/cpp/select3dobj.cpp @@ -212,11 +212,11 @@ static int select3DBox(const string& windowname, const string& selWinName, const for(;;) { float Z = 0.f; - bool dragging = (mouse.buttonState & CV_EVENT_FLAG_LBUTTON) != 0; + bool dragging = (mouse.buttonState & EVENT_FLAG_LBUTTON) != 0; int npt = nobjpt; - if( (mouse.event == CV_EVENT_LBUTTONDOWN || - mouse.event == CV_EVENT_LBUTTONUP || + if( (mouse.event == EVENT_LBUTTONDOWN || + mouse.event == EVENT_LBUTTONUP || dragging) && nobjpt < 4 ) { Point2f m = mouse.pt; @@ -259,9 +259,9 @@ static int select3DBox(const string& windowname, const string& selWinName, const } box[npt] = image2plane(imgpt[npt], R, tvec, cameraMatrix, npt<3 ? 
0 : Z); - if( (npt == 0 && mouse.event == CV_EVENT_LBUTTONDOWN) || + if( (npt == 0 && mouse.event == EVENT_LBUTTONDOWN) || (npt > 0 && norm(box[npt] - box[npt-1]) > eps && - mouse.event == CV_EVENT_LBUTTONUP) ) + mouse.event == EVENT_LBUTTONUP) ) { nobjpt++; if( nobjpt < 4 ) diff --git a/samples/cpp/squares.cpp b/samples/cpp/squares.cpp index 11887c1ca0..392075d79f 100644 --- a/samples/cpp/squares.cpp +++ b/samples/cpp/squares.cpp @@ -132,7 +132,7 @@ static void drawSquares( Mat& image, const vector >& squares ) { const Point* p = &squares[i][0]; int n = (int)squares[i].size(); - polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, CV_AA); + polylines(image, &p, &n, 1, true, Scalar(0,255,0), 3, LINE_AA); } imshow(wndname, image); diff --git a/samples/cpp/starter_imagelist.cpp b/samples/cpp/starter_imagelist.cpp index 61585a515f..fe89579301 100644 --- a/samples/cpp/starter_imagelist.cpp +++ b/samples/cpp/starter_imagelist.cpp @@ -46,15 +46,15 @@ bool readStringList(const string& filename, vector& l) int process(vector images) { - namedWindow("image",CV_WINDOW_KEEPRATIO); //resizable window; - for (size_t i = 0; i < images.size(); i++) - { - Mat image = imread(images[i], CV_LOAD_IMAGE_GRAYSCALE); // do grayscale processing? - imshow("image",image); - cout << "Press a key to see the next image in the list." << endl; - waitKey(); // wait indefinitely for a key to be pressed - } - return 0; + namedWindow("image", WINDOW_KEEPRATIO); //resizable window; + for (size_t i = 0; i < images.size(); i++) + { + Mat image = imread(images[i], IMREAD_GRAYSCALE); // do grayscale processing? + imshow("image",image); + cout << "Press a key to see the next image in the list." << endl; + waitKey(); // wait indefinitely for a key to be pressed + } + return 0; } } diff --git a/samples/cpp/starter_video.cpp b/samples/cpp/starter_video.cpp index b8f4086c45..5bceb552cd 100644 --- a/samples/cpp/starter_video.cpp +++ b/samples/cpp/starter_video.cpp @@ -36,7 +36,7 @@ namespace { char filename[200]; string window_name = "video | q or esc to quit"; cout << "press space to save a picture. 
q or esc to quit" << endl; - namedWindow(window_name, CV_WINDOW_KEEPRATIO); //resizable window; + namedWindow(window_name, WINDOW_KEEPRATIO); //resizable window; Mat frame; for (;;) { capture >> frame; diff --git a/samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video-input-psnr-ssim.cpp b/samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video-input-psnr-ssim.cpp index 56f6e7dfa1..bee1c5b200 100644 --- a/samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video-input-psnr-ssim.cpp +++ b/samples/cpp/tutorial_code/HighGUI/video-input-psnr-ssim/video-input-psnr-ssim.cpp @@ -60,10 +60,10 @@ int main(int argc, char *argv[]) return -1; } - Size refS = Size((int) captRefrnc.get(CV_CAP_PROP_FRAME_WIDTH), - (int) captRefrnc.get(CV_CAP_PROP_FRAME_HEIGHT)), - uTSi = Size((int) captUndTst.get(CV_CAP_PROP_FRAME_WIDTH), - (int) captUndTst.get(CV_CAP_PROP_FRAME_HEIGHT)); + Size refS = Size((int) captRefrnc.get(CAP_PROP_FRAME_WIDTH), + (int) captRefrnc.get(CAP_PROP_FRAME_HEIGHT)), + uTSi = Size((int) captUndTst.get(CAP_PROP_FRAME_WIDTH), + (int) captUndTst.get(CAP_PROP_FRAME_HEIGHT)); if (refS != uTSi) { @@ -75,13 +75,13 @@ int main(int argc, char *argv[]) const char* WIN_RF = "Reference"; // Windows - namedWindow(WIN_RF, CV_WINDOW_AUTOSIZE); - namedWindow(WIN_UT, CV_WINDOW_AUTOSIZE); - cvMoveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0) - cvMoveWindow(WIN_UT, refS.width, 0); //1500, 2 + namedWindow(WIN_RF, WINDOW_AUTOSIZE); + namedWindow(WIN_UT, WINDOW_AUTOSIZE); + moveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0) + moveWindow(WIN_UT, refS.width, 0); //1500, 2 cout << "Reference frame resolution: Width=" << refS.width << " Height=" << refS.height - << " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl; + << " of nr#: " << captRefrnc.get(CAP_PROP_FRAME_COUNT) << endl; cout << "PSNR trigger value " << setiosflags(ios::fixed) << setprecision(3) << psnrTriggerValue << endl; @@ -125,7 +125,7 @@ int main(int argc, char *argv[]) imshow(WIN_RF, frameReference); imshow(WIN_UT, frameUnderTest); - c = (char)cvWaitKey(delay); + c = (char)waitKey(delay); if (c == 27) break; } diff --git a/samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp b/samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp index 8ee99a10aa..d3b8e44303 100644 --- a/samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp +++ b/samples/cpp/tutorial_code/HighGUI/video-write/video-write.cpp @@ -41,19 +41,19 @@ int main(int argc, char *argv[]) string::size_type pAt = source.find_last_of('.'); // Find extension point const string NAME = source.substr(0, pAt) + argv[2][0] + ".avi"; // Form the new name with container - int ex = static_cast(inputVideo.get(CV_CAP_PROP_FOURCC)); // Get Codec Type- Int form + int ex = static_cast(inputVideo.get(CAP_PROP_FOURCC)); // Get Codec Type- Int form // Transform from int to char via Bitwise operators char EXT[] = {(char)(ex & 0XFF) , (char)((ex & 0XFF00) >> 8),(char)((ex & 0XFF0000) >> 16),(char)((ex & 0XFF000000) >> 24), 0}; - Size S = Size((int) inputVideo.get(CV_CAP_PROP_FRAME_WIDTH), // Acquire input size - (int) inputVideo.get(CV_CAP_PROP_FRAME_HEIGHT)); + Size S = Size((int) inputVideo.get(CAP_PROP_FRAME_WIDTH), // Acquire input size + (int) inputVideo.get(CAP_PROP_FRAME_HEIGHT)); VideoWriter outputVideo; // Open the output if (askOutputType) - outputVideo.open(NAME, ex=-1, inputVideo.get(CV_CAP_PROP_FPS), S, true); + outputVideo.open(NAME, ex=-1, inputVideo.get(CAP_PROP_FPS), S, true); else - outputVideo.open(NAME, ex, inputVideo.get(CV_CAP_PROP_FPS), S, 
true); + outputVideo.open(NAME, ex, inputVideo.get(CAP_PROP_FPS), S, true); if (!outputVideo.isOpened()) { @@ -62,7 +62,7 @@ int main(int argc, char *argv[]) } cout << "Input frame resolution: Width=" << S.width << " Height=" << S.height - << " of nr#: " << inputVideo.get(CV_CAP_PROP_FRAME_COUNT) << endl; + << " of nr#: " << inputVideo.get(CAP_PROP_FRAME_COUNT) << endl; cout << "Input codec type: " << EXT << endl; int channel = 2; // Select the channel to save diff --git a/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp b/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp index 5b5d9899e8..38a5839bb2 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/EqualizeHist_Demo.cpp @@ -37,8 +37,8 @@ int main( int, char** argv ) equalizeHist( src, dst ); /// Display results - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); - namedWindow( equalized_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); + namedWindow( equalized_window, WINDOW_AUTOSIZE ); imshow( source_window, src ); imshow( equalized_window, dst ); diff --git a/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp b/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp index ffbc30bb18..b41cf94be5 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/MatchTemplate_Demo.cpp @@ -33,8 +33,8 @@ int main( int, char** argv ) templ = imread( argv[2], 1 ); /// Create windows - namedWindow( image_window, CV_WINDOW_AUTOSIZE ); - namedWindow( result_window, CV_WINDOW_AUTOSIZE ); + namedWindow( image_window, WINDOW_AUTOSIZE ); + namedWindow( result_window, WINDOW_AUTOSIZE ); /// Create Trackbar const char* trackbar_label = "Method: \n 0: SQDIFF \n 1: SQDIFF NORMED \n 2: TM CCORR \n 3: TM CCORR NORMED \n 4: TM COEFF \n 5: TM COEFF NORMED"; diff --git a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp index e4a0af84b3..86d2f2e154 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo1.cpp @@ -37,7 +37,7 @@ int main( int, char** argv ) /// Create Trackbar to enter the number of bins const char* window_image = "Source image"; - namedWindow( window_image, CV_WINDOW_AUTOSIZE ); + namedWindow( window_image, WINDOW_AUTOSIZE ); createTrackbar("* Hue bins: ", window_image, &bins, 180, Hist_and_Backproj ); Hist_and_Backproj(0, 0); diff --git a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp index c908cd83f1..28181c2d31 100644 --- a/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp +++ b/samples/cpp/tutorial_code/Histograms_Matching/calcBackProject_Demo2.cpp @@ -34,7 +34,7 @@ int main( int, char** argv ) cvtColor( src, hsv, COLOR_BGR2HSV ); /// Show the image - namedWindow( window_image, CV_WINDOW_AUTOSIZE ); + namedWindow( window_image, WINDOW_AUTOSIZE ); imshow( window_image, src ); /// Set Trackbars for floodfill thresholds @@ -52,7 +52,7 @@ int main( int, char** argv ) */ void pickPoint (int event, int x, int y, int, void* ) { - if( event != CV_EVENT_LBUTTONDOWN ) + if( event != EVENT_LBUTTONDOWN ) { return; } // Fill and get the mask diff --git 
a/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp b/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp
index d3af5e7a96..577c8a8b9c 100644
--- a/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp
+++ b/samples/cpp/tutorial_code/Histograms_Matching/calcHist_Demo.cpp
@@ -71,7 +71,7 @@ int main( int, char** argv )
   }

   /// Display
-  namedWindow("calcHist Demo", CV_WINDOW_AUTOSIZE );
+  namedWindow("calcHist Demo", WINDOW_AUTOSIZE );
   imshow("calcHist Demo", histImage );

   waitKey(0);
diff --git a/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp b/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp
index 47030fa4f4..13a96a1f51 100644
--- a/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp
+++ b/samples/cpp/tutorial_code/ImgProc/Morphology_1.cpp
@@ -37,9 +37,9 @@ int main( int, char** argv )
   { return -1; }

   /// Create windows
-  namedWindow( "Erosion Demo", CV_WINDOW_AUTOSIZE );
-  namedWindow( "Dilation Demo", CV_WINDOW_AUTOSIZE );
-  cvMoveWindow( "Dilation Demo", src.cols, 0 );
+  namedWindow( "Erosion Demo", WINDOW_AUTOSIZE );
+  namedWindow( "Dilation Demo", WINDOW_AUTOSIZE );
+  moveWindow( "Dilation Demo", src.cols, 0 );

   /// Create Erosion Trackbar
   createTrackbar( "Element:\n 0: Rect \n 1: Cross \n 2: Ellipse", "Erosion Demo",
diff --git a/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp b/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp
index c501bfd328..ec00768035 100644
--- a/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp
+++ b/samples/cpp/tutorial_code/ImgProc/Morphology_2.cpp
@@ -39,7 +39,7 @@ int main( int, char** argv )
   { return -1; }

   /// Create window
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );

   /// Create Trackbar to select Morphology operation
   createTrackbar("Operator:\n 0: Opening - 1: Closing \n 2: Gradient - 3: Top Hat \n 4: Black Hat", window_name, &morph_operator, max_operator, Morphology_Operations );
diff --git a/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp b/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp
index d0cae3c4f6..48ec8286cb 100644
--- a/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp
+++ b/samples/cpp/tutorial_code/ImgProc/Pyramids.cpp
@@ -40,7 +40,7 @@ int main( void )
   dst = tmp;

   /// Create window
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );
   imshow( window_name, dst );

   /// Loop
diff --git a/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp b/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp
index 5f51d07d29..8513bcf76a 100644
--- a/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp
+++ b/samples/cpp/tutorial_code/ImgProc/Smoothing.cpp
@@ -31,7 +31,7 @@ int display_dst( int delay );
  */
 int main( void )
 {
-  namedWindow( window_name, CV_WINDOW_AUTOSIZE );
+  namedWindow( window_name, WINDOW_AUTOSIZE );

   /// Load the source image
   src = imread( "../images/lena.png", 1 );
@@ -89,7 +89,7 @@ int display_caption( const char* caption )
   dst = Mat::zeros( src.size(), src.type() );
   putText( dst, caption,
            Point( src.cols/4, src.rows/2),
-           CV_FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );
+           FONT_HERSHEY_COMPLEX, 1, Scalar(255, 255, 255) );

   imshow( window_name, dst );
   int c = waitKey( DELAY_CAPTION );
diff --git a/samples/cpp/tutorial_code/ImgProc/Threshold.cpp b/samples/cpp/tutorial_code/ImgProc/Threshold.cpp
index 7ba05db3c9..1dff6c86ed 100644
--- a/samples/cpp/tutorial_code/ImgProc/Threshold.cpp
+++ b/samples/cpp/tutorial_code/ImgProc/Threshold.cpp
@@ -40,7 +40,7 @@ int main( int, char** argv )
   cvtColor( src, src_gray, COLOR_RGB2GRAY );
/// Create a window to display results - namedWindow( window_name, CV_WINDOW_AUTOSIZE ); + namedWindow( window_name, WINDOW_AUTOSIZE ); /// Create Trackbar to choose type of Threshold createTrackbar( trackbar_type, diff --git a/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp index f1455094a7..7851c9f58b 100644 --- a/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/CannyDetector_Demo.cpp @@ -61,7 +61,7 @@ int main( int, char** argv ) cvtColor( src, src_gray, COLOR_BGR2GRAY ); /// Create a window - namedWindow( window_name, CV_WINDOW_AUTOSIZE ); + namedWindow( window_name, WINDOW_AUTOSIZE ); /// Create a Trackbar for user to enter threshold createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold ); diff --git a/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp index 2d18357202..0a2a2f5fa4 100644 --- a/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/Geometric_Transforms_Demo.cpp @@ -65,13 +65,13 @@ int main( int, char** argv ) /// Show what you got - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); imshow( source_window, src ); - namedWindow( warp_window, CV_WINDOW_AUTOSIZE ); + namedWindow( warp_window, WINDOW_AUTOSIZE ); imshow( warp_window, warp_dst ); - namedWindow( warp_rotate_window, CV_WINDOW_AUTOSIZE ); + namedWindow( warp_rotate_window, WINDOW_AUTOSIZE ); imshow( warp_rotate_window, warp_rotate_dst ); /// Wait until user exits the program diff --git a/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp index 09a6cefde2..b4cda82f1b 100644 --- a/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp @@ -48,7 +48,7 @@ int main(int, char** argv) } /// Show your results - namedWindow( "Hough Circle Transform Demo", CV_WINDOW_AUTOSIZE ); + namedWindow( "Hough Circle Transform Demo", WINDOW_AUTOSIZE ); imshow( "Hough Circle Transform Demo", src ); waitKey(0); diff --git a/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp index e4064e1aa7..6400c43794 100644 --- a/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/HoughLines_Demo.cpp @@ -55,10 +55,10 @@ int main( int, char** argv ) char thresh_label[50]; sprintf( thresh_label, "Thres: %d + input", min_threshold ); - namedWindow( standard_name, CV_WINDOW_AUTOSIZE ); + namedWindow( standard_name, WINDOW_AUTOSIZE ); createTrackbar( thresh_label, standard_name, &s_trackbar, max_trackbar, Standard_Hough); - namedWindow( probabilistic_name, CV_WINDOW_AUTOSIZE ); + namedWindow( probabilistic_name, WINDOW_AUTOSIZE ); createTrackbar( thresh_label, probabilistic_name, &p_trackbar, max_trackbar, Probabilistic_Hough); /// Initialize @@ -100,7 +100,7 @@ void Standard_Hough( int, void* ) Point pt1( cvRound(x0 + alpha*(-sin_t)), cvRound(y0 + alpha*cos_t) ); Point pt2( cvRound(x0 - alpha*(-sin_t)), cvRound(y0 - alpha*cos_t) ); - line( standard_hough, pt1, pt2, Scalar(255,0,0), 3, CV_AA); + line( standard_hough, pt1, pt2, Scalar(255,0,0), 3, LINE_AA); } imshow( standard_name, standard_hough ); @@ -121,7 +121,7 @@ void Probabilistic_Hough( int, void* ) for( size_t i = 0; i < p_lines.size(); 
i++ ) { Vec4i l = p_lines[i]; - line( probabilistic_hough, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(255,0,0), 3, CV_AA); + line( probabilistic_hough, Point(l[0], l[1]), Point(l[2], l[3]), Scalar(255,0,0), 3, LINE_AA); } imshow( probabilistic_name, probabilistic_hough ); diff --git a/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp index f9033df471..f6dff102d8 100644 --- a/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/Laplace_Demo.cpp @@ -37,7 +37,7 @@ int main( int, char** argv ) cvtColor( src, src_gray, COLOR_RGB2GRAY ); /// Create window - namedWindow( window_name, CV_WINDOW_AUTOSIZE ); + namedWindow( window_name, WINDOW_AUTOSIZE ); /// Apply Laplace function Mat abs_dst; diff --git a/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp index 814a01c4c4..86b6a2cb60 100644 --- a/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/Remap_Demo.cpp @@ -34,7 +34,7 @@ int main( int, char** argv ) map_y.create( src.size(), CV_32FC1 ); /// Create window - namedWindow( remap_window, CV_WINDOW_AUTOSIZE ); + namedWindow( remap_window, WINDOW_AUTOSIZE ); /// Loop for(;;) diff --git a/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp index 3a8130b332..a5498f715e 100644 --- a/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/Sobel_Demo.cpp @@ -36,7 +36,7 @@ int main( int, char** argv ) cvtColor( src, src_gray, COLOR_RGB2GRAY ); /// Create window - namedWindow( window_name, CV_WINDOW_AUTOSIZE ); + namedWindow( window_name, WINDOW_AUTOSIZE ); /// Generate grad_x and grad_y Mat grad_x, grad_y; diff --git a/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp b/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp index 4b2783e31c..53120b7800 100644 --- a/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/copyMakeBorder_demo.cpp @@ -44,7 +44,7 @@ int main( int, char** argv ) printf( " ** Press 'ESC' to exit the program \n"); /// Create window - namedWindow( window_name, CV_WINDOW_AUTOSIZE ); + namedWindow( window_name, WINDOW_AUTOSIZE ); /// Initialize arguments for the filter top = (int) (0.05*src.rows); bottom = (int) (0.05*src.rows); diff --git a/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp b/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp index 7a8dc768cf..86db4d8ed4 100644 --- a/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp +++ b/samples/cpp/tutorial_code/ImgTrans/filter2D_demo.cpp @@ -35,7 +35,7 @@ int main ( int, char** argv ) { return -1; } /// Create window - namedWindow( window_name, CV_WINDOW_AUTOSIZE ); + namedWindow( window_name, WINDOW_AUTOSIZE ); /// Initialize arguments for the filter anchor = Point( -1, -1 ); diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp index 01a4f1c73a..5eb2d92a7e 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/findContours_demo.cpp @@ -35,7 +35,7 @@ int main( int, char** argv ) /// Create Window const char* source_window = "Source"; - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); imshow( source_window, src ); createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, 
thresh_callback ); @@ -68,6 +68,6 @@ void thresh_callback(int, void* ) } /// Show in a window - namedWindow( "Contours", CV_WINDOW_AUTOSIZE ); + namedWindow( "Contours", WINDOW_AUTOSIZE ); imshow( "Contours", drawing ); } diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp index dfc27e56ce..d481d03898 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo1.cpp @@ -35,7 +35,7 @@ int main( int, char** argv ) /// Create Window const char* source_window = "Source"; - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); imshow( source_window, src ); createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback ); @@ -83,6 +83,6 @@ void thresh_callback(int, void* ) } /// Show in a window - namedWindow( "Contours", CV_WINDOW_AUTOSIZE ); + namedWindow( "Contours", WINDOW_AUTOSIZE ); imshow( "Contours", drawing ); } diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp index cb93de231a..e1f44712b7 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/generalContours_demo2.cpp @@ -35,7 +35,7 @@ int main( int, char** argv ) /// Create Window const char* source_window = "Source"; - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); imshow( source_window, src ); createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback ); @@ -85,6 +85,6 @@ void thresh_callback(int, void* ) } /// Show in a window - namedWindow( "Contours", CV_WINDOW_AUTOSIZE ); + namedWindow( "Contours", WINDOW_AUTOSIZE ); imshow( "Contours", drawing ); } diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp index 35c6f4db39..395b4b39f0 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/hull_demo.cpp @@ -35,7 +35,7 @@ int main( int, char** argv ) /// Create Window const char* source_window = "Source"; - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); imshow( source_window, src ); createTrackbar( " Threshold:", "Source", &thresh, max_thresh, thresh_callback ); @@ -76,6 +76,6 @@ void thresh_callback(int, void* ) } /// Show in a window - namedWindow( "Hull demo", CV_WINDOW_AUTOSIZE ); + namedWindow( "Hull demo", WINDOW_AUTOSIZE ); imshow( "Hull demo", drawing ); } diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp index 277a06040e..61dcd70016 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/moments_demo.cpp @@ -35,7 +35,7 @@ int main( int, char** argv ) /// Create Window const char* source_window = "Source"; - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); imshow( source_window, src ); createTrackbar( " Canny thresh:", "Source", &thresh, max_thresh, thresh_callback ); @@ -79,7 +79,7 @@ void thresh_callback(int, void* ) } /// Show in a window - namedWindow( "Contours", CV_WINDOW_AUTOSIZE ); + namedWindow( "Contours", WINDOW_AUTOSIZE ); imshow( 
"Contours", drawing ); /// Calculate the area with the moments 00 and compare with the result of the OpenCV function diff --git a/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp b/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp index c15bc22c6f..23f794a90b 100644 --- a/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp +++ b/samples/cpp/tutorial_code/ShapeDescriptors/pointPolygonTest_demo.cpp @@ -71,9 +71,9 @@ int main( void ) /// Create Window and show your results const char* source_window = "Source"; - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); imshow( source_window, src ); - namedWindow( "Distance", CV_WINDOW_AUTOSIZE ); + namedWindow( "Distance", WINDOW_AUTOSIZE ); imshow( "Distance", drawing ); waitKey(0); diff --git a/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp index 7184f94877..dafc4be943 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp +++ b/samples/cpp/tutorial_code/TrackingMotion/cornerDetector_Demo.cpp @@ -64,7 +64,7 @@ int main( int, char** argv ) minMaxLoc( Mc, &myHarris_minVal, &myHarris_maxVal, 0, 0, Mat() ); /* Create Window and Trackbar */ - namedWindow( myHarris_window, CV_WINDOW_AUTOSIZE ); + namedWindow( myHarris_window, WINDOW_AUTOSIZE ); createTrackbar( " Quality Level:", myHarris_window, &myHarris_qualityLevel, max_qualityLevel, myHarris_function ); myHarris_function( 0, 0 ); @@ -75,7 +75,7 @@ int main( int, char** argv ) minMaxLoc( myShiTomasi_dst, &myShiTomasi_minVal, &myShiTomasi_maxVal, 0, 0, Mat() ); /* Create Window and Trackbar */ - namedWindow( myShiTomasi_window, CV_WINDOW_AUTOSIZE ); + namedWindow( myShiTomasi_window, WINDOW_AUTOSIZE ); createTrackbar( " Quality Level:", myShiTomasi_window, &myShiTomasi_qualityLevel, max_qualityLevel, myShiTomasi_function ); myShiTomasi_function( 0, 0 ); diff --git a/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp index e77500bbfe..e048f057b2 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp +++ b/samples/cpp/tutorial_code/TrackingMotion/cornerHarris_Demo.cpp @@ -34,7 +34,7 @@ int main( int, char** argv ) cvtColor( src, src_gray, COLOR_BGR2GRAY ); /// Create a window and a trackbar - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); createTrackbar( "Threshold: ", source_window, &thresh, max_thresh, cornerHarris_demo ); imshow( source_window, src ); @@ -77,6 +77,6 @@ void cornerHarris_demo( int, void* ) } } /// Showing the result - namedWindow( corners_window, CV_WINDOW_AUTOSIZE ); + namedWindow( corners_window, WINDOW_AUTOSIZE ); imshow( corners_window, dst_norm_scaled ); } diff --git a/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp index d59d258aaf..320eb100a5 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp +++ b/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp @@ -35,7 +35,7 @@ int main( int, char** argv ) cvtColor( src, src_gray, COLOR_BGR2GRAY ); /// Create Window - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); /// Create Trackbar to set the number of corners createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo ); @@ 
-87,13 +87,13 @@ void goodFeaturesToTrack_Demo( int, void* ) { circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 ); } /// Show what you got - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); imshow( source_window, copy ); /// Set the neeed parameters to find the refined corners Size winSize = Size( 5, 5 ); Size zeroZone = Size( -1, -1 ); - TermCriteria criteria = TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 40, 0.001 ); + TermCriteria criteria = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 40, 0.001 ); /// Calculate the refined corner locations cornerSubPix( src_gray, corners, winSize, zeroZone, criteria ); diff --git a/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp index 0f852d28cc..a0f6d125c1 100644 --- a/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp +++ b/samples/cpp/tutorial_code/TrackingMotion/goodFeaturesToTrack_Demo.cpp @@ -35,7 +35,7 @@ int main( int, char** argv ) cvtColor( src, src_gray, COLOR_BGR2GRAY ); /// Create Window - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); /// Create Trackbar to set the number of corners createTrackbar( "Max corners:", source_window, &maxCorners, maxTrackbar, goodFeaturesToTrack_Demo ); @@ -87,7 +87,7 @@ void goodFeaturesToTrack_Demo( int, void* ) { circle( copy, corners[i], r, Scalar(rng.uniform(0,255), rng.uniform(0,255), rng.uniform(0,255)), -1, 8, 0 ); } /// Show what you got - namedWindow( source_window, CV_WINDOW_AUTOSIZE ); + namedWindow( source_window, WINDOW_AUTOSIZE ); imshow( source_window, copy ); } diff --git a/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp b/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp index 9b28f9d2ee..a3c3f6ee2a 100644 --- a/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp +++ b/samples/cpp/tutorial_code/calib3d/camera_calibration/camera_calibration.cpp @@ -147,7 +147,7 @@ public: view0.copyTo(result); } else if( atImageList < (int)imageList.size() ) - result = imread(imageList[atImageList++], CV_LOAD_IMAGE_COLOR); + result = imread(imageList[atImageList++], IMREAD_COLOR); return result; } diff --git a/samples/cpp/tutorial_code/calib3d/stereoBM/SBM_Sample.cpp b/samples/cpp/tutorial_code/calib3d/stereoBM/SBM_Sample.cpp index 18244c59fb..64c650bb82 100644 --- a/samples/cpp/tutorial_code/calib3d/stereoBM/SBM_Sample.cpp +++ b/samples/cpp/tutorial_code/calib3d/stereoBM/SBM_Sample.cpp @@ -26,8 +26,8 @@ int main( int argc, char** argv ) { readme(); return -1; } //-- 1. Read the images - Mat imgLeft = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE ); - Mat imgRight = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE ); + Mat imgLeft = imread( argv[1], IMREAD_GRAYSCALE ); + Mat imgRight = imread( argv[2], IMREAD_GRAYSCALE ); //-- And create the image in which we will save our disparities Mat imgDisparity16S = Mat( imgLeft.rows, imgLeft.cols, CV_16S ); Mat imgDisparity8U = Mat( imgLeft.rows, imgLeft.cols, CV_8UC1 ); @@ -54,7 +54,7 @@ int main( int argc, char** argv ) //-- 4. Display it as a CV_8UC1 image imgDisparity16S.convertTo( imgDisparity8U, CV_8UC1, 255/(maxVal - minVal)); - namedWindow( windowDisparity, CV_WINDOW_NORMAL ); + namedWindow( windowDisparity, WINDOW_NORMAL ); imshow( windowDisparity, imgDisparity8U ); //-- 5. 
Save the image diff --git a/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp b/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp index 3e2437fef2..f10c4963b4 100644 --- a/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp +++ b/samples/cpp/tutorial_code/core/Matrix/Drawing_1.cpp @@ -64,9 +64,9 @@ int main( void ){ /// 3. Display your stuff! imshow( atom_window, atom_image ); - cvMoveWindow( atom_window, 0, 200 ); + moveWindow( atom_window, 0, 200 ); imshow( rook_window, rook_image ); - cvMoveWindow( rook_window, w, 200 ); + moveWindow( rook_window, w, 200 ); waitKey( 0 ); return(0); diff --git a/samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp b/samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp index c84eccb69e..844bcd27de 100644 --- a/samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp +++ b/samples/cpp/tutorial_code/core/Matrix/Drawing_2.cpp @@ -304,7 +304,7 @@ int Displaying_Random_Text( Mat image, char* window_name, RNG rng ) */ int Displaying_Big_End( Mat image, char* window_name, RNG ) { - Size textsize = getTextSize("OpenCV forever!", CV_FONT_HERSHEY_COMPLEX, 3, 5, 0); + Size textsize = getTextSize("OpenCV forever!", FONT_HERSHEY_COMPLEX, 3, 5, 0); Point org((window_width - textsize.width)/2, (window_height - textsize.height)/2); int lineType = 8; @@ -313,7 +313,7 @@ int Displaying_Big_End( Mat image, char* window_name, RNG ) for( int i = 0; i < 255; i += 2 ) { image2 = image - Scalar::all(i); - putText( image2, "OpenCV forever!", org, CV_FONT_HERSHEY_COMPLEX, 3, + putText( image2, "OpenCV forever!", org, FONT_HERSHEY_COMPLEX, 3, Scalar(i, i, 255), 5, lineType ); imshow( window_name, image2 ); diff --git a/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp b/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp index ba54188aee..dd77bb2eb7 100644 --- a/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp +++ b/samples/cpp/tutorial_code/core/discrete_fourier_transform/discrete_fourier_transform.cpp @@ -22,7 +22,7 @@ int main(int argc, char ** argv) const char* filename = argc >=2 ? argv[1] : "lena.jpg"; - Mat I = imread(filename, CV_LOAD_IMAGE_GRAYSCALE); + Mat I = imread(filename, IMREAD_GRAYSCALE); if( I.empty()) return -1; @@ -67,7 +67,7 @@ int main(int argc, char ** argv) q2.copyTo(q1); tmp.copyTo(q2); - normalize(magI, magI, 0, 1, CV_MINMAX); // Transform the matrix with float values into a + normalize(magI, magI, 0, 1, NORM_MINMAX); // Transform the matrix with float values into a // viewable image form (float between values 0 and 1). 
imshow("Input Image" , I ); // Show the result diff --git a/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp b/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp index 421c8a620f..7bd91ddf6c 100644 --- a/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp +++ b/samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp @@ -36,9 +36,9 @@ int main( int argc, char* argv[]) Mat I, J; if( argc == 4 && !strcmp(argv[3],"G") ) - I = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE); + I = imread(argv[1], IMREAD_GRAYSCALE); else - I = imread(argv[1], CV_LOAD_IMAGE_COLOR); + I = imread(argv[1], IMREAD_COLOR); if (!I.data) { diff --git a/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp b/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp index fd69228a03..6c681d643a 100644 --- a/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp +++ b/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp @@ -22,6 +22,10 @@ static void help( char* progName) // comment out the define to use only the latest C++ API #define DEMO_MIXED_API_USE +#ifdef DEMO_MIXED_API_USE +# include +#endif + int main( int argc, char** argv ) { help(argv[0]); @@ -118,7 +122,7 @@ int main( int argc, char** argv ) cvtColor(I_YUV, I, COLOR_YCrCb2BGR); // and produce the output RGB image - namedWindow("image with grain", CV_WINDOW_AUTOSIZE); // use this to create images + namedWindow("image with grain", WINDOW_AUTOSIZE); // use this to create images #ifdef DEMO_MIXED_API_USE // this is to demonstrate that I and IplI really share the data - the result of the above diff --git a/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp b/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp index 429f52ead9..1c476453d6 100644 --- a/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp +++ b/samples/cpp/tutorial_code/core/mat_mask_operations/mat_mask_operations.cpp @@ -27,12 +27,12 @@ int main( int argc, char* argv[]) Mat I, J, K; if (argc >= 3 && !strcmp("G", argv[2])) - I = imread( filename, CV_LOAD_IMAGE_GRAYSCALE); + I = imread( filename, IMREAD_GRAYSCALE); else - I = imread( filename, CV_LOAD_IMAGE_COLOR); + I = imread( filename, IMREAD_COLOR); - namedWindow("Input", CV_WINDOW_AUTOSIZE); - namedWindow("Output", CV_WINDOW_AUTOSIZE); + namedWindow("Input", WINDOW_AUTOSIZE); + namedWindow("Output", WINDOW_AUTOSIZE); imshow("Input", I); double t = (double)getTickCount(); @@ -43,7 +43,7 @@ int main( int argc, char* argv[]) cout << "Hand written function times passed in seconds: " << t << endl; imshow("Output", J); - cvWaitKey(0); + waitKey(); Mat kern = (Mat_(3,3) << 0, -1, 0, -1, 5, -1, @@ -55,7 +55,7 @@ int main( int argc, char* argv[]) imshow("Output", K); - cvWaitKey(0); + waitKey(); return 0; } void Sharpen(const Mat& myImage,Mat& Result) diff --git a/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp b/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp index bdfd0789c0..5ec82d5d3f 100644 --- a/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp +++ b/samples/cpp/tutorial_code/features2D/SURF_FlannMatcher.cpp @@ -25,8 +25,8 @@ int main( int argc, char** argv ) if( argc != 3 ) { readme(); return -1; } - Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE ); - Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE 
); + Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE ); + Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE ); if( !img_1.data || !img_2.data ) { std::cout<< " --(!) Error reading images " << std::endl; return -1; } diff --git a/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp b/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp index cf99d4e6e7..93826053f2 100644 --- a/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp +++ b/samples/cpp/tutorial_code/features2D/SURF_Homography.cpp @@ -26,8 +26,8 @@ int main( int argc, char** argv ) if( argc != 3 ) { readme(); return -1; } - Mat img_object = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE ); - Mat img_scene = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE ); + Mat img_object = imread( argv[1], IMREAD_GRAYSCALE ); + Mat img_scene = imread( argv[2], IMREAD_GRAYSCALE ); if( !img_object.data || !img_scene.data ) { std::cout<< " --(!) Error reading images " << std::endl; return -1; } diff --git a/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp b/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp index 527e5dd951..56067c83ce 100644 --- a/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp +++ b/samples/cpp/tutorial_code/features2D/SURF_descriptor.cpp @@ -24,8 +24,8 @@ int main( int argc, char** argv ) if( argc != 3 ) { return -1; } - Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE ); - Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE ); + Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE ); + Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE ); if( !img_1.data || !img_2.data ) { return -1; } diff --git a/samples/cpp/tutorial_code/features2D/SURF_detector.cpp b/samples/cpp/tutorial_code/features2D/SURF_detector.cpp index 2625f1df3b..a1288a8c85 100644 --- a/samples/cpp/tutorial_code/features2D/SURF_detector.cpp +++ b/samples/cpp/tutorial_code/features2D/SURF_detector.cpp @@ -24,8 +24,8 @@ int main( int argc, char** argv ) if( argc != 3 ) { readme(); return -1; } - Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE ); - Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE ); + Mat img_1 = imread( argv[1], IMREAD_GRAYSCALE ); + Mat img_2 = imread( argv[2], IMREAD_GRAYSCALE ); if( !img_1.data || !img_2.data ) { std::cout<< " --(!) Error reading images " << std::endl; return -1; } diff --git a/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp b/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp index 9f2e31e753..44ef32c0a7 100644 --- a/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp +++ b/samples/cpp/tutorial_code/introduction/display_image/display_image.cpp @@ -14,17 +14,17 @@ int main( int argc, char** argv ) } Mat image; - image = imread(argv[1], CV_LOAD_IMAGE_COLOR); // Read the file + image = imread(argv[1], IMREAD_COLOR); // Read the file - if(! image.data ) // Check for invalid input + if(! image.data ) // Check for invalid input { cout << "Could not open or find the image" << std::endl ; return -1; } - namedWindow( "Display window", CV_WINDOW_AUTOSIZE );// Create a window for display. - imshow( "Display window", image ); // Show our image inside it. + namedWindow( "Display window", WINDOW_AUTOSIZE ); // Create a window for display. + imshow( "Display window", image ); // Show our image inside it. 
- waitKey(0); // Wait for a keystroke in the window + waitKey(0); // Wait for a keystroke in the window return 0; } \ No newline at end of file diff --git a/samples/cpp/tutorial_code/introduction/windows_visual_studio_Opencv/Test.cpp b/samples/cpp/tutorial_code/introduction/windows_visual_studio_Opencv/Test.cpp index 7f43f0e779..b1bac5babe 100644 --- a/samples/cpp/tutorial_code/introduction/windows_visual_studio_Opencv/Test.cpp +++ b/samples/cpp/tutorial_code/introduction/windows_visual_studio_Opencv/Test.cpp @@ -59,10 +59,10 @@ int main(int argc, char *argv[]) return -1; } - Size refS = Size((int) captRefrnc.get(CV_CAP_PROP_FRAME_WIDTH), - (int) captRefrnc.get(CV_CAP_PROP_FRAME_HEIGHT)), - uTSi = Size((int) captUndTst.get(CV_CAP_PROP_FRAME_WIDTH), - (int) captUndTst.get(CV_CAP_PROP_FRAME_HEIGHT)); + Size refS = Size((int) captRefrnc.get(CAP_PROP_FRAME_WIDTH), + (int) captRefrnc.get(CAP_PROP_FRAME_HEIGHT)), + uTSi = Size((int) captUndTst.get(CAP_PROP_FRAME_WIDTH), + (int) captUndTst.get(CAP_PROP_FRAME_HEIGHT)); if (refS != uTSi) { @@ -74,13 +74,13 @@ int main(int argc, char *argv[]) const char* WIN_RF = "Reference"; // Windows - namedWindow(WIN_RF, CV_WINDOW_AUTOSIZE ); - namedWindow(WIN_UT, CV_WINDOW_AUTOSIZE ); - cvMoveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0) - cvMoveWindow(WIN_UT, refS.width, 0); //1500, 2 + namedWindow(WIN_RF, WINDOW_AUTOSIZE ); + namedWindow(WIN_UT, WINDOW_AUTOSIZE ); + moveWindow(WIN_RF, 400 , 0); //750, 2 (bernat =0) + moveWindow(WIN_UT, refS.width, 0); //1500, 2 cout << "Frame resolution: Width=" << refS.width << " Height=" << refS.height - << " of nr#: " << captRefrnc.get(CV_CAP_PROP_FRAME_COUNT) << endl; + << " of nr#: " << captRefrnc.get(CAP_PROP_FRAME_COUNT) << endl; cout << "PSNR trigger value " << setiosflags(ios::fixed) << setprecision(3) << psnrTriggerValue << endl; @@ -124,7 +124,7 @@ int main(int argc, char *argv[]) imshow( WIN_RF, frameReference); imshow( WIN_UT, frameUnderTest); - c = (char)cvWaitKey(delay); + c = (char)waitKey(delay); if (c == 27) break; } diff --git a/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp b/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp index 7ada2777a8..54e7741ac1 100644 --- a/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp +++ b/samples/cpp/tutorial_code/objectDetection/objectDetection.cpp @@ -8,6 +8,8 @@ #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/core/utility.hpp" +#include "opencv2/highgui/highgui_c.h" + #include #include diff --git a/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp b/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp index ffaf21123c..84eedc8132 100644 --- a/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp +++ b/samples/cpp/tutorial_code/objectDetection/objectDetection2.cpp @@ -8,6 +8,8 @@ #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/core/utility.hpp" +#include "opencv2/highgui/highgui_c.h" + #include #include diff --git a/samples/cpp/video_dmtx.cpp b/samples/cpp/video_dmtx.cpp index c5eaf1e393..88dc28a418 100644 --- a/samples/cpp/video_dmtx.cpp +++ b/samples/cpp/video_dmtx.cpp @@ -44,7 +44,7 @@ namespace char filename[200]; string window_name = "video | q or esc to quit"; cout << "press space to save a picture. 
q or esc to quit" << endl; - namedWindow(window_name, CV_WINDOW_KEEPRATIO); //resizable window; + namedWindow(window_name, WINDOW_KEEPRATIO); //resizable window; Mat frame; for (;;) { diff --git a/samples/cpp/videostab.cpp b/samples/cpp/videostab.cpp index 025a0279b2..686b3636f4 100644 --- a/samples/cpp/videostab.cpp +++ b/samples/cpp/videostab.cpp @@ -48,7 +48,7 @@ void run() if (!outputPath.empty()) { if (!writer.isOpened()) - writer.open(outputPath, CV_FOURCC('X','V','I','D'), + writer.open(outputPath, VideoWriter::fourcc('X','V','I','D'), outputFps, stabilizedFrame.size()); writer << stabilizedFrame; } diff --git a/samples/cpp/watershed.cpp b/samples/cpp/watershed.cpp index 7494754086..cd47505045 100644 --- a/samples/cpp/watershed.cpp +++ b/samples/cpp/watershed.cpp @@ -29,11 +29,11 @@ static void onMouse( int event, int x, int y, int flags, void* ) { if( x < 0 || x >= img.cols || y < 0 || y >= img.rows ) return; - if( event == CV_EVENT_LBUTTONUP || !(flags & CV_EVENT_FLAG_LBUTTON) ) + if( event == EVENT_LBUTTONUP || !(flags & EVENT_FLAG_LBUTTON) ) prevPt = Point(-1,-1); - else if( event == CV_EVENT_LBUTTONDOWN ) + else if( event == EVENT_LBUTTONDOWN ) prevPt = Point(x,y); - else if( event == CV_EVENT_MOUSEMOVE && (flags & CV_EVENT_FLAG_LBUTTON) ) + else if( event == EVENT_MOUSEMOVE && (flags & EVENT_FLAG_LBUTTON) ) { Point pt(x, y); if( prevPt.x < 0 ) diff --git a/samples/gpu/hog.cpp b/samples/gpu/hog.cpp index 3e451e87fa..13f19a9e1e 100644 --- a/samples/gpu/hog.cpp +++ b/samples/gpu/hog.cpp @@ -345,7 +345,7 @@ void App::run() { if (!video_writer.isOpened()) { - video_writer.open(args.dst_video, CV_FOURCC('x','v','i','d'), args.dst_video_fps, + video_writer.open(args.dst_video, VideoWriter::fourcc('x','v','i','d'), args.dst_video_fps, img_to_show.size(), true); if (!video_writer.isOpened()) throw std::runtime_error("can't create video writer"); diff --git a/samples/gpu/morphology.cpp b/samples/gpu/morphology.cpp index 13b15807a8..1ed8f96dc9 100644 --- a/samples/gpu/morphology.cpp +++ b/samples/gpu/morphology.cpp @@ -102,7 +102,7 @@ int main( int argc, char** argv ) OpenClose(open_close_pos, 0); ErodeDilate(erode_dilate_pos, 0); - c = cvWaitKey(0); + c = waitKey(); if( (char)c == 27 ) break; diff --git a/samples/gpu/performance/tests.cpp b/samples/gpu/performance/tests.cpp index 8b213559c9..7c7969de91 100644 --- a/samples/gpu/performance/tests.cpp +++ b/samples/gpu/performance/tests.cpp @@ -275,7 +275,7 @@ TEST(meanShift) TEST(SURF) { - Mat src = imread(abspath("aloeL.jpg"), CV_LOAD_IMAGE_GRAYSCALE); + Mat src = imread(abspath("aloeL.jpg"), IMREAD_GRAYSCALE); if (src.empty()) throw runtime_error("can't open aloeL.jpg"); SURF surf; @@ -305,7 +305,7 @@ TEST(SURF) TEST(FAST) { - Mat src = imread(abspath("aloeL.jpg"), CV_LOAD_IMAGE_GRAYSCALE); + Mat src = imread(abspath("aloeL.jpg"), IMREAD_GRAYSCALE); if (src.empty()) throw runtime_error("can't open aloeL.jpg"); vector keypoints; @@ -330,7 +330,7 @@ TEST(FAST) TEST(ORB) { - Mat src = imread(abspath("aloeL.jpg"), CV_LOAD_IMAGE_GRAYSCALE); + Mat src = imread(abspath("aloeL.jpg"), IMREAD_GRAYSCALE); if (src.empty()) throw runtime_error("can't open aloeL.jpg"); ORB orb(4000); @@ -1055,7 +1055,7 @@ TEST(equalizeHist) TEST(Canny) { - Mat img = imread(abspath("aloeL.jpg"), CV_LOAD_IMAGE_GRAYSCALE); + Mat img = imread(abspath("aloeL.jpg"), IMREAD_GRAYSCALE); if (img.empty()) throw runtime_error("can't open aloeL.jpg"); diff --git a/samples/gpu/super_resolution.cpp b/samples/gpu/super_resolution.cpp index dca9e8b017..4f3b4e20e9 100644 --- 
a/samples/gpu/super_resolution.cpp +++ b/samples/gpu/super_resolution.cpp @@ -146,7 +146,7 @@ int main(int argc, const char* argv[]) if (!outputVideoName.empty()) { if (!writer.isOpened()) - writer.open(outputVideoName, CV_FOURCC('X', 'V', 'I', 'D'), 25.0, result.size()); + writer.open(outputVideoName, VideoWriter::fourcc('X', 'V', 'I', 'D'), 25.0, result.size()); writer << result; } } diff --git a/samples/gpu/surf_keypoint_matcher.cpp b/samples/gpu/surf_keypoint_matcher.cpp index f4c5e73f79..fd3578d9f4 100644 --- a/samples/gpu/surf_keypoint_matcher.cpp +++ b/samples/gpu/surf_keypoint_matcher.cpp @@ -33,12 +33,12 @@ int main(int argc, char* argv[]) { if (string(argv[i]) == "--left") { - img1.upload(imread(argv[++i], CV_LOAD_IMAGE_GRAYSCALE)); + img1.upload(imread(argv[++i], IMREAD_GRAYSCALE)); CV_Assert(!img1.empty()); } else if (string(argv[i]) == "--right") { - img2.upload(imread(argv[++i], CV_LOAD_IMAGE_GRAYSCALE)); + img2.upload(imread(argv[++i], IMREAD_GRAYSCALE)); CV_Assert(!img2.empty()); } else if (string(argv[i]) == "--help") diff --git a/samples/gpu/video_writer.cpp b/samples/gpu/video_writer.cpp index e5154d53a0..7976567526 100644 --- a/samples/gpu/video_writer.cpp +++ b/samples/gpu/video_writer.cpp @@ -55,7 +55,7 @@ int main(int argc, const char* argv[]) std::cout << "Open CPU Writer" << std::endl; - if (!writer.open("output_cpu.avi", CV_FOURCC('X', 'V', 'I', 'D'), FPS, frame.size())) + if (!writer.open("output_cpu.avi", cv::VideoWriter::fourcc('X', 'V', 'I', 'D'), FPS, frame.size())) return -1; } diff --git a/samples/ocl/facedetect.cpp b/samples/ocl/facedetect.cpp index b5bab18e86..108f4bc2d7 100644 --- a/samples/ocl/facedetect.cpp +++ b/samples/ocl/facedetect.cpp @@ -4,6 +4,9 @@ #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/ocl/ocl.hpp" + +#include "opencv2/highgui/highgui_c.h" + #include #include diff --git a/samples/ocl/hog.cpp b/samples/ocl/hog.cpp index a10ad94cd2..d5fe9af7e7 100644 --- a/samples/ocl/hog.cpp +++ b/samples/ocl/hog.cpp @@ -345,7 +345,7 @@ void App::run() { if (!video_writer.isOpened()) { - video_writer.open(args.dst_video, CV_FOURCC('x','v','i','d'), args.dst_video_fps, + video_writer.open(args.dst_video, VideoWriter::fourcc('x','v','i','d'), args.dst_video_fps, img_to_show.size(), true); if (!video_writer.isOpened()) throw std::runtime_error("can't create video writer"); From 69648f0a6ffa9f8c8665ddee2a94982f33740a30 Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Sun, 7 Apr 2013 23:23:53 +0400 Subject: [PATCH 3/6] Make photo.hpp independent from C API --- apps/haartraining/CMakeLists.txt | 2 +- apps/traincascade/CMakeLists.txt | 2 +- include/opencv/cv.h | 2 ++ include/opencv/cv.hpp | 1 + include/opencv/cvaux.h | 2 ++ modules/photo/include/opencv2/photo.hpp | 12 +++--------- modules/photo/src/denoising.cpp | 16 ++++++++-------- modules/photo/src/inpaint.cpp | 7 ++++--- modules/photo/test/test_inpaint.cpp | 4 ++-- modules/python/src2/cv2.cpp | 1 + samples/cpp/inpaint.cpp | 2 +- 11 files changed, 26 insertions(+), 25 deletions(-) diff --git a/apps/haartraining/CMakeLists.txt b/apps/haartraining/CMakeLists.txt index 953be3b7e5..e25f56fc0e 100644 --- a/apps/haartraining/CMakeLists.txt +++ b/apps/haartraining/CMakeLists.txt @@ -1,4 +1,4 @@ -SET(OPENCV_HAARTRAINING_DEPS opencv_core opencv_imgproc opencv_highgui opencv_objdetect opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy) +SET(OPENCV_HAARTRAINING_DEPS opencv_core opencv_imgproc opencv_photo opencv_highgui opencv_objdetect 
opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy) ocv_check_dependencies(${OPENCV_HAARTRAINING_DEPS}) if(NOT OCV_DEPENDENCIES_FOUND) diff --git a/apps/traincascade/CMakeLists.txt b/apps/traincascade/CMakeLists.txt index f6fa679a9a..72e51b9ea0 100644 --- a/apps/traincascade/CMakeLists.txt +++ b/apps/traincascade/CMakeLists.txt @@ -1,4 +1,4 @@ -set(OPENCV_TRAINCASCADE_DEPS opencv_core opencv_ml opencv_imgproc opencv_objdetect opencv_highgui opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy) +set(OPENCV_TRAINCASCADE_DEPS opencv_core opencv_ml opencv_imgproc opencv_photo opencv_objdetect opencv_highgui opencv_calib3d opencv_video opencv_features2d opencv_flann opencv_legacy) ocv_check_dependencies(${OPENCV_TRAINCASCADE_DEPS}) if(NOT OCV_DEPENDENCIES_FOUND) diff --git a/include/opencv/cv.h b/include/opencv/cv.h index ce0d4e20f8..87fc0f2cfe 100644 --- a/include/opencv/cv.h +++ b/include/opencv/cv.h @@ -62,6 +62,8 @@ #include "opencv2/core/core_c.h" #include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/photo/photo_c.h" + #include "opencv2/video.hpp" #include "opencv2/features2d.hpp" #include "opencv2/flann.hpp" diff --git a/include/opencv/cv.hpp b/include/opencv/cv.hpp index f1c5a1d275..6b579ff91c 100644 --- a/include/opencv/cv.hpp +++ b/include/opencv/cv.hpp @@ -50,6 +50,7 @@ #include "cv.h" #include "opencv2/core.hpp" #include "opencv2/imgproc.hpp" +#include "opencv2/photo.hpp" #include "opencv2/highgui.hpp" #endif diff --git a/include/opencv/cvaux.h b/include/opencv/cvaux.h index 6f8077b949..78777ac1b2 100644 --- a/include/opencv/cvaux.h +++ b/include/opencv/cvaux.h @@ -48,6 +48,8 @@ #include "opencv2/core/core_c.h" #include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/photo/photo_c.h" + #include "opencv2/video.hpp" #include "opencv2/features2d.hpp" #include "opencv2/calib3d.hpp" diff --git a/modules/photo/include/opencv2/photo.hpp b/modules/photo/include/opencv2/photo.hpp index 07e7e098a9..185b8dcc90 100644 --- a/modules/photo/include/opencv2/photo.hpp +++ b/modules/photo/include/opencv2/photo.hpp @@ -46,10 +46,6 @@ #include "opencv2/core.hpp" #include "opencv2/imgproc.hpp" -#include "opencv2/photo/photo_c.h" - -#ifdef __cplusplus - /*! \namespace cv Namespace where all the C++ OpenCV functionality resides */ @@ -59,8 +55,8 @@ namespace cv //! the inpainting algorithm enum { - INPAINT_NS=CV_INPAINT_NS, // Navier-Stokes algorithm - INPAINT_TELEA=CV_INPAINT_TELEA // A. Telea algorithm + INPAINT_NS = 0, // Navier-Stokes algorithm + INPAINT_TELEA = 1 // A. Telea algorithm }; //! restores the damaged image areas using one of the available intpainting algorithms @@ -84,8 +80,6 @@ CV_EXPORTS_W void fastNlMeansDenoisingColoredMulti( InputArrayOfArrays srcImgs, float h = 3, float hColor = 3, int templateWindowSize = 7, int searchWindowSize = 21); -} - -#endif //__cplusplus +} // cv #endif diff --git a/modules/photo/src/denoising.cpp b/modules/photo/src/denoising.cpp index b2349ba273..4d3e6c8f94 100644 --- a/modules/photo/src/denoising.cpp +++ b/modules/photo/src/denoising.cpp @@ -74,7 +74,7 @@ void cv::fastNlMeansDenoising( InputArray _src, OutputArray _dst, float h, src, dst, templateWindowSize, searchWindowSize, h)); break; default: - CV_Error(CV_StsBadArg, + CV_Error(Error::StsBadArg, "Unsupported image format! 
Only CV_8UC1, CV_8UC2 and CV_8UC3 are supported"); } } @@ -88,7 +88,7 @@ void cv::fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst, Mat dst = _dst.getMat(); if (src.type() != CV_8UC3) { - CV_Error(CV_StsBadArg, "Type of input image should be CV_8UC3!"); + CV_Error(Error::StsBadArg, "Type of input image should be CV_8UC3!"); return; } @@ -118,27 +118,27 @@ static void fastNlMeansDenoisingMultiCheckPreconditions( { int src_imgs_size = (int)srcImgs.size(); if (src_imgs_size == 0) { - CV_Error(CV_StsBadArg, "Input images vector should not be empty!"); + CV_Error(Error::StsBadArg, "Input images vector should not be empty!"); } if (temporalWindowSize % 2 == 0 || searchWindowSize % 2 == 0 || templateWindowSize % 2 == 0) { - CV_Error(CV_StsBadArg, "All windows sizes should be odd!"); + CV_Error(Error::StsBadArg, "All windows sizes should be odd!"); } int temporalWindowHalfSize = temporalWindowSize / 2; if (imgToDenoiseIndex - temporalWindowHalfSize < 0 || imgToDenoiseIndex + temporalWindowHalfSize >= src_imgs_size) { - CV_Error(CV_StsBadArg, + CV_Error(Error::StsBadArg, "imgToDenoiseIndex and temporalWindowSize " "should be choosen corresponding srcImgs size!"); } for (int i = 1; i < src_imgs_size; i++) { if (srcImgs[0].size() != srcImgs[i].size() || srcImgs[0].type() != srcImgs[i].type()) { - CV_Error(CV_StsBadArg, "Input images should have the same size and type!"); + CV_Error(Error::StsBadArg, "Input images should have the same size and type!"); } } } @@ -177,7 +177,7 @@ void cv::fastNlMeansDenoisingMulti( InputArrayOfArrays _srcImgs, OutputArray _ds dst, templateWindowSize, searchWindowSize, h)); break; default: - CV_Error(CV_StsBadArg, + CV_Error(Error::StsBadArg, "Unsupported matrix format! Only uchar, Vec2b, Vec3b are supported"); } } @@ -201,7 +201,7 @@ void cv::fastNlMeansDenoisingColoredMulti( InputArrayOfArrays _srcImgs, OutputAr int src_imgs_size = (int)srcImgs.size(); if (srcImgs[0].type() != CV_8UC3) { - CV_Error(CV_StsBadArg, "Type of input images should be CV_8UC3!"); + CV_Error(Error::StsBadArg, "Type of input images should be CV_8UC3!"); return; } diff --git a/modules/photo/src/inpaint.cpp b/modules/photo/src/inpaint.cpp index ce05fc3d1a..ec91e3c1bf 100644 --- a/modules/photo/src/inpaint.cpp +++ b/modules/photo/src/inpaint.cpp @@ -47,6 +47,7 @@ #include "precomp.hpp" #include "opencv2/imgproc/imgproc_c.h" +#include "opencv2/photo/photo_c.h" #undef CV_MAT_ELEM_PTR_FAST #define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \ @@ -782,7 +783,7 @@ cvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_i cvSet(f,cvScalar(INSIDE,0,0,0),mask); cvSet(t,cvScalar(0,0,0,0),band); - if( flags == CV_INPAINT_TELEA ) + if( flags == cv::INPAINT_TELEA ) { out = cvCreateMat(erows, ecols, CV_8UC1); el_range = cvCreateStructuringElementEx(2*range+1,2*range+1, @@ -799,10 +800,10 @@ cvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_i icvCalcFMM(out,t,Out,true); icvTeleaInpaintFMM(mask,t,output_img,range,Heap); } - else if (flags == CV_INPAINT_NS) { + else if (flags == cv::INPAINT_NS) { icvNSInpaintFMM(mask,t,output_img,range,Heap); } else { - CV_Error( CV_StsBadArg, "The flags argument must be one of CV_INPAINT_TELEA or CV_INPAINT_NS" ); + CV_Error( cv::Error::StsBadArg, "The flags argument must be one of CV_INPAINT_TELEA or CV_INPAINT_NS" ); } } diff --git a/modules/photo/test/test_inpaint.cpp b/modules/photo/test/test_inpaint.cpp index ef7132d6da..3c341b27a0 100644 --- a/modules/photo/test/test_inpaint.cpp +++ 
b/modules/photo/test/test_inpaint.cpp @@ -84,8 +84,8 @@ void CV_InpaintTest::run( int ) test.setTo(Scalar::all(255), mask1ch); Mat res1, res2; - inpaint( test, mask1ch, res1, 5, CV_INPAINT_NS ); - inpaint( test, mask1ch, res2, 5, CV_INPAINT_TELEA ); + inpaint( test, mask1ch, res1, 5, INPAINT_NS ); + inpaint( test, mask1ch, res2, 5, INPAINT_TELEA ); Mat diff1, diff2; absdiff( orig, res1, diff1 ); diff --git a/modules/python/src2/cv2.cpp b/modules/python/src2/cv2.cpp index 7455c5c6bb..75c8c2ec09 100644 --- a/modules/python/src2/cv2.cpp +++ b/modules/python/src2/cv2.cpp @@ -24,6 +24,7 @@ #include "opencv2/highgui.hpp" #include "opencv2/highgui/highgui_c.h" +#include "opencv2/photo/photo_c.h" #include "opencv2/opencv_modules.hpp" diff --git a/samples/cpp/inpaint.cpp b/samples/cpp/inpaint.cpp index 1ec00034a9..7690595cc2 100644 --- a/samples/cpp/inpaint.cpp +++ b/samples/cpp/inpaint.cpp @@ -81,7 +81,7 @@ int main( int argc, char** argv ) if( c == 'i' || c == ' ' ) { Mat inpainted; - inpaint(img, inpaintMask, inpainted, 3, CV_INPAINT_TELEA); + inpaint(img, inpaintMask, inpainted, 3, INPAINT_TELEA); imshow("inpainted image", inpainted); } } From ae4e76395f475c67d2065a4a2ebd420fe53e138b Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Mon, 8 Apr 2013 00:24:39 +0400 Subject: [PATCH 4/6] Make flann headers independent from OpenCV C API --- modules/features2d/include/opencv2/features2d.hpp | 3 +-- modules/flann/include/opencv2/flann.hpp | 5 ----- modules/flann/include/opencv2/flann/miniflann.hpp | 4 ---- modules/flann/test/test_lshtable_badarg.cpp | 6 +++--- 4 files changed, 4 insertions(+), 14 deletions(-) diff --git a/modules/features2d/include/opencv2/features2d.hpp b/modules/features2d/include/opencv2/features2d.hpp index 071bea6aa9..5997668fee 100644 --- a/modules/features2d/include/opencv2/features2d.hpp +++ b/modules/features2d/include/opencv2/features2d.hpp @@ -43,10 +43,9 @@ #ifndef __OPENCV_FEATURES_2D_HPP__ #define __OPENCV_FEATURES_2D_HPP__ -#include "opencv2/flann/miniflann.hpp" - #ifdef __cplusplus #include "opencv2/core.hpp" +#include "opencv2/flann/miniflann.hpp" #include namespace cv diff --git a/modules/flann/include/opencv2/flann.hpp b/modules/flann/include/opencv2/flann.hpp index 0fc9298e35..36ca8c7c3a 100644 --- a/modules/flann/include/opencv2/flann.hpp +++ b/modules/flann/include/opencv2/flann.hpp @@ -43,9 +43,6 @@ #ifndef _OPENCV_FLANN_HPP_ #define _OPENCV_FLANN_HPP_ -#ifdef __cplusplus - -#include "opencv2/core/types_c.h" #include "opencv2/core.hpp" #include "opencv2/flann/miniflann.hpp" #include "opencv2/flann/flann_base.hpp" @@ -422,6 +419,4 @@ FLANN_DEPRECATED int hierarchicalClustering(const Mat& features, Mat& centers, c } } // namespace cv::flann -#endif // __cplusplus - #endif diff --git a/modules/flann/include/opencv2/flann/miniflann.hpp b/modules/flann/include/opencv2/flann/miniflann.hpp index d35f961362..f2acc23bff 100644 --- a/modules/flann/include/opencv2/flann/miniflann.hpp +++ b/modules/flann/include/opencv2/flann/miniflann.hpp @@ -43,8 +43,6 @@ #ifndef _OPENCV_MINIFLANN_HPP_ #define _OPENCV_MINIFLANN_HPP_ -#ifdef __cplusplus - #include "opencv2/core.hpp" #include "opencv2/flann/defines.h" @@ -157,6 +155,4 @@ protected: } } // namespace cv::flann -#endif // __cplusplus - #endif diff --git a/modules/flann/test/test_lshtable_badarg.cpp b/modules/flann/test/test_lshtable_badarg.cpp index 9e42335c2c..3b776668d1 100644 --- a/modules/flann/test/test_lshtable_badarg.cpp +++ b/modules/flann/test/test_lshtable_badarg.cpp @@ -74,13 +74,13 @@ void 
CV_LshTableBadArgTest::run( int /* start_from */ ) int errors = 0; caller.key_size = 0; - errors += run_test_case(CV_StsBadArg, "key_size is zero", caller); + errors += run_test_case(Error::StsBadArg, "key_size is zero", caller); caller.key_size = static_cast(sizeof(size_t) * CHAR_BIT); - errors += run_test_case(CV_StsBadArg, "key_size is too big", caller); + errors += run_test_case(Error::StsBadArg, "key_size is too big", caller); caller.key_size += cvtest::randInt(rng) % 100; - errors += run_test_case(CV_StsBadArg, "key_size is too big", caller); + errors += run_test_case(Error::StsBadArg, "key_size is too big", caller); if (errors != 0) ts->set_failed_test_info(cvtest::TS::FAIL_MISMATCH); From befd69672095526f5a496352ac2ec1afc7c19a8e Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Mon, 8 Apr 2013 13:21:19 +0400 Subject: [PATCH 5/6] Update documentation --- modules/core/doc/drawing_functions.rst | 29 +++++++++++-------- modules/highgui/doc/qt_new_functions.rst | 6 ++-- .../reading_and_writing_images_and_video.rst | 6 ++-- modules/imgproc/doc/filtering.rst | 2 +- .../objdetect/doc/cascade_classification.rst | 2 +- 5 files changed, 25 insertions(+), 20 deletions(-) diff --git a/modules/core/doc/drawing_functions.rst b/modules/core/doc/drawing_functions.rst index f821fc44ee..4f816c075a 100644 --- a/modules/core/doc/drawing_functions.rst +++ b/modules/core/doc/drawing_functions.rst @@ -30,7 +30,7 @@ circle ---------- Draws a circle. -.. ocv:function:: void circle(Mat& img, Point center, int radius, const Scalar& color, int thickness=1, int lineType=8, int shift=0) +.. ocv:function:: void circle( Mat& img, Point center, int radius, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 ) .. ocv:pyfunction:: cv2.circle(img, center, radius, color[, thickness[, lineType[, shift]]]) -> img @@ -83,9 +83,9 @@ ellipse ----------- Draws a simple or thick elliptic arc or fills an ellipse sector. -.. ocv:function:: void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, const Scalar& color, int thickness=1, int lineType=8, int shift=0) +.. ocv:function:: void ellipse( Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 ) -.. ocv:function:: void ellipse(Mat& img, const RotatedRect& box, const Scalar& color, int thickness=1, int lineType=8) +.. ocv:function:: void ellipse( Mat& img, const RotatedRect& box, const Scalar& color, int thickness=1, int lineType=LINE_8 ) .. ocv:pyfunction:: cv2.ellipse(img, center, axes, angle, startAngle, endAngle, color[, thickness[, lineType[, shift]]]) -> img @@ -162,7 +162,9 @@ fillConvexPoly ------------------ Fills a convex polygon. -.. ocv:function:: void fillConvexPoly(Mat& img, const Point* pts, int npts, const Scalar& color, int lineType=8, int shift=0) +.. ocv:function:: void fillConvexPoly( Mat& img, const Point* pts, int npts, const Scalar& color, int lineType=LINE_8, int shift=0 ) + +.. ocv:function:: void fillConvexPoly( InputOutputArray img, InputArray points, const Scalar& color, int lineType=LINE_8, int shift=0 ) .. ocv:pyfunction:: cv2.fillConvexPoly(img, points, color[, lineType[, shift]]) -> img @@ -192,7 +194,9 @@ fillPoly ------------ Fills the area bounded by one or more polygons. -.. ocv:function:: void fillPoly(Mat& img, const Point** pts, const int* npts, int ncontours, const Scalar& color, int lineType=8, int shift=0, Point offset=Point() ) +.. 
ocv:function:: void fillPoly( Mat& img, const Point** pts, const int* npts, int ncontours, const Scalar& color, int lineType=LINE_8, int shift=0, Point offset=Point() ) + +.. ocv:function:: void fillPoly( InputOutputArray img, InputArrayOfArrays pts, const Scalar& color, int lineType=LINE_8, int shift=0, Point offset=Point() ) .. ocv:pyfunction:: cv2.fillPoly(img, pts, color[, lineType[, shift[, offset]]]) -> img @@ -330,7 +334,7 @@ line -------- Draws a line segment connecting two points. -.. ocv:function:: void line(Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=8, int shift=0) +.. ocv:function:: void line( Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 ) .. ocv:pyfunction:: cv2.line(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img @@ -418,9 +422,9 @@ rectangle ------------- Draws a simple, thick, or filled up-right rectangle. -.. ocv:function:: void rectangle(Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=8, int shift=0) +.. ocv:function:: void rectangle( Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 ) -.. ocv:function:: void rectangle( Mat& img, Rect rec, const Scalar& color, int thickness=1, int lineType=8, int shift=0 ) +.. ocv:function:: void rectangle( Mat& img, Rect rec, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 ) .. ocv:pyfunction:: cv2.rectangle(img, pt1, pt2, color[, thickness[, lineType[, shift]]]) -> img @@ -452,9 +456,9 @@ polylines ------------- Draws several polygonal curves. -.. ocv:function:: void polylines( Mat& img, const Point* const* pts, const int* npts, int ncontours, bool isClosed, const Scalar& color, int thickness=1, int lineType=8, int shift=0 ) +.. ocv:function:: void polylines( Mat& img, const Point* const* pts, const int* npts, int ncontours, bool isClosed, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 ) -.. ocv:function:: void polylines( InputOutputArray img, InputArrayOfArrays pts, bool isClosed, const Scalar& color, int thickness=1, int lineType=8, int shift=0 ) +.. ocv:function:: void polylines( InputOutputArray img, InputArrayOfArrays pts, bool isClosed, const Scalar& color, int thickness=1, int lineType=LINE_8, int shift=0 ) .. ocv:pyfunction:: cv2.polylines(img, pts, isClosed, color[, thickness[, lineType[, shift]]]) -> img @@ -487,7 +491,7 @@ drawContours ---------------- Draws contours outlines or filled contours. -.. ocv:function:: void drawContours( InputOutputArray image, InputArrayOfArrays contours, int contourIdx, const Scalar& color, int thickness=1, int lineType=8, InputArray hierarchy=noArray(), int maxLevel=INT_MAX, Point offset=Point() ) +.. ocv:function:: void drawContours( InputOutputArray image, InputArrayOfArrays contours, int contourIdx, const Scalar& color, int thickness=1, int lineType=LINE_8, InputArray hierarchy=noArray(), int maxLevel=INT_MAX, Point offset=Point() ) .. ocv:pyfunction:: cv2.drawContours(image, contours, contourIdx, color[, thickness[, lineType[, hierarchy[, maxLevel[, offset]]]]]) -> image @@ -570,11 +574,12 @@ putText ----------- Draws a text string. -.. ocv:function:: void putText( Mat& img, const String& text, Point org, int fontFace, double fontScale, Scalar color, int thickness=1, int lineType=8, bool bottomLeftOrigin=false ) +.. 
ocv:function:: void putText( Mat& img, const String& text, Point org, int fontFace, double fontScale, Scalar color, int thickness=1, int lineType=LINE_8, bool bottomLeftOrigin=false ) .. ocv:pyfunction:: cv2.putText(img, text, org, fontFace, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]]) -> None .. ocv:cfunction:: void cvPutText( CvArr* img, const char* text, CvPoint org, const CvFont* font, CvScalar color ) + .. ocv:pyoldfunction:: cv.PutText(img, text, org, font, color)-> None :param img: Image. diff --git a/modules/highgui/doc/qt_new_functions.rst b/modules/highgui/doc/qt_new_functions.rst index 671b558238..ee77e4d58c 100644 --- a/modules/highgui/doc/qt_new_functions.rst +++ b/modules/highgui/doc/qt_new_functions.rst @@ -123,7 +123,7 @@ fontQt ---------- Creates the font to draw a text on an image. -.. ocv:function:: CvFont fontQt(const String& nameFont, int pointSize = -1, Scalar color = Scalar::all(0), int weight = CV_FONT_NORMAL, int style = CV_STYLE_NORMAL, int spacing = 0) +.. ocv:function:: QtFont fontQt( const String& nameFont, int pointSize=-1, Scalar color=Scalar::all(0), int weight=QT_FONT_NORMAL, int style=QT_STYLE_NORMAL, int spacing=0 ) .. ocv:cfunction:: CvFont cvFontQt(const char* nameFont, int pointSize=-1, CvScalar color=cvScalarAll(0), int weight=CV_FONT_NORMAL, int style=CV_STYLE_NORMAL, int spacing=0) @@ -169,7 +169,7 @@ addText ----------- Creates the font to draw a text on an image. -.. ocv:function:: void addText( const Mat& img, const String& text, Point org, CvFont font ) +.. ocv:function:: void addText( const Mat& img, const String& text, Point org, const QtFont& font ) .. ocv:cfunction:: void cvAddText( const CvArr* img, const char* text, CvPoint org, CvFont * arg2 ) @@ -302,7 +302,7 @@ createButton ---------------- Attaches a button to the control panel. -.. ocv:function:: int createButton( const String& bar_name, ButtonCallback on_change, void* userdata=NULL, int type=CV_PUSH_BUTTON, bool initial_button_state=0 ) +.. ocv:function:: int createButton( const String& bar_name, ButtonCallback on_change, void* userdata=0, int type=QT_PUSH_BUTTON, bool initial_button_state=false ) .. ocv:cfunction:: int cvCreateButton( const char* button_name=NULL, CvButtonCallback on_change=NULL, void* userdata=NULL, int button_type=CV_PUSH_BUTTON, int initial_button_state=0 ) diff --git a/modules/highgui/doc/reading_and_writing_images_and_video.rst b/modules/highgui/doc/reading_and_writing_images_and_video.rst index 178026c0b7..914856dc3a 100644 --- a/modules/highgui/doc/reading_and_writing_images_and_video.rst +++ b/modules/highgui/doc/reading_and_writing_images_and_video.rst @@ -59,7 +59,7 @@ imread ------ Loads an image from a file. -.. ocv:function:: Mat imread( const String& filename, int flags=1 ) +.. ocv:function:: Mat imread( const String& filename, int flags=IMREAD_COLOR ) .. ocv:pyfunction:: cv2.imread(filename[, flags]) -> retval @@ -321,9 +321,9 @@ VideoCapture::retrieve ---------------------- Decodes and returns the grabbed video frame. -.. ocv:function:: bool VideoCapture::retrieve(Mat& image, int channel=0) +.. ocv:function:: bool VideoCapture::retrieve( Mat& image, int flag=0 ) -.. ocv:pyfunction:: cv2.VideoCapture.retrieve([image[, channel]]) -> retval, image +.. ocv:pyfunction:: cv2.VideoCapture.retrieve([image[, flag]]) -> retval, image .. 
ocv:cfunction:: IplImage* cvRetrieveFrame( CvCapture* capture, int streamIdx=0 ) diff --git a/modules/imgproc/doc/filtering.rst b/modules/imgproc/doc/filtering.rst index 0b1f9a34e4..81a64bd333 100755 --- a/modules/imgproc/doc/filtering.rst +++ b/modules/imgproc/doc/filtering.rst @@ -1356,7 +1356,7 @@ pyrMeanShiftFiltering --------------------- Performs initial step of meanshift segmentation of an image. -.. ocv:function:: void pyrMeanShiftFiltering( InputArray src, OutputArray dst, double sp, double sr, int maxLevel=1, TermCriteria termcrit=TermCriteria( TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) ) +.. ocv:function:: void pyrMeanShiftFiltering( InputArray src, OutputArray dst, double sp, double sr, int maxLevel=1, TermCriteria termcrit=TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS,5,1) ) .. ocv:pyfunction:: cv2.pyrMeanShiftFiltering(src, sp, sr[, dst[, maxLevel[, termcrit]]]) -> dst diff --git a/modules/objdetect/doc/cascade_classification.rst b/modules/objdetect/doc/cascade_classification.rst index 2809ab32b7..8428079641 100644 --- a/modules/objdetect/doc/cascade_classification.rst +++ b/modules/objdetect/doc/cascade_classification.rst @@ -191,7 +191,7 @@ Detects objects of different sizes in the input image. The detected objects are .. ocv:function:: void CascadeClassifier::detectMultiScale( const Mat& image, vector<Rect>& objects, double scaleFactor=1.1, int minNeighbors=3, int flags=0, Size minSize=Size(), Size maxSize=Size()) .. ocv:pyfunction:: cv2.CascadeClassifier.detectMultiScale(image[, scaleFactor[, minNeighbors[, flags[, minSize[, maxSize]]]]]) -> objects -.. ocv:pyfunction:: cv2.CascadeClassifier.detectMultiScale(image, rejectLevels, levelWeights[, scaleFactor[, minNeighbors[, flags[, minSize[, maxSize[, outputRejectLevels]]]]]]) -> objects +.. ocv:pyfunction:: cv2.CascadeClassifier.detectMultiScale(image[, scaleFactor[, minNeighbors[, flags[, minSize[, maxSize[, outputRejectLevels]]]]]]) -> objects, rejectLevels, levelWeights ..
ocv:cfunction:: CvSeq* cvHaarDetectObjects( const CvArr* image, CvHaarClassifierCascade* cascade, CvMemStorage* storage, double scale_factor=1.1, int min_neighbors=3, int flags=0, CvSize min_size=cvSize(0,0), CvSize max_size=cvSize(0,0) ) From dd74a851f3524ab3f675baeb2870d8f0b707ac5e Mon Sep 17 00:00:00 2001 From: Andrey Kamaev Date: Mon, 8 Apr 2013 15:09:48 +0400 Subject: [PATCH 6/6] Fix build errors --- modules/highgui/src/cap_ffmpeg.cpp | 2 +- modules/highgui/src/ffmpeg_codecs.hpp | 6 ++++-- modules/java/generator/src/cpp/utils.cpp | 12 ++++++------ modules/nonfree/src/sift.cpp | 4 ++-- modules/nonfree/src/surf.cpp | 18 ++++++++---------- modules/nonfree/test/test_detectors.cpp | 6 +++--- .../test_rotation_and_scale_invariance.cpp | 12 ++++++------ .../stitching/detail/motion_estimators.hpp | 8 ++++---- samples/android/hello-android/main.cpp | 4 ++-- 9 files changed, 36 insertions(+), 36 deletions(-) diff --git a/modules/highgui/src/cap_ffmpeg.cpp b/modules/highgui/src/cap_ffmpeg.cpp index 8a370147f2..57f67dab9c 100644 --- a/modules/highgui/src/cap_ffmpeg.cpp +++ b/modules/highgui/src/cap_ffmpeg.cpp @@ -41,7 +41,7 @@ #include "precomp.hpp" -#ifndef WIN32 +#if defined HAVE_FFMPEG && !defined WIN32 #include "cap_ffmpeg_impl.hpp" #else #include "cap_ffmpeg_api.hpp" diff --git a/modules/highgui/src/ffmpeg_codecs.hpp b/modules/highgui/src/ffmpeg_codecs.hpp index 02430ef114..c1e116b756 100644 --- a/modules/highgui/src/ffmpeg_codecs.hpp +++ b/modules/highgui/src/ffmpeg_codecs.hpp @@ -61,8 +61,10 @@ extern "C" { #endif #ifdef WIN32 -# define AVUTIL_COMMON_H -# define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) +# ifdef __OPENCV_BUILD +# define AVUTIL_COMMON_H +# define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) +# endif # include #else diff --git a/modules/java/generator/src/cpp/utils.cpp b/modules/java/generator/src/cpp/utils.cpp index ed964e377e..90abfd9f53 100644 --- a/modules/java/generator/src/cpp/utils.cpp +++ b/modules/java/generator/src/cpp/utils.cpp @@ -44,7 +44,7 @@ JNIEXPORT void JNICALL Java_org_opencv_android_Utils_nBitmapToMat2 // info.format == ANDROID_BITMAP_FORMAT_RGB_565 LOGD("nBitmapToMat: RGB_565 -> CV_8UC4"); Mat tmp(info.height, info.width, CV_8UC2, pixels); - cvtColor(tmp, dst, CV_BGR5652RGBA); + cvtColor(tmp, dst, COLOR_BGR5652RGBA); } AndroidBitmap_unlockPixels(env, bitmap); return; @@ -104,10 +104,10 @@ JNIEXPORT void JNICALL Java_org_opencv_android_Utils_nMatToBitmap2 if(src.type() == CV_8UC1) { LOGD("nMatToBitmap: CV_8UC1 -> RGBA_8888"); - cvtColor(src, tmp, CV_GRAY2RGBA); + cvtColor(src, tmp, COLOR_GRAY2RGBA); } else if(src.type() == CV_8UC3){ LOGD("nMatToBitmap: CV_8UC3 -> RGBA_8888"); - cvtColor(src, tmp, CV_RGB2RGBA); + cvtColor(src, tmp, COLOR_RGB2RGBA); } else if(src.type() == CV_8UC4){ LOGD("nMatToBitmap: CV_8UC4 -> RGBA_8888"); if(needPremultiplyAlpha) cvtColor(src, tmp, COLOR_RGBA2mRGBA); @@ -119,13 +119,13 @@ JNIEXPORT void JNICALL Java_org_opencv_android_Utils_nMatToBitmap2 if(src.type() == CV_8UC1) { LOGD("nMatToBitmap: CV_8UC1 -> RGB_565"); - cvtColor(src, tmp, CV_GRAY2BGR565); + cvtColor(src, tmp, COLOR_GRAY2BGR565); } else if(src.type() == CV_8UC3){ LOGD("nMatToBitmap: CV_8UC3 -> RGB_565"); - cvtColor(src, tmp, CV_RGB2BGR565); + cvtColor(src, tmp, COLOR_RGB2BGR565); } else if(src.type() == CV_8UC4){ LOGD("nMatToBitmap: CV_8UC4 -> RGB_565"); - cvtColor(src, tmp, CV_RGBA2BGR565); + cvtColor(src, tmp, COLOR_RGBA2BGR565); } } AndroidBitmap_unlockPixels(env, bitmap); diff --git 
diff --git a/modules/nonfree/src/sift.cpp b/modules/nonfree/src/sift.cpp
index 4161c4911e..ba65690426 100644
--- a/modules/nonfree/src/sift.cpp
+++ b/modules/nonfree/src/sift.cpp
@@ -732,10 +732,10 @@ void SIFT::operator()(InputArray _image, InputArray _mask,
     Mat image = _image.getMat(), mask = _mask.getMat();
 
     if( image.empty() || image.depth() != CV_8U )
-        CV_Error( CV_StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );
+        CV_Error( Error::StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );
 
     if( !mask.empty() && mask.type() != CV_8UC1 )
-        CV_Error( CV_StsBadArg, "mask has incorrect type (!=CV_8UC1)" );
+        CV_Error( Error::StsBadArg, "mask has incorrect type (!=CV_8UC1)" );
 
     if( useProvidedKeypoints )
     {
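Aside, not part of the patch: the sift.cpp change swaps the C error codes (CV_StsBadArg) for the scoped cv::Error enum; CV_Error still throws cv::Exception. A small illustration under that assumption; the helper name checkInput is made up for this sketch.

    // Illustrative only: a made-up helper showing the new error-code names.
    #include <opencv2/core.hpp>
    #include <cstdio>

    static void checkInput(const cv::Mat& img)
    {
        if (img.empty() || img.depth() != CV_8U)
            CV_Error(cv::Error::StsBadArg, "image is empty or has incorrect depth (!=CV_8U)");
    }

    int main()
    {
        try
        {
            checkInput(cv::Mat());   // triggers the error on purpose
        }
        catch (const cv::Exception& e)
        {
            std::printf("caught: %s\n", e.what());
        }
        return 0;
    }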
diff --git a/modules/nonfree/src/surf.cpp b/modules/nonfree/src/surf.cpp
index ea310478bd..34d7b96f45 100644
--- a/modules/nonfree/src/surf.cpp
+++ b/modules/nonfree/src/surf.cpp
@@ -427,7 +427,7 @@ void SURFFindInvoker::findMaximaInLayer( const Mat& sum, const Mat& mask_sum,
                 float center_j = sum_j + (size-1)*0.5f;
 
                 KeyPoint kpt( center_j, center_i, (float)sizes[layer],
-                              -1, val0, octave, CV_SIGN(trace_ptr[j]) );
+                              -1, val0, octave, (trace_ptr[j] > 0) - (trace_ptr[j] < 0) );
 
                 /* Interpolate maxima location within the 3x3x3 neighbourhood */
                 int ds = size - sizes[layer-1];
@@ -550,7 +550,7 @@ struct SURFInvoker
             {
                 if( i*i + j*j <= ORI_RADIUS*ORI_RADIUS )
                 {
-                    apt[nOriSamples] = cvPoint(i,j);
+                    apt[nOriSamples] = Point(i,j);
                     aptw[nOriSamples++] = G_ori.at(i+ORI_RADIUS,0) * G_ori.at(j+ORI_RADIUS,0);
                 }
             }
@@ -580,9 +580,6 @@ struct SURFInvoker
         float X[nOriSampleBound], Y[nOriSampleBound], angle[nOriSampleBound];
         uchar PATCH[PATCH_SZ+1][PATCH_SZ+1];
         float DX[PATCH_SZ][PATCH_SZ], DY[PATCH_SZ][PATCH_SZ];
-        CvMat matX = cvMat(1, nOriSampleBound, CV_32F, X);
-        CvMat matY = cvMat(1, nOriSampleBound, CV_32F, Y);
-        CvMat _angle = cvMat(1, nOriSampleBound, CV_32F, angle);
         Mat _patch(PATCH_SZ+1, PATCH_SZ+1, CV_8U, PATCH);
 
         int dsize = extended ? 128 : 64;
@@ -594,7 +591,8 @@ struct SURFInvoker
                 maxSize = std::max(maxSize, (*keypoints)[k].size);
         }
         int imaxSize = std::max(cvCeil((PATCH_SZ+1)*maxSize*1.2f/9.0f), 1);
-        Ptr winbuf = cvCreateMat( 1, imaxSize*imaxSize, CV_8U );
+        cv::AutoBuffer winbuf(imaxSize*imaxSize);
+
         for( k = k1; k < k2; k++ )
         {
             int i, j, kk, nangle;
@@ -648,8 +646,8 @@ struct SURFInvoker
                     kp.size = -1;
                     continue;
                 }
-                matX.cols = matY.cols = _angle.cols = nangle;
-                cvCartToPolar( &matX, &matY, 0, &_angle, 1 );
+
+                phase( Mat(1, nangle, CV_32F, X), Mat(1, nangle, CV_32F, Y), Mat(1, nangle, CV_32F, angle), true );
 
                 float bestx = 0, besty = 0, descriptor_mod = 0;
                 for( i = 0; i < 360; i += SURF_ORI_SEARCH_INC )
@@ -680,8 +678,8 @@ struct SURFInvoker
 
             /* Extract a window of pixels around the keypoint of size 20s */
             int win_size = (int)((PATCH_SZ+1)*s);
-            CV_Assert( winbuf->cols >= win_size*win_size );
-            Mat win(win_size, win_size, CV_8U, winbuf->data.ptr);
+            CV_Assert( imaxSize >= win_size );
+            Mat win(win_size, win_size, CV_8U, winbuf);
 
             if( !upright )
             {
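Aside, not part of the patch: the surf.cpp hunks drop the CvMat headers and cvCartToPolar in favour of cv::phase operating on cv::Mat headers that wrap the existing float arrays without copying them. A standalone sketch of that pattern, with made-up values.

    // Wrap plain float arrays in Mat headers (no copy) and let cv::phase
    // compute angles in degrees; the sample values are invented.
    #include <opencv2/core.hpp>
    #include <cstdio>

    int main()
    {
        float X[4] = { 1.f, 0.f, -1.f,  0.f };
        float Y[4] = { 0.f, 1.f,  0.f, -1.f };
        float angle[4];

        // Mat(rows, cols, type, data) only references the existing buffers.
        cv::Mat mx(1, 4, CV_32F, X), my(1, 4, CV_32F, Y), ma(1, 4, CV_32F, angle);
        cv::phase(mx, my, ma, true /* angles in degrees */);

        for (int i = 0; i < 4; i++)
            std::printf("%.1f\n", angle[i]);   // expect roughly 0, 90, 180, 270
        return 0;
    }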
diff --git a/modules/nonfree/test/test_detectors.cpp b/modules/nonfree/test/test_detectors.cpp
index 60d1bc1de3..155c9c869b 100644
--- a/modules/nonfree/test/test_detectors.cpp
+++ b/modules/nonfree/test/test_detectors.cpp
@@ -116,7 +116,7 @@ void showOrig(const Mat& img, const vector& orig_pts)
     cvtColor(img, img_color, COLOR_GRAY2BGR);
 
     for(size_t i = 0; i < orig_pts.size(); ++i)
-        circle(img_color, orig_pts[i].pt, (int)orig_pts[i].size/2, CV_RGB(0, 255, 0));
+        circle(img_color, orig_pts[i].pt, (int)orig_pts[i].size/2, Scalar(0, 255, 0));
 
     namedWindow("O"); imshow("O", img_color);
 }
@@ -128,10 +128,10 @@ void show(const string& name, const Mat& new_img, const vector& new_pt
     cvtColor(new_img, new_img_color, COLOR_GRAY2BGR);
 
     for(size_t i = 0; i < transf_pts.size(); ++i)
-        circle(new_img_color, transf_pts[i].pt, (int)transf_pts[i].size/2, CV_RGB(255, 0, 0));
+        circle(new_img_color, transf_pts[i].pt, (int)transf_pts[i].size/2, Scalar(255, 0, 0));
 
     for(size_t i = 0; i < new_pts.size(); ++i)
-        circle(new_img_color, new_pts[i].pt, (int)new_pts[i].size/2, CV_RGB(0, 0, 255));
+        circle(new_img_color, new_pts[i].pt, (int)new_pts[i].size/2, Scalar(0, 0, 255));
 
     namedWindow(name + "_T"); imshow(name + "_T", new_img_color);
 }
diff --git a/modules/nonfree/test/test_rotation_and_scale_invariance.cpp b/modules/nonfree/test/test_rotation_and_scale_invariance.cpp
index 9ca0225c2d..b63b8b7489 100644
--- a/modules/nonfree/test/test_rotation_and_scale_invariance.cpp
+++ b/modules/nonfree/test/test_rotation_and_scale_invariance.cpp
@@ -232,7 +232,7 @@ protected:
         featureDetector->detect(image0, keypoints0);
         removeVerySmallKeypoints(keypoints0);
         if(keypoints0.size() < 15)
-            CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
+            CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
 
         const int maxAngle = 360, angleStep = 15;
         for(int angle = 0; angle < maxAngle; angle += angleStep)
@@ -262,7 +262,7 @@ protected:
                 float angle0 = keypoints0[matches[m].queryIdx].angle;
                 float angle1 = keypoints1[matches[m].trainIdx].angle;
                 if(angle0 == -1 || angle1 == -1)
-                    CV_Error(CV_StsBadArg, "Given FeatureDetector is not rotation invariant, it can not be tested here.\n");
+                    CV_Error(Error::StsBadArg, "Given FeatureDetector is not rotation invariant, it can not be tested here.\n");
 
                 CV_Assert(angle0 >= 0.f && angle0 < 360.f);
                 CV_Assert(angle1 >= 0.f && angle1 < 360.f);
@@ -347,7 +347,7 @@ protected:
         featureDetector->detect(image0, keypoints0);
         removeVerySmallKeypoints(keypoints0);
         if(keypoints0.size() < 15)
-            CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
+            CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
 
         descriptorExtractor->compute(image0, keypoints0, descriptors0);
         BFMatcher bfmatcher(normType);
@@ -432,7 +432,7 @@ protected:
         featureDetector->detect(image0, keypoints0);
         removeVerySmallKeypoints(keypoints0);
         if(keypoints0.size() < 15)
-            CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
+            CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
 
         for(int scaleIdx = 1; scaleIdx <= 3; scaleIdx++)
         {
@@ -444,7 +444,7 @@ protected:
             featureDetector->detect(image1, keypoints1);
             removeVerySmallKeypoints(keypoints1);
             if(keypoints1.size() < 15)
-                CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
+                CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
 
             if(keypoints1.size() > keypoints0.size())
             {
@@ -553,7 +553,7 @@ protected:
         featureDetector->detect(image0, keypoints0);
         removeVerySmallKeypoints(keypoints0);
         if(keypoints0.size() < 15)
-            CV_Error(CV_StsAssert, "Detector gives too few points in a test image\n");
+            CV_Error(Error::StsAssert, "Detector gives too few points in a test image\n");
 
         Mat descriptors0;
         descriptorExtractor->compute(image0, keypoints0, descriptors0);
diff --git a/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp b/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp
index 637b79a84c..581a0292d9 100644
--- a/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp
+++ b/modules/stitching/include/opencv2/stitching/detail/motion_estimators.hpp
@@ -93,8 +93,8 @@ public:
     double confThresh() const { return conf_thresh_; }
     void setConfThresh(double conf_thresh) { conf_thresh_ = conf_thresh; }
 
-    CvTermCriteria termCriteria() { return term_criteria_; }
-    void setTermCriteria(const CvTermCriteria& term_criteria) { term_criteria_ = term_criteria; }
+    TermCriteria termCriteria() { return term_criteria_; }
+    void setTermCriteria(const TermCriteria& term_criteria) { term_criteria_ = term_criteria; }
 
 protected:
     BundleAdjusterBase(int num_params_per_cam, int num_errs_per_measurement)
@@ -103,7 +103,7 @@ protected:
     {
         setRefinementMask(Mat::ones(3, 3, CV_8U));
         setConfThresh(1.);
-        setTermCriteria(cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 1000, DBL_EPSILON));
+        setTermCriteria(TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 1000, DBL_EPSILON));
     }
 
     // Runs bundle adjustment
@@ -132,7 +132,7 @@ protected:
     double conf_thresh_;
 
     //Levenberg–Marquardt algorithm termination criteria
-    CvTermCriteria term_criteria_;
+    TermCriteria term_criteria_;
 
     // Camera parameters matrix (CV_64F)
     Mat cam_params_;
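Aside, not part of the patch: in motion_estimators.hpp the termination criteria switch from CvTermCriteria/cvTermCriteria to cv::TermCriteria, with CV_TERMCRIT_ITER mapped to TermCriteria::COUNT. A sketch of the equivalent construction; the kmeans call is only an illustrative consumer of such criteria, not something the patch touches.

    // Hedged sketch of the C++ TermCriteria replacing cvTermCriteria.
    #include <opencv2/core.hpp>
    #include <cfloat>

    int main()
    {
        // Old C API: cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 1000, DBL_EPSILON)
        cv::TermCriteria criteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT,
                                  1000, DBL_EPSILON);

        // Any iterative algorithm taking TermCriteria works the same way; kmeans
        // on random data is used here purely as an example.
        cv::Mat samples(100, 2, CV_32F), labels, centers;
        cv::randu(samples, cv::Scalar::all(0), cv::Scalar::all(1));
        cv::kmeans(samples, 3, labels, criteria, 3, cv::KMEANS_PP_CENTERS, centers);
        return 0;
    }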
diff --git a/samples/android/hello-android/main.cpp b/samples/android/hello-android/main.cpp
index 2b985c6421..e595e066d5 100644
--- a/samples/android/hello-android/main.cpp
+++ b/samples/android/hello-android/main.cpp
@@ -11,9 +11,9 @@ int main(int argc, char* argv[])
   printf("%s\n", message);
 
   // put message to simple image
-  Size textsize = getTextSize(message, CV_FONT_HERSHEY_COMPLEX, 3, 5, 0);
+  Size textsize = getTextSize(message, FONT_HERSHEY_COMPLEX, 3, 5, 0);
   Mat img(textsize.height + 20, textsize.width + 20, CV_32FC1, Scalar(230,230,230));
-  putText(img, message, Point(10, img.rows - 10), CV_FONT_HERSHEY_COMPLEX, 3, Scalar(0, 0, 0), 5);
+  putText(img, message, Point(10, img.rows - 10), FONT_HERSHEY_COMPLEX, 3, Scalar(0, 0, 0), 5);
 
   // save\show resulting image
 #if ANDROID