diff --git a/cmake/OpenCVCompilerOptions.cmake b/cmake/OpenCVCompilerOptions.cmake index 13559b5c8a..6c235ebfbf 100644 --- a/cmake/OpenCVCompilerOptions.cmake +++ b/cmake/OpenCVCompilerOptions.cmake @@ -98,6 +98,10 @@ if(CMAKE_COMPILER_IS_GNUCXX) add_extra_compiler_option(-pthread) endif() + if(CMAKE_COMPILER_IS_CLANGCXX) + add_extra_compiler_option(-Qunused-arguments) + endif() + if(OPENCV_WARNINGS_ARE_ERRORS) add_extra_compiler_option(-Werror) endif() @@ -127,6 +131,8 @@ if(CMAKE_COMPILER_IS_GNUCXX) endif() if(ENABLE_SSE2) add_extra_compiler_option(-msse2) + elseif(X86 OR X86_64) + add_extra_compiler_option(-mno-sse2) endif() if(ENABLE_NEON) add_extra_compiler_option("-mfpu=neon") @@ -139,6 +145,8 @@ if(CMAKE_COMPILER_IS_GNUCXX) if(NOT MINGW) if(ENABLE_AVX) add_extra_compiler_option(-mavx) + elseif(X86 OR X86_64) + add_extra_compiler_option(-mno-avx) endif() if(ENABLE_AVX2) add_extra_compiler_option(-mavx2) @@ -152,18 +160,26 @@ if(CMAKE_COMPILER_IS_GNUCXX) if(NOT OPENCV_EXTRA_CXX_FLAGS MATCHES "-mavx") if(ENABLE_SSE3) add_extra_compiler_option(-msse3) + elseif(X86 OR X86_64) + add_extra_compiler_option(-mno-sse3) endif() if(ENABLE_SSSE3) add_extra_compiler_option(-mssse3) + elseif(X86 OR X86_64) + add_extra_compiler_option(-mno-ssse3) endif() if(ENABLE_SSE41) add_extra_compiler_option(-msse4.1) + elseif(X86 OR X86_64) + add_extra_compiler_option(-mno-sse4.1) endif() if(ENABLE_SSE42) add_extra_compiler_option(-msse4.2) + elseif(X86 OR X86_64) + add_extra_compiler_option(-mno-sse4.2) endif() if(ENABLE_POPCNT) @@ -265,6 +281,11 @@ if(MSVC) endif() endif() +if(MSVC12 AND NOT CMAKE_GENERATOR MATCHES "Visual Studio") + set(OPENCV_EXTRA_C_FLAGS "${OPENCV_EXTRA_C_FLAGS} /FS") + set(OPENCV_EXTRA_CXX_FLAGS "${OPENCV_EXTRA_CXX_FLAGS} /FS") +endif() + # Extra link libs if the user selects building static libs: if(NOT BUILD_SHARED_LIBS AND CMAKE_COMPILER_IS_GNUCXX AND NOT ANDROID) # Android does not need these settings because they are already set by toolchain file diff --git a/cmake/OpenCVDetectCXXCompiler.cmake b/cmake/OpenCVDetectCXXCompiler.cmake index 871331883c..c8484dca3f 100644 --- a/cmake/OpenCVDetectCXXCompiler.cmake +++ b/cmake/OpenCVDetectCXXCompiler.cmake @@ -114,7 +114,7 @@ elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^(aarch64.*|AARCH64.*)") endif() -# Similar code is existed in OpenCVConfig.cmake +# Similar code exists in OpenCVConfig.cmake if(NOT DEFINED OpenCV_STATIC) # look for global setting if(NOT DEFINED BUILD_SHARED_LIBS OR BUILD_SHARED_LIBS) diff --git a/cmake/OpenCVFindLibsVideo.cmake b/cmake/OpenCVFindLibsVideo.cmake index af46ac5086..b2d927957f 100644 --- a/cmake/OpenCVFindLibsVideo.cmake +++ b/cmake/OpenCVFindLibsVideo.cmake @@ -216,49 +216,43 @@ if(WITH_FFMPEG) # Do an other trial FIND_FILE(BZIP2_LIBRARIES NAMES libbz2.so.1 PATHS /lib) endif() - endif(HAVE_FFMPEG) - endif() - - if(APPLE) - find_path(FFMPEG_INCLUDE_DIR "libavformat/avformat.h" - PATHS /usr/local /usr /opt - PATH_SUFFIXES include - DOC "The path to FFMPEG headers") - if(FFMPEG_INCLUDE_DIR) - set(HAVE_GENTOO_FFMPEG TRUE) - set(FFMPEG_LIB_DIR "${FFMPEG_INCLUDE_DIR}/../lib" CACHE PATH "Full path of FFMPEG library directory") - if(EXISTS "${FFMPEG_LIB_DIR}/libavcodec.a") - set(HAVE_FFMPEG_CODEC 1) - set(ALIASOF_libavcodec_VERSION "Unknown") - if(EXISTS "${FFMPEG_LIB_DIR}/libavformat.a") - set(HAVE_FFMPEG_FORMAT 1) + else() + find_path(FFMPEG_INCLUDE_DIR "libavformat/avformat.h" + PATHS /usr/local /usr /opt + PATH_SUFFIXES include + DOC "The path to FFMPEG headers") + if(FFMPEG_INCLUDE_DIR) + set(HAVE_GENTOO_FFMPEG 
TRUE) + set(FFMPEG_LIB_DIR "${FFMPEG_INCLUDE_DIR}/../lib" CACHE PATH "Full path of FFMPEG library directory") + find_library(FFMPEG_CODEC_LIB "avcodec" HINTS "${FFMPEG_LIB_DIR}") + find_library(FFMPEG_FORMAT_LIB "avformat" HINTS "${FFMPEG_LIB_DIR}") + find_library(FFMPEG_UTIL_LIB "avutil" HINTS "${FFMPEG_LIB_DIR}") + find_library(FFMPEG_SWSCALE_LIB "swscale" HINTS "${FFMPEG_LIB_DIR}") + find_library(FFMPEG_RESAMPLE_LIB "avresample" HINTS "${FFMPEG_LIB_DIR}") + if(FFMPEG_CODEC_LIB AND FFMPEG_FORMAT_LIB AND + FFMPEG_UTIL_LIB AND FFMPEG_SWSCALE_LIB) + set(ALIASOF_libavcodec_VERSION "Unknown") set(ALIASOF_libavformat_VERSION "Unknown") - if(EXISTS "${FFMPEG_LIB_DIR}/libavutil.a") - set(HAVE_FFMPEG_UTIL 1) - set(ALIASOF_libavutil_VERSION "Unknown") - if(EXISTS "${FFMPEG_LIB_DIR}/libswscale.a") - set(HAVE_FFMPEG_SWSCALE 1) - set(ALIASOF_libswscale_VERSION "Unknown") - set(HAVE_FFMPEG 1) - if(EXISTS "${FFMPEG_LIB_DIR}/libavresample.a") - set(HAVE_FFMPEG_RESAMPLE 1) - set(ALIASOF_libavresample_VERSION "Unknown") - endif() - endif() + set(ALIASOF_libavutil_VERSION "Unknown") + set(ALIASOF_libswscale_VERSION "Unknown") + set(HAVE_FFMPEG 1) + if(FFMPEG_RESAMPLE_LIB) + set(HAVE_FFMPEG_RESAMPLE 1) + set(ALIASOF_libavresample_VERSION "Unknown") endif() endif() - endif() - endif(FFMPEG_INCLUDE_DIR) - if(HAVE_FFMPEG) - set(VIDEOIO_LIBRARIES ${VIDEOIO_LIBRARIES} "${FFMPEG_LIB_DIR}/libavcodec.a" - "${FFMPEG_LIB_DIR}/libavformat.a" "${FFMPEG_LIB_DIR}/libavutil.a" - "${FFMPEG_LIB_DIR}/libswscale.a") - if(HAVE_FFMPEG_RESAMPLE) + endif(FFMPEG_INCLUDE_DIR) + if(HAVE_FFMPEG) + set(VIDEOIO_LIBRARIES ${VIDEOIO_LIBRARIES} "${FFMPEG_LIB_DIR}/libavcodec.a" + "${FFMPEG_LIB_DIR}/libavformat.a" "${FFMPEG_LIB_DIR}/libavutil.a" + "${FFMPEG_LIB_DIR}/libswscale.a") + if(HAVE_FFMPEG_RESAMPLE) set(VIDEOIO_LIBRARIES ${VIDEOIO_LIBRARIES} "${FFMPEG_LIB_DIR}/libavresample.a") - endif() - ocv_include_directories(${FFMPEG_INCLUDE_DIR}) + endif() + ocv_include_directories(${FFMPEG_INCLUDE_DIR}) + endif(HAVE_FFMPEG) endif() - endif(APPLE) + endif() endif(WITH_FFMPEG) # --- VideoInput/DirectShow --- diff --git a/cmake/templates/OpenCVConfig.cmake.in b/cmake/templates/OpenCVConfig.cmake.in index bdc4c56f96..a3cbbc08c7 100644 --- a/cmake/templates/OpenCVConfig.cmake.in +++ b/cmake/templates/OpenCVConfig.cmake.in @@ -77,6 +77,13 @@ if("@USE_IPPICV@" STREQUAL "TRUE") # value is defined by package builder (use ST endif() if(NOT TARGET opencv_core) + # Extract directory name from full path of the file currently being processed. + # Note that CMake 2.8.3 introduced CMAKE_CURRENT_LIST_DIR. We reimplement it + # for older versions of CMake to support these as well. + if(CMAKE_VERSION VERSION_LESS "2.8.3") + get_filename_component(CMAKE_CURRENT_LIST_DIR "${CMAKE_CURRENT_LIST_FILE}" PATH) + endif() + include(${CMAKE_CURRENT_LIST_DIR}/OpenCVModules${OpenCV_MODULES_SUFFIX}.cmake) endif() diff --git a/doc/tutorials/core/adding_images/adding_images.markdown b/doc/tutorials/core/adding_images/adding_images.markdown index b6ef7b7cd2..1565e7ed8b 100644 --- a/doc/tutorials/core/adding_images/adding_images.markdown +++ b/doc/tutorials/core/adding_images/adding_images.markdown @@ -22,7 +22,7 @@ From our previous tutorial, we know already a bit of *Pixel operators*. 
An inter \f[g(x) = (1 - \alpha)f_{0}(x) + \alpha f_{1}(x)\f] By varying \f$\alpha\f$ from \f$0 \rightarrow 1\f$ this operator can be used to perform a temporal -*cross-disolve* between two images or videos, as seen in slide shows and film productions (cool, +*cross-dissolve* between two images or videos, as seen in slide shows and film productions (cool, eh?) Code diff --git a/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.markdown b/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.markdown index f7888590c9..db1f774211 100644 --- a/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.markdown +++ b/doc/tutorials/core/basic_geometric_drawing/basic_geometric_drawing.markdown @@ -145,7 +145,7 @@ Explanation of size **(w/4.0, w/16.0)** - The ellipse is rotated **angle** degrees - The ellipse extends an arc between **0** and **360** degrees - - The color of the figure will be **Scalar( 255, 255, 0)** which means blue in RGB value. + - The color of the figure will be **Scalar( 255, 0, 0)** which means blue in RGB value. - The ellipse's **thickness** is 2. - *MyFilledCircle* @code{.cpp} diff --git a/doc/tutorials/core/random_generator_and_text/random_generator_and_text.markdown b/doc/tutorials/core/random_generator_and_text/random_generator_and_text.markdown index fa7dc07ee7..b9d39756b0 100644 --- a/doc/tutorials/core/random_generator_and_text/random_generator_and_text.markdown +++ b/doc/tutorials/core/random_generator_and_text/random_generator_and_text.markdown @@ -111,7 +111,7 @@ Explanation pt1.y = rng.uniform( y_1, y_2 ); @endcode - We know that **rng** is a *Random number generator* object. In the code above we are - calling **rng.uniform(a,b)**. This generates a radombly uniformed distribution between + calling **rng.uniform(a,b)**. This generates a randomly uniformed distribution between the values **a** and **b** (inclusive in **a**, exclusive in **b**). - From the explanation above, we deduce that the extremes *pt1* and *pt2* will be random values, so the lines positions will be quite impredictable, giving a nice visual effect @@ -133,7 +133,7 @@ Explanation are used as the *R*, *G* and *B* parameters for the line color. Hence, the color of the lines will be random too! --# The explanation above applies for the other functions generating circles, ellipses, polygones, +-# The explanation above applies for the other functions generating circles, ellipses, polygons, etc. The parameters such as *center* and *vertices* are also generated randomly. -# Before finishing, we also should take a look at the functions *Display_Random_Text* and *Displaying_Big_End*, since they both have a few interesting features: diff --git a/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.markdown b/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.markdown index cc73fca1e0..9c651a6195 100644 --- a/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.markdown +++ b/doc/tutorials/imgproc/imgtrans/hough_lines/hough_lines.markdown @@ -55,7 +55,7 @@ Arranging the terms: \f$r = x \cos \theta + y \sin \theta\f$ -# We can do the same operation above for all the points in an image. If the curves of two different points intersect in the plane \f$\theta\f$ - \f$r\f$, that means that both points belong to a same line. 
For instance, following with the example above and drawing the plot for two more - points: \f$x_{1} = 9\f$, \f$y_{1} = 4\f$ and \f$x_{2} = 12\f$, \f$y_{2} = 3\f$, we get: + points: \f$x_{1} = 4\f$, \f$y_{1} = 9\f$ and \f$x_{2} = 12\f$, \f$y_{2} = 3\f$, we get: ![](images/Hough_Lines_Tutorial_Theory_2.jpg) diff --git a/modules/calib3d/include/opencv2/calib3d.hpp b/modules/calib3d/include/opencv2/calib3d.hpp index 744ded305b..65cf557b4b 100644 --- a/modules/calib3d/include/opencv2/calib3d.hpp +++ b/modules/calib3d/include/opencv2/calib3d.hpp @@ -697,19 +697,19 @@ CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize, /** @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern. -@param objectPoints In the new interface it is a vector of vectors of calibration pattern points -in the calibration pattern coordinate space. The outer vector contains as many elements as the -number of the pattern views. If the same calibration pattern is shown in each view and it is fully -visible, all the vectors will be the same. Although, it is possible to use partially occluded -patterns, or even different patterns in different views. Then, the vectors will be different. The -points are 3D, but since they are in a pattern coordinate system, then, if the rig is planar, it -may make sense to put the model to a XY coordinate plane so that Z-coordinate of each input object -point is 0. +@param objectPoints In the new interface it is a vector of vectors of calibration pattern points in +the calibration pattern coordinate space (e.g. std::vector>). The outer +vector contains as many elements as the number of the pattern views. If the same calibration pattern +is shown in each view and it is fully visible, all the vectors will be the same. Although, it is +possible to use partially occluded patterns, or even different patterns in different views. Then, +the vectors will be different. The points are 3D, but since they are in a pattern coordinate system, +then, if the rig is planar, it may make sense to put the model to a XY coordinate plane so that +Z-coordinate of each input object point is 0. In the old interface all the vectors of object points from different views are concatenated together. -@param imagePoints In the new interface it is a vector of vectors of the projections of -calibration pattern points. imagePoints.size() and objectPoints.size() and imagePoints[i].size() -must be equal to objectPoints[i].size() for each i. +@param imagePoints In the new interface it is a vector of vectors of the projections of calibration +pattern points (e.g. std::vector>). imagePoints.size() and +objectPoints.size() and imagePoints[i].size() must be equal to objectPoints[i].size() for each i. In the old interface all the vectors of object points from different views are concatenated together. @param imageSize Size of the image used only to initialize the intrinsic camera matrix. @@ -719,11 +719,11 @@ and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy mu initialized before calling the function. @param distCoeffs Output vector of distortion coefficients \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements. -@param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view. 
-That is, each k-th rotation vector together with the corresponding k-th translation vector (see -the next output parameter description) brings the calibration pattern from the model coordinate -space (in which object points are specified) to the world coordinate space, that is, a real -position of the calibration pattern in the k-th pattern view (k=0.. *M* -1). +@param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view +(e.g. std::vector>). That is, each k-th rotation vector together with the corresponding +k-th translation vector (see the next output parameter description) brings the calibration pattern +from the model coordinate space (in which object points are specified) to the world coordinate +space, that is, a real position of the calibration pattern in the k-th pattern view (k=0.. *M* -1). @param tvecs Output vector of translation vectors estimated for each pattern view. @param flags Different flags that may be zero or a combination of the following values: - **CV_CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of diff --git a/modules/calib3d/test/test_reproject_image_to_3d.cpp b/modules/calib3d/test/test_reproject_image_to_3d.cpp index 3e77a290ce..7364d3bf46 100644 --- a/modules/calib3d/test/test_reproject_image_to_3d.cpp +++ b/modules/calib3d/test/test_reproject_image_to_3d.cpp @@ -138,7 +138,12 @@ protected: { InT d = disp(y, x); - double from[4] = { x, y, d, 1 }; + double from[4] = { + static_cast(x), + static_cast(y), + static_cast(d), + 1.0, + }; Mat_ res = Q * Mat_(4, 1, from); res /= res(3, 0); diff --git a/modules/calib3d/test/test_solvepnp_ransac.cpp b/modules/calib3d/test/test_solvepnp_ransac.cpp index c8d8735b8e..76a3966bba 100644 --- a/modules/calib3d/test/test_solvepnp_ransac.cpp +++ b/modules/calib3d/test/test_solvepnp_ransac.cpp @@ -183,6 +183,9 @@ protected: method, totalTestsCount - successfulTestsCount, totalTestsCount, maxError, mode); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); } + cout << "mode: " << mode << ", method: " << method << " -> " + << ((double)successfulTestsCount / totalTestsCount) * 100 << "%" + << " (err < " << maxError << ")" << endl; } } } diff --git a/modules/calib3d/test/test_undistort_badarg.cpp b/modules/calib3d/test/test_undistort_badarg.cpp index f3f762fa65..cfce8a49c6 100644 --- a/modules/calib3d/test/test_undistort_badarg.cpp +++ b/modules/calib3d/test/test_undistort_badarg.cpp @@ -104,7 +104,10 @@ void CV_UndistortPointsBadArgTest::run(int) img_size.height = 600; double cam[9] = {150.f, 0.f, img_size.width/2.f, 0, 300.f, img_size.height/2.f, 0.f, 0.f, 1.f}; double dist[4] = {0.01,0.02,0.001,0.0005}; - double s_points[N_POINTS2] = {img_size.width/4,img_size.height/4}; + double s_points[N_POINTS2] = { + static_cast(img_size.width) / 4.0, + static_cast(img_size.height) / 4.0, + }; double d_points[N_POINTS2]; double p[9] = {155.f, 0.f, img_size.width/2.f+img_size.width/50.f, 0, 310.f, img_size.height/2.f+img_size.height/50.f, 0.f, 0.f, 1.f}; double r[9] = {1,0,0,0,1,0,0,0,1}; diff --git a/modules/core/include/opencv2/core/affine.hpp b/modules/core/include/opencv2/core/affine.hpp index f8e84b97ad..3b527cd221 100644 --- a/modules/core/include/opencv2/core/affine.hpp +++ b/modules/core/include/opencv2/core/affine.hpp @@ -253,7 +253,7 @@ void cv::Affine3::rotation(const Vec3& _rvec) double c = std::cos(theta); double s = std::sin(theta); double c1 = 1. - c; - double itheta = theta ? 1./theta : 0.; + double itheta = (theta != 0) ? 
1./theta : 0.; rx *= itheta; ry *= itheta; rz *= itheta; diff --git a/modules/core/src/datastructs.cpp b/modules/core/src/datastructs.cpp index c0067f8fc4..519d00ee53 100644 --- a/modules/core/src/datastructs.cpp +++ b/modules/core/src/datastructs.cpp @@ -651,7 +651,7 @@ icvGrowSeq( CvSeq *seq, int in_front_of ) /* If there is a free space just after last allocated block and it is big enough then enlarge the last block. This can happen only if the new block is added to the end of sequence: */ - if( (unsigned)(ICV_FREE_PTR(storage) - seq->block_max) < CV_STRUCT_ALIGN && + if( (size_t)(ICV_FREE_PTR(storage) - seq->block_max) < CV_STRUCT_ALIGN && storage->free_space >= seq->elem_size && !in_front_of ) { int delta = storage->free_space / elem_size; diff --git a/modules/core/test/test_io.cpp b/modules/core/test/test_io.cpp index 37bff62151..0c401f8ebd 100644 --- a/modules/core/test/test_io.cpp +++ b/modules/core/test/test_io.cpp @@ -144,7 +144,11 @@ protected: depth = cvtest::randInt(rng) % (CV_64F+1); cn = cvtest::randInt(rng) % 4 + 1; - int sz[] = {cvtest::randInt(rng)%10+1, cvtest::randInt(rng)%10+1, cvtest::randInt(rng)%10+1}; + int sz[] = { + static_cast(cvtest::randInt(rng)%10+1), + static_cast(cvtest::randInt(rng)%10+1), + static_cast(cvtest::randInt(rng)%10+1), + }; MatND test_mat_nd(3, sz, CV_MAKETYPE(depth, cn)); rng0.fill(test_mat_nd, CV_RAND_UNI, Scalar::all(ranges[depth][0]), Scalar::all(ranges[depth][1])); @@ -156,8 +160,12 @@ protected: multiply(test_mat_nd, test_mat_scale, test_mat_nd); } - int ssz[] = {cvtest::randInt(rng)%10+1, cvtest::randInt(rng)%10+1, - cvtest::randInt(rng)%10+1,cvtest::randInt(rng)%10+1}; + int ssz[] = { + static_cast(cvtest::randInt(rng)%10+1), + static_cast(cvtest::randInt(rng)%10+1), + static_cast(cvtest::randInt(rng)%10+1), + static_cast(cvtest::randInt(rng)%10+1), + }; SparseMat test_sparse_mat = cvTsGetRandomSparseMat(4, ssz, cvtest::randInt(rng)%(CV_64F+1), cvtest::randInt(rng) % 10000, 0, 100, rng); diff --git a/modules/cudawarping/perf/perf_warping.cpp b/modules/cudawarping/perf/perf_warping.cpp index 36662418c3..6ce547e60e 100644 --- a/modules/cudawarping/perf/perf_warping.cpp +++ b/modules/cudawarping/perf/perf_warping.cpp @@ -253,7 +253,7 @@ PERF_TEST_P(Sz_Depth_Cn_Inter_Border, WarpAffine, const double aplha = CV_PI / 4; const double mat[2 * 3] = { - std::cos(aplha), -std::sin(aplha), src.cols / 2, + std::cos(aplha), -std::sin(aplha), static_cast(src.cols) / 2.0, std::sin(aplha), std::cos(aplha), 0 }; const cv::Mat M(2, 3, CV_64F, (void*) mat); @@ -301,7 +301,7 @@ PERF_TEST_P(Sz_Depth_Cn_Inter_Border, WarpPerspective, declare.in(src, WARMUP_RNG); const double aplha = CV_PI / 4; - double mat[3][3] = { {std::cos(aplha), -std::sin(aplha), src.cols / 2}, + double mat[3][3] = { {std::cos(aplha), -std::sin(aplha), static_cast(src.cols) / 2.0}, {std::sin(aplha), std::cos(aplha), 0}, {0.0, 0.0, 1.0}}; const cv::Mat M(3, 3, CV_64F, (void*) mat); diff --git a/modules/flann/include/opencv2/flann/miniflann.hpp b/modules/flann/include/opencv2/flann/miniflann.hpp index f2acc23bff..02fa236d3a 100644 --- a/modules/flann/include/opencv2/flann/miniflann.hpp +++ b/modules/flann/include/opencv2/flann/miniflann.hpp @@ -89,13 +89,13 @@ struct CV_EXPORTS LinearIndexParams : public IndexParams struct CV_EXPORTS CompositeIndexParams : public IndexParams { CompositeIndexParams(int trees = 4, int branching = 32, int iterations = 11, - cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2 ); + cvflann::flann_centers_init_t 
centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f ); }; struct CV_EXPORTS AutotunedIndexParams : public IndexParams { - AutotunedIndexParams(float target_precision = 0.8, float build_weight = 0.01, - float memory_weight = 0, float sample_fraction = 0.1); + AutotunedIndexParams(float target_precision = 0.8f, float build_weight = 0.01f, + float memory_weight = 0, float sample_fraction = 0.1f); }; struct CV_EXPORTS HierarchicalClusteringIndexParams : public IndexParams @@ -107,7 +107,7 @@ struct CV_EXPORTS HierarchicalClusteringIndexParams : public IndexParams struct CV_EXPORTS KMeansIndexParams : public IndexParams { KMeansIndexParams(int branching = 32, int iterations = 11, - cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2 ); + cvflann::flann_centers_init_t centers_init = cvflann::FLANN_CENTERS_RANDOM, float cb_index = 0.2f ); }; struct CV_EXPORTS LshIndexParams : public IndexParams diff --git a/modules/highgui/src/window_QT.cpp b/modules/highgui/src/window_QT.cpp index c89918ee26..d0aad919ee 100644 --- a/modules/highgui/src/window_QT.cpp +++ b/modules/highgui/src/window_QT.cpp @@ -117,7 +117,7 @@ CV_IMPL void cvAddText(const CvArr* img, const char* text, CvPoint org, CvFont* "putText", autoBlockingConnection(), Q_ARG(void*, (void*) img), - Q_ARG(QString,QString(text)), + Q_ARG(QString,QString::fromUtf8(text)), Q_ARG(QPoint, QPoint(org.x,org.y)), Q_ARG(void*,(void*) font)); } @@ -418,12 +418,14 @@ static CvBar* icvFindBarByName(QBoxLayout* layout, QString name_bar, typeBar typ static CvTrackbar* icvFindTrackBarByName(const char* name_trackbar, const char* name_window, QBoxLayout* layout = NULL) { QString nameQt(name_trackbar); - if ((!name_window || !name_window[0]) && global_control_panel) //window name is null and we have a control panel + QString nameWinQt(name_window); + + if (nameWinQt.isEmpty() && global_control_panel) //window name is null and we have a control panel layout = global_control_panel->myLayout; if (!layout) { - QPointer w = icvFindWindowByName(QLatin1String(name_window)); + QPointer w = icvFindWindowByName(nameWinQt); if (!w) CV_Error(CV_StsNullPtr, "NULL window handler"); @@ -1875,7 +1877,7 @@ bool CvWindow::isOpenGl() void CvWindow::setViewportSize(QSize _size) { - myView->getWidget()->resize(_size); + resize(_size); myView->setSize(_size); } diff --git a/modules/imgproc/include/opencv2/imgproc.hpp b/modules/imgproc/include/opencv2/imgproc.hpp index 3db822db3d..ac93e45809 100644 --- a/modules/imgproc/include/opencv2/imgproc.hpp +++ b/modules/imgproc/include/opencv2/imgproc.hpp @@ -3494,7 +3494,7 @@ CV_EXPORTS_W double contourArea( InputArray contour, bool oriented = false ); The function calculates and returns the minimum-area bounding rectangle (possibly rotated) for a specified point set. See the OpenCV sample minarea.cpp . Developer should keep in mind that the -returned rotatedRect can contain negative indices when data is close the the containing Mat element +returned rotatedRect can contain negative indices when data is close to the containing Mat element boundary. 
@param points Input vector of 2D points, stored in std::vector\<\> or Mat diff --git a/modules/imgproc/perf/perf_moments.cpp b/modules/imgproc/perf/perf_moments.cpp index 9b3c5428f3..e5a9f036c9 100644 --- a/modules/imgproc/perf/perf_moments.cpp +++ b/modules/imgproc/perf/perf_moments.cpp @@ -34,5 +34,11 @@ PERF_TEST_P(MomentsFixture_val, Moments1, TEST_CYCLE() m = cv::moments(src, binaryImage); - SANITY_CHECK_MOMENTS(m, 1e-4, ERROR_RELATIVE); + int len = (int)sizeof(cv::Moments) / sizeof(double); + cv::Mat mat(1, len, CV_64F, (void*)&m); + //adding 1 to moments to avoid accidental tests fail on values close to 0 + mat += 1; + + + SANITY_CHECK_MOMENTS(m, 2e-4, ERROR_RELATIVE); } diff --git a/modules/imgproc/src/drawing.cpp b/modules/imgproc/src/drawing.cpp index 27411b247a..e52312a3b8 100644 --- a/modules/imgproc/src/drawing.cpp +++ b/modules/imgproc/src/drawing.cpp @@ -2229,7 +2229,10 @@ void cv::polylines(InputOutputArray _img, InputArrayOfArrays pts, { Mat p = pts.getMat(manyContours ? i : -1); if( p.total() == 0 ) + { + npts[i] = 0; continue; + } CV_Assert(p.checkVector(2, CV_32S) >= 0); ptsptr[i] = p.ptr(); npts[i] = p.rows*p.cols*p.channels()/2; diff --git a/modules/imgproc/src/smooth.cpp b/modules/imgproc/src/smooth.cpp index 90840cdaa5..dbe8a6315a 100644 --- a/modules/imgproc/src/smooth.cpp +++ b/modules/imgproc/src/smooth.cpp @@ -2770,7 +2770,7 @@ public: #if CV_SSE3 int CV_DECL_ALIGNED(16) buf[4]; float CV_DECL_ALIGNED(16) bufSum[4]; - static const int CV_DECL_ALIGNED(16) bufSignMask[] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 }; + static const unsigned int CV_DECL_ALIGNED(16) bufSignMask[] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 }; bool haveSSE3 = checkHardwareSupport(CV_CPU_SSE3); #endif @@ -3152,7 +3152,7 @@ public: #if CV_SSE3 int CV_DECL_ALIGNED(16) idxBuf[4]; float CV_DECL_ALIGNED(16) bufSum32[4]; - static const int CV_DECL_ALIGNED(16) bufSignMask[] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 }; + static const unsigned int CV_DECL_ALIGNED(16) bufSignMask[] = { 0x80000000, 0x80000000, 0x80000000, 0x80000000 }; bool haveSSE3 = checkHardwareSupport(CV_CPU_SSE3); #endif diff --git a/modules/imgproc/test/test_contours.cpp b/modules/imgproc/test/test_contours.cpp index 6c5c3f0ebb..b0b8c4fbb5 100644 --- a/modules/imgproc/test/test_contours.cpp +++ b/modules/imgproc/test/test_contours.cpp @@ -410,4 +410,23 @@ TEST(Core_Drawing, _914) ASSERT_EQ( (3*rows + cols)*3 - 3*9, pixelsDrawn); } +TEST(Core_Drawing, polylines_empty) +{ + Mat img(100, 100, CV_8UC1, Scalar(0)); + vector pts; // empty + polylines(img, pts, false, Scalar(255)); + int cnt = countNonZero(img); + ASSERT_EQ(cnt, 0); +} + +TEST(Core_Drawing, polylines) +{ + Mat img(100, 100, CV_8UC1, Scalar(0)); + vector pts; + pts.push_back(Point(0, 0)); + pts.push_back(Point(20, 0)); + polylines(img, pts, false, Scalar(255)); + int cnt = countNonZero(img); + ASSERT_EQ(cnt, 21); +} /* End of file. 
*/ diff --git a/modules/python/src2/cv2.cpp b/modules/python/src2/cv2.cpp index 3611253202..974545994b 100644 --- a/modules/python/src2/cv2.cpp +++ b/modules/python/src2/cv2.cpp @@ -226,7 +226,7 @@ static bool pyopencv_to(PyObject* o, Mat& m, const ArgInfo info) if( PyInt_Check(o) ) { - double v[] = {(double)PyInt_AsLong((PyObject*)o), 0., 0., 0.}; + double v[] = {static_cast(PyInt_AsLong((PyObject*)o)), 0., 0., 0.}; m = Mat(4, 1, CV_64F, v).clone(); return true; } diff --git a/modules/ts/src/ts_perf.cpp b/modules/ts/src/ts_perf.cpp index f2eae265ac..f5ba1d81e6 100644 --- a/modules/ts/src/ts_perf.cpp +++ b/modules/ts/src/ts_perf.cpp @@ -438,9 +438,9 @@ static int countViolations(const cv::Mat& expected, const cv::Mat& actual, const if (v > 0 && max_violation != 0 && max_allowed != 0) { - int loc[10]; + int loc[10] = {0}; cv::minMaxIdx(maximum, 0, max_allowed, 0, loc, mask); - *max_violation = diff64f.at(loc[1], loc[0]); + *max_violation = diff64f.at(loc[0], loc[1]); } return v; diff --git a/modules/videoio/src/cap_msmf.hpp b/modules/videoio/src/cap_msmf.hpp index 4fdf41fb31..0987c704f5 100644 --- a/modules/videoio/src/cap_msmf.hpp +++ b/modules/videoio/src/cap_msmf.hpp @@ -603,11 +603,6 @@ public: ComPtr() throw() { } - ComPtr(int nNull) throw() - { - assert(nNull == 0); - p = NULL; - } ComPtr(T* lp) throw() { p = lp; @@ -638,13 +633,6 @@ public: { return p.operator==(pT); } - // For comparison to NULL - bool operator==(int nNull) const - { - assert(nNull == 0); - return p.operator==(NULL); - } - bool operator!=(_In_opt_ T* pT) const throw() { return p.operator!=(pT); @@ -3123,7 +3111,7 @@ public: HRESULT hr = CheckShutdown(); if (SUCCEEDED(hr)) { - if (m_spClock == NULL) { + if (!m_spClock) { hr = MF_E_NO_CLOCK; // There is no presentation clock. } else { // Return the pointer to the caller. diff --git a/samples/cpp/openni_capture.cpp b/samples/cpp/openni_capture.cpp index 64aa90bc69..09f1d21e09 100644 --- a/samples/cpp/openni_capture.cpp +++ b/samples/cpp/openni_capture.cpp @@ -13,14 +13,14 @@ static void help() "The user gets some of the supported output images.\n" "\nAll supported output map types:\n" "1.) Data given from depth generator\n" - " CV_CAP_OPENNI_DEPTH_MAP - depth values in mm (CV_16UC1)\n" - " CV_CAP_OPENNI_POINT_CLOUD_MAP - XYZ in meters (CV_32FC3)\n" - " CV_CAP_OPENNI_DISPARITY_MAP - disparity in pixels (CV_8UC1)\n" - " CV_CAP_OPENNI_DISPARITY_MAP_32F - disparity in pixels (CV_32FC1)\n" - " CV_CAP_OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not ocluded, not shaded etc.) (CV_8UC1)\n" + " CAP_OPENNI_DEPTH_MAP - depth values in mm (CV_16UC1)\n" + " CAP_OPENNI_POINT_CLOUD_MAP - XYZ in meters (CV_32FC3)\n" + " CAP_OPENNI_DISPARITY_MAP - disparity in pixels (CV_8UC1)\n" + " CAP_OPENNI_DISPARITY_MAP_32F - disparity in pixels (CV_32FC1)\n" + " CAP_OPENNI_VALID_DEPTH_MASK - mask of valid pixels (not ocluded, not shaded etc.) (CV_8UC1)\n" "2.) Data given from RGB image generator\n" - " CV_CAP_OPENNI_BGR_IMAGE - color image (CV_8UC3)\n" - " CV_CAP_OPENNI_GRAY_IMAGE - gray image (CV_8UC1)\n" + " CAP_OPENNI_BGR_IMAGE - color image (CV_8UC3)\n" + " CAP_OPENNI_GRAY_IMAGE - gray image (CV_8UC1)\n" << endl; } @@ -89,8 +89,8 @@ static void printCommandLineParams() { cout << "-cd Colorized disparity? (0 or 1; 1 by default) Ignored if disparity map is not selected to show." << endl; cout << "-fmd Fixed max disparity? (0 or 1; 0 by default) Ignored if disparity map is not colorized (-cd 0)." 
<< endl; - cout << "-mode image mode: resolution and fps, supported three values: 0 - CV_CAP_OPENNI_VGA_30HZ, 1 - CV_CAP_OPENNI_SXGA_15HZ," << endl; - cout << " 2 - CV_CAP_OPENNI_SXGA_30HZ (0 by default). Ignored if rgb image or gray image are not selected to show." << endl; + cout << "-mode image mode: resolution and fps, supported three values: 0 - CAP_OPENNI_VGA_30HZ, 1 - CAP_OPENNI_SXGA_15HZ," << endl; + cout << " 2 - CAP_OPENNI_SXGA_30HZ (0 by default). Ignored if rgb image or gray image are not selected to show." << endl; cout << "-m Mask to set which output images are need. It is a string of size 5. Each element of this is '0' or '1' and" << endl; cout << " determine: is depth map, disparity map, valid pixels mask, rgb image, gray image need or not (correspondently)?" << endl ; cout << " By default -m 01010 i.e. disparity map and rgb image will be shown." << endl ;
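
Note on the modules/ts/src/ts_perf.cpp hunk: cv::minMaxIdx() reports the location of the extremum as a multi-dimensional index, so for a 2-D matrix loc[0] is the row and loc[1] is the column, which is why the element lookup swaps to (loc[0], loc[1]); zero-initializing the index array is a cheap guard against reading indeterminate values. A minimal standalone sketch of the index ordering (illustrative only, not part of the patch):

    #include <opencv2/core.hpp>
    #include <cstdio>

    int main()
    {
        cv::Mat m = cv::Mat::zeros(3, 4, CV_64F);
        m.at<double>(2, 1) = 5.0;                  // maximum placed at row 2, column 1

        double minVal = 0, maxVal = 0;
        int maxIdx[2] = {0};                       // one index per dimension, zero-initialized
        cv::minMaxIdx(m, &minVal, &maxVal, 0, maxIdx);

        // maxIdx comes back as {row, col} = {2, 1}, so the element is read back
        // with m.at<double>(maxIdx[0], maxIdx[1]).
        std::printf("max %.1f at row %d, col %d\n", maxVal, maxIdx[0], maxIdx[1]);
        return 0;
    }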
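
Note on the several test/perf hunks that wrap initializer values in explicit static_cast conversions (test_reproject_image_to_3d.cpp, test_undistort_badarg.cpp, test_io.cpp, perf_warping.cpp, cv2.cpp): the likely intent is to keep braced initializers free of implicit narrowing conversions, which compilers diagnose in C++11 mode. A hypothetical minimal illustration (variable names are made up, not taken from the patch):

    int main()
    {
        // In C++11 list-initialization, converting a non-constant int to double
        // inside a braced initializer is a narrowing conversion; GCC and clang
        // diagnose it (warning or error depending on version and flags).
        int x = 3, y = 7;
        // double bad[4] = { x, y, 0.5, 1 };       // diagnosed in C++11 mode
        double ok[4] = {
            static_cast<double>(x),                // explicit conversions keep the
            static_cast<double>(y),                // aggregate initializer well-formed
            0.5,
            1.0,
        };
        (void)ok;
        return 0;
    }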