From 32aba5e64ba40b372f02b5fab18c9632f763bb75 Mon Sep 17 00:00:00 2001
From: "luz.paz"
Date: Wed, 14 Aug 2019 13:33:49 -0400
Subject: [PATCH] FIx misc. source and comment typos

Found via `codespell -q 3 -S ./3rdparty,./modules -L amin,ang,atleast,dof,endwhile,hist,uint`
---
 apps/createsamples/utility.cpp | 4 ++--
 cmake/FindCUDA/run_nvcc.cmake | 2 +-
 cmake/OpenCVCompilerOptimizations.cmake | 2 +-
 cmake/OpenCVDetectApacheAnt.cmake | 2 +-
 cmake/OpenCVDetectInferenceEngine.cmake | 2 +-
 cmake/OpenCVDetectPython.cmake | 2 +-
 cmake/OpenCVUtils.cmake | 2 +-
 cmake/android/android_ant_projects.cmake | 2 +-
 .../js_lucas_kanade/js_lucas_kanade.markdown | 2 +-
 .../calib3d/real_time_pose/real_time_pose.markdown | 4 ++--
 .../feature_flann_matcher.markdown | 4 ++--
 .../porting_anisotropic_segmentation.markdown | 12 ++++++------
 .../imgcodecs/raster-gdal/raster_io_gdal.markdown | 2 +-
 .../gausian_median_blur_bilateral_filter.markdown | 2 +-
 .../morph_lines_detection/morph_lines_detection.md | 2 +-
 .../transition_guide/transition_guide.markdown | 2 +-
 .../ios/video_processing/video_processing.markdown | 2 +-
 .../ml/non_linear_svms/non_linear_svms.markdown | 2 +-
 .../cascade_classifier/cascade_classifier.markdown | 2 +-
 .../video/optical_flow/optical_flow.markdown | 2 +-
 samples/cpp/stitching_detailed.cpp | 2 +-
 samples/cpp/videocapture_gphoto2_autofocus.cpp | 2 +-
 samples/directx/d3d11_interop.cpp | 2 +-
 .../dnn/face_detector/how_to_train_face_detector.txt | 2 +-
 samples/dnn/openpose.py | 2 +-
 samples/opencl/opencl-opencv-interop.cpp | 2 +-
 .../imgProc/threshold_inRange/threshold_inRange.py | 2 +-
 .../cascade_classifier/objectDetection.py | 2 +-
 28 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/apps/createsamples/utility.cpp b/apps/createsamples/utility.cpp
index ab1ca1c789..919ad2dcc4 100644
--- a/apps/createsamples/utility.cpp
+++ b/apps/createsamples/utility.cpp
@@ -895,7 +895,7 @@ void icvGetNextFromBackgroundData( CvBackgroundData* data,
 * #pragma omp parallel
 * {
 *     ...
- *     icvGetBackgourndImage( cvbgdata, cvbgreader, img );
+ *     icvGetBackgroundImage( cvbgdata, cvbgreader, img );
 *     ...
 * }
 * ...
@@ -990,7 +990,7 @@ static int icvInitBackgroundReaders( const char* filename, Size winsize )
 /*
 * icvDestroyBackgroundReaders
 *
- * Finish backgournd reading process
+ * Finish background reading process
 */
 static void icvDestroyBackgroundReaders()
diff --git a/cmake/FindCUDA/run_nvcc.cmake b/cmake/FindCUDA/run_nvcc.cmake
index c372fe44ec..25527b6938 100644
--- a/cmake/FindCUDA/run_nvcc.cmake
+++ b/cmake/FindCUDA/run_nvcc.cmake
@@ -136,7 +136,7 @@ macro(cuda_execute_process status command)
   # copy and paste a runnable command line.
   set(cuda_execute_process_string)
   foreach(arg ${ARGN})
-    # If there are quotes, excape them, so they come through.
+    # If there are quotes, escape them, so they come through.
     string(REPLACE "\"" "\\\"" arg ${arg})
     # Args with spaces need quotes around them to get them to be parsed as a single argument.
    if(arg MATCHES " ")
diff --git a/cmake/OpenCVCompilerOptimizations.cmake b/cmake/OpenCVCompilerOptimizations.cmake
index 9e4691760c..92cce39baa 100644
--- a/cmake/OpenCVCompilerOptimizations.cmake
+++ b/cmake/OpenCVCompilerOptimizations.cmake
@@ -854,7 +854,7 @@ macro(__ocv_add_dispatched_file filename target_src_var src_directory dst_direct
   if(";${CPU_DISPATCH_FINAL};" MATCHES "${OPT}" OR __CPU_DISPATCH_INCLUDE_ALL)
     if(EXISTS "${src_directory}/${filename}.${OPT_LOWER}.cpp")
-      message(STATUS "Using overrided ${OPT} source: ${src_directory}/${filename}.${OPT_LOWER}.cpp")
+      message(STATUS "Using overridden ${OPT} source: ${src_directory}/${filename}.${OPT_LOWER}.cpp")
     else()
       list(APPEND ${target_src_var} "${__file}")
     endif()
diff --git a/cmake/OpenCVDetectApacheAnt.cmake b/cmake/OpenCVDetectApacheAnt.cmake
index 2f8243838e..330ea9f756 100644
--- a/cmake/OpenCVDetectApacheAnt.cmake
+++ b/cmake/OpenCVDetectApacheAnt.cmake
@@ -27,7 +27,7 @@ if(ANT_EXECUTABLE)
     unset(ANT_EXECUTABLE CACHE)
   else()
     string(REGEX MATCH "[0-9]+.[0-9]+.[0-9]+" ANT_VERSION "${ANT_VERSION_FULL}")
-    set(ANT_VERSION "${ANT_VERSION}" CACHE INTERNAL "Detected ant vesion")
+    set(ANT_VERSION "${ANT_VERSION}" CACHE INTERNAL "Detected ant version")
     message(STATUS "Found apache ant: ${ANT_EXECUTABLE} (${ANT_VERSION})")
   endif()
diff --git a/cmake/OpenCVDetectInferenceEngine.cmake b/cmake/OpenCVDetectInferenceEngine.cmake
index 81454184f1..f2c7a03e9f 100644
--- a/cmake/OpenCVDetectInferenceEngine.cmake
+++ b/cmake/OpenCVDetectInferenceEngine.cmake
@@ -5,7 +5,7 @@
 #
 # Detect parameters:
 # 1. Native cmake IE package:
-#    - enironment variable InferenceEngine_DIR is set to location of cmake module
+#    - environment variable InferenceEngine_DIR is set to location of cmake module
 # 2. Custom location:
 #    - INF_ENGINE_INCLUDE_DIRS - headers search location
 #    - INF_ENGINE_LIB_DIRS - library search location
diff --git a/cmake/OpenCVDetectPython.cmake b/cmake/OpenCVDetectPython.cmake
index c3c467002e..1b1fbf17b0 100644
--- a/cmake/OpenCVDetectPython.cmake
+++ b/cmake/OpenCVDetectPython.cmake
@@ -249,7 +249,7 @@ if(NOT ${found})
 
   # Export return values
   set(${found} "${_found}" CACHE INTERNAL "")
-  set(${executable} "${_executable}" CACHE FILEPATH "Path to Python interpretor")
+  set(${executable} "${_executable}" CACHE FILEPATH "Path to Python interpreter")
   set(${version_string} "${_version_string}" CACHE INTERNAL "")
   set(${version_major} "${_version_major}" CACHE INTERNAL "")
   set(${version_minor} "${_version_minor}" CACHE INTERNAL "")
diff --git a/cmake/OpenCVUtils.cmake b/cmake/OpenCVUtils.cmake
index 9cba5d9a62..092deed811 100644
--- a/cmake/OpenCVUtils.cmake
+++ b/cmake/OpenCVUtils.cmake
@@ -781,7 +781,7 @@ macro(ocv_check_modules define)
         if(pkgcfg_lib_${define}_${_lib})
           list(APPEND _libs "${pkgcfg_lib_${define}_${_lib}}")
         else()
-          message(WARNING "ocv_check_modules(${define}): can't find library '${_lib}'. Specify 'pkgcfg_lib_${define}_${_lib}' manualy")
+          message(WARNING "ocv_check_modules(${define}): can't find library '${_lib}'. Specify 'pkgcfg_lib_${define}_${_lib}' manually")
          list(APPEND _libs "${_lib}")
        endif()
      else()
diff --git a/cmake/android/android_ant_projects.cmake b/cmake/android/android_ant_projects.cmake
index c098b7aa7d..b422da7042 100644
--- a/cmake/android/android_ant_projects.cmake
+++ b/cmake/android/android_ant_projects.cmake
@@ -49,7 +49,7 @@ macro(android_get_compatible_target VAR)
     list(GET ANDROID_SDK_TARGETS 0 __lvl)
     string(REGEX MATCH "[0-9]+$" __lvl "${__lvl}")
 
-    #find minimal level mathing to all provided levels
+    #find minimal level matching to all provided levels
     foreach(lvl ${ARGN})
       string(REGEX MATCH "[0-9]+$" __level "${lvl}")
       if(__level GREATER __lvl)
diff --git a/doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown b/doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown
index 1d8fa29ee8..a86bf11223 100644
--- a/doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown
+++ b/doc/js_tutorials/js_video/js_lucas_kanade/js_lucas_kanade.markdown
@@ -13,7 +13,7 @@ Optical Flow
 ------------
 
 Optical flow is the pattern of apparent motion of image objects between two consecutive frames
-caused by the movemement of object or camera. It is 2D vector field where each vector is a
+caused by the movement of object or camera. It is 2D vector field where each vector is a
 displacement vector showing the movement of points from first frame to second. Consider the image
 below (Image Courtesy: [Wikipedia article on Optical Flow](http://en.wikipedia.org/wiki/Optical_flow)).
diff --git a/doc/tutorials/calib3d/real_time_pose/real_time_pose.markdown b/doc/tutorials/calib3d/real_time_pose/real_time_pose.markdown
index 4347d11651..e05e6e11ac 100644
--- a/doc/tutorials/calib3d/real_time_pose/real_time_pose.markdown
+++ b/doc/tutorials/calib3d/real_time_pose/real_time_pose.markdown
@@ -253,8 +253,8 @@ Here is explained in detail the code for the real time application:
 @code{.cpp}
   RobustMatcher rmatcher; // instantiate RobustMatcher
-  cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints); // instatiate ORB feature detector
-  cv::DescriptorExtractor * extractor = new cv::OrbDescriptorExtractor(); // instatiate ORB descriptor extractor
+  cv::FeatureDetector * detector = new cv::OrbFeatureDetector(numKeyPoints); // instantiate ORB feature detector
+  cv::DescriptorExtractor * extractor = new cv::OrbDescriptorExtractor(); // instantiate ORB descriptor extractor
   rmatcher.setFeatureDetector(detector); // set feature detector
   rmatcher.setDescriptorExtractor(extractor); // set descriptor extractor
diff --git a/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.markdown b/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.markdown
index e7f865c3ce..d8961f5f96 100644
--- a/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.markdown
+++ b/doc/tutorials/features2d/feature_flann_matcher/feature_flann_matcher.markdown
@@ -29,8 +29,8 @@ This distance is equivalent to count the number of different elements for binary
 To filter the matches, Lowe proposed in @cite Lowe:2004:DIF:993451.996342 to use a distance ratio test
 to try to eliminate false matches. The distance ratio between the two nearest matches of a considered
 keypoint is computed and it is a good match when this value is below
-a thresold. Indeed, this ratio allows helping to discriminate between ambiguous matches (distance ratio between the two nearest neighbors is
-close to one) and well discriminated matches. The figure below from the SIFT paper illustrates the probability that a match is correct
+a threshold. Indeed, this ratio allows helping to discriminate between ambiguous matches (distance ratio between the two nearest neighbors
+is close to one) and well discriminated matches. The figure below from the SIFT paper illustrates the probability that a match is correct
 based on the nearest-neighbor distance ratio test.
 
 ![](images/Feature_FlannMatcher_Lowe_ratio_test.png)
diff --git a/doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown b/doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown
index f84b96e031..cb76af6677 100644
--- a/doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown
+++ b/doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown
@@ -39,7 +39,7 @@ With G-API, we can define it as follows:
 It is important to understand that the new G-API based version of
 calcGST() will just produce a compute graph, in contrast to its
 original version, which actually calculates the values. This is a
-principial difference -- G-API based functions like this are used to
+principal difference -- G-API based functions like this are used to
 construct graphs, not to process the actual data.
 
 Let's start implementing calcGST() with calculation of \f$J\f$
@@ -186,7 +186,7 @@ is also OpenCV-based since it fallbacks to OpenCV functions inside.
 On GNU/Linux, application memory footprint can be profiled with
 [Valgrind](http://valgrind.org/). On Debian/Ubuntu systems it can be
-installed like this (assuming you have administrator priveleges):
+installed like this (assuming you have administrator privileges):
 
     $ sudo apt-get install valgrind massif-visualizer
 
@@ -239,10 +239,10 @@ consumption is because the default naive OpenCV-based backend is
 used to execute this graph. This backend serves mostly for quick
 prototyping and debugging algorithms before offload/further
 optimization.
-This backend doesn't utilize any complex memory mamagement strategies yet
+This backend doesn't utilize any complex memory management strategies yet
 since it is not its point at the moment. In the following chapter,
 we'll learn about Fluid backend and see how the same G-API code can
-run in a completely different model (and the footprint shrinked to a
+run in a completely different model (and the footprint shrunk to a
 number of kilobytes).
 
 # Backends and kernels {#gapi_anisotropic_backends}
@@ -298,7 +298,7 @@ as a _graph compilation option_:
 
 @snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi_fluid.cpp kernel_pkg_use
 
-Traditional OpenCV is logically divided into modules, whith every
+Traditional OpenCV is logically divided into modules, with every
 module providing a set of functions. In G-API, there are also
 "modules" which are represented as kernel packages provided by a
 particular backend. In this example, we pass Fluid kernel packages to
@@ -375,7 +375,7 @@ left side of the dump) is easily noticeable.
 The visualization reflects how G-API deals with mixed graphs, also
 called _heterogeneous_ graphs. The majority of operations in this
 graph are implemented with Fluid backend, but Box filters are executed
-by the OpenCV backend. One can easily see that the graph is partioned
+by the OpenCV backend. One can easily see that the graph is partitioned
 (with rectangles). G-API groups connected operations based on their
 affinity, forming _subgraphs_ (or _islands_ in G-API terminology), and
 our top-level graph becomes a composition of multiple smaller
diff --git a/doc/tutorials/imgcodecs/raster-gdal/raster_io_gdal.markdown b/doc/tutorials/imgcodecs/raster-gdal/raster_io_gdal.markdown
index 2193d26870..432caa69e0 100644
--- a/doc/tutorials/imgcodecs/raster-gdal/raster_io_gdal.markdown
+++ b/doc/tutorials/imgcodecs/raster-gdal/raster_io_gdal.markdown
@@ -15,7 +15,7 @@ The primary objectives for this tutorial:
 
 - How to use OpenCV [imread](@ref imread) to load satellite imagery.
 - How to use OpenCV [imread](@ref imread) to load SRTM Digital Elevation Models
-- Given the corner coordinates of both the image and DEM, correllate the elevation data to the
+- Given the corner coordinates of both the image and DEM, correlate the elevation data to the
   image to find elevations for each pixel.
 - Show a basic, easy-to-implement example of a terrain heat map.
 - Show a basic use of DEM data coupled with ortho-rectified imagery.
diff --git a/doc/tutorials/imgproc/gausian_median_blur_bilateral_filter/gausian_median_blur_bilateral_filter.markdown b/doc/tutorials/imgproc/gausian_median_blur_bilateral_filter/gausian_median_blur_bilateral_filter.markdown
index 1bfb5f1f27..a03f95b6e4 100644
--- a/doc/tutorials/imgproc/gausian_median_blur_bilateral_filter/gausian_median_blur_bilateral_filter.markdown
+++ b/doc/tutorials/imgproc/gausian_median_blur_bilateral_filter/gausian_median_blur_bilateral_filter.markdown
@@ -157,7 +157,7 @@ already known by now.
 - *src*: Source image
 - *dst*: Destination image
 - *Size(w, h)*: The size of the kernel to be used (the neighbors to be considered). \f$w\f$ and
-  \f$h\f$ have to be odd and positive numbers otherwise thi size will be calculated using the
+  \f$h\f$ have to be odd and positive numbers otherwise the size will be calculated using the
   \f$\sigma_{x}\f$ and \f$\sigma_{y}\f$ arguments.
 - \f$\sigma_{x}\f$: The standard deviation in x. Writing \f$0\f$ implies that \f$\sigma_{x}\f$ is
   calculated using kernel size.
diff --git a/doc/tutorials/imgproc/morph_lines_detection/morph_lines_detection.md b/doc/tutorials/imgproc/morph_lines_detection/morph_lines_detection.md
index 4b0d3fae60..ce9e81e211 100644
--- a/doc/tutorials/imgproc/morph_lines_detection/morph_lines_detection.md
+++ b/doc/tutorials/imgproc/morph_lines_detection/morph_lines_detection.md
@@ -30,7 +30,7 @@ Two of the most basic morphological operations are dilation and erosion. Dilatio
 
 ![Dilation on a Grayscale Image](images/morph6.gif)
 
-- __Erosion__: The vise versa applies for the erosion operation. The value of the output pixel is the minimum value of all the pixels that fall within the structuring element's size and shape. Look the at the example figures below:
+- __Erosion__: The vice versa applies for the erosion operation. The value of the output pixel is the minimum value of all the pixels that fall within the structuring element's size and shape. Look the at the example figures below:
 
 ![Erosion on a Binary Image](images/morph211.png)
diff --git a/doc/tutorials/introduction/transition_guide/transition_guide.markdown b/doc/tutorials/introduction/transition_guide/transition_guide.markdown
index 67d0f6f12a..2151e7b2d4 100644
--- a/doc/tutorials/introduction/transition_guide/transition_guide.markdown
+++ b/doc/tutorials/introduction/transition_guide/transition_guide.markdown
@@ -189,7 +189,7 @@ brief->compute(gray, query_kpts, query_desc); //Compute brief descriptors at eac
 OpenCL {#tutorial_transition_hints_opencl}
 ------
-All specialized `ocl` implemetations has been hidden behind general C++ algorithm interface. Now the function execution path can be selected dynamically at runtime: CPU or OpenCL; this mechanism is also called "Transparent API".
+All specialized `ocl` implementations has been hidden behind general C++ algorithm interface. Now the function execution path can be selected dynamically at runtime: CPU or OpenCL; this mechanism is also called "Transparent API".
 
 New class cv::UMat is intended to hide data exchange with OpenCL device in a convenient way.
diff --git a/doc/tutorials/ios/video_processing/video_processing.markdown b/doc/tutorials/ios/video_processing/video_processing.markdown
index 2776219335..43b0b96338 100644
--- a/doc/tutorials/ios/video_processing/video_processing.markdown
+++ b/doc/tutorials/ios/video_processing/video_processing.markdown
@@ -101,7 +101,7 @@ using namespace cv;
 }
 @endcode
 In this case, we initialize the camera and provide the imageView as a target for rendering each
-frame. CvVideoCamera is basically a wrapper around AVFoundation, so we provie as properties some of
+frame. CvVideoCamera is basically a wrapper around AVFoundation, so we provide as properties some of
 the AVFoundation camera options. For example we want to use the front camera, set the video size to
 352x288 and a video orientation (the video camera normally outputs in landscape mode, which results
 in transposed data when you design a portrait application).
diff --git a/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown b/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown
index 8470e06486..e7f2ffaec8 100644
--- a/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown
+++ b/doc/tutorials/ml/non_linear_svms/non_linear_svms.markdown
@@ -13,7 +13,7 @@ In this tutorial you will learn how to:
 Motivation
 ----------
-Why is it interesting to extend the SVM optimation problem in order to handle non-linearly separable
+Why is it interesting to extend the SVM optimization problem in order to handle non-linearly separable
 training data? Most of the applications in which SVMs are used in computer vision require a more
 powerful tool than a simple linear classifier. This stems from the fact that in these tasks __the
 training data can be rarely separated using an hyperplane__.
diff --git a/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.markdown b/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.markdown
index 122c1ca0f1..33d5a95ff4 100644
--- a/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.markdown
+++ b/doc/tutorials/objdetect/cascade_classifier/cascade_classifier.markdown
@@ -113,7 +113,7 @@ This tutorial code's is shown lines below. You can also download it from
 Result
 ------
 
--# Here is the result of running the code above and using as input the video stream of a build-in
+-# Here is the result of running the code above and using as input the video stream of a built-in
 webcam:
 
 ![](images/Cascade_Classifier_Tutorial_Result_Haar.jpg)
diff --git a/doc/tutorials/video/optical_flow/optical_flow.markdown b/doc/tutorials/video/optical_flow/optical_flow.markdown
index 8b1d130238..d4809761bd 100644
--- a/doc/tutorials/video/optical_flow/optical_flow.markdown
+++ b/doc/tutorials/video/optical_flow/optical_flow.markdown
@@ -15,7 +15,7 @@ Optical Flow
 ------------
 
 Optical flow is the pattern of apparent motion of image objects between two consecutive frames
-caused by the movemement of object or camera. It is 2D vector field where each vector is a
+caused by the movement of object or camera. It is 2D vector field where each vector is a
 displacement vector showing the movement of points from first frame to second. Consider the image
 below (Image Courtesy: [Wikipedia article on Optical Flow](http://en.wikipedia.org/wiki/Optical_flow)).
diff --git a/samples/cpp/stitching_detailed.cpp b/samples/cpp/stitching_detailed.cpp
index 69221691d2..e94c68cd6d 100644
--- a/samples/cpp/stitching_detailed.cpp
+++ b/samples/cpp/stitching_detailed.cpp
@@ -622,7 +622,7 @@ int main(int argc, char* argv[])
     vector sizes(num_images);
     vector masks(num_images);
 
-    // Preapre images masks
+    // Prepare images masks
     for (int i = 0; i < num_images; ++i)
     {
         masks[i].create(images[i].size(), CV_8U);
diff --git a/samples/cpp/videocapture_gphoto2_autofocus.cpp b/samples/cpp/videocapture_gphoto2_autofocus.cpp
index d0baa315bf..6e635ee7ca 100644
--- a/samples/cpp/videocapture_gphoto2_autofocus.cpp
+++ b/samples/cpp/videocapture_gphoto2_autofocus.cpp
@@ -41,7 +41,7 @@ const int MAX_FOCUS_STEP = 32767;
 const int FOCUS_DIRECTION_INFTY = 1;
 const int DEFAULT_BREAK_LIMIT = 5;
 const int DEFAULT_OUTPUT_FPS = 20;
-const double epsylon = 0.0005; // compression, noice, etc.
+const double epsylon = 0.0005; // compression, noise, etc.
 
 struct Args_t
 {
diff --git a/samples/directx/d3d11_interop.cpp b/samples/directx/d3d11_interop.cpp
index d00f7cffb8..df40dd3e89 100644
--- a/samples/directx/d3d11_interop.cpp
+++ b/samples/directx/d3d11_interop.cpp
@@ -83,7 +83,7 @@ public:
         r = m_pD3D11SwapChain->GetBuffer(0, __uuidof(ID3D11Texture2D), (LPVOID*)&m_pBackBuffer);
         if (FAILED(r))
         {
-            throw std::runtime_error("GetBufer() failed!");
+            throw std::runtime_error("GetBuffer() failed!");
         }
 
         r = m_pD3D11Dev->CreateRenderTargetView(m_pBackBuffer, NULL, &m_pRenderTarget);
diff --git a/samples/dnn/face_detector/how_to_train_face_detector.txt b/samples/dnn/face_detector/how_to_train_face_detector.txt
index 11602297f2..3f6b1ba42d 100644
--- a/samples/dnn/face_detector/how_to_train_face_detector.txt
+++ b/samples/dnn/face_detector/how_to_train_face_detector.txt
@@ -67,7 +67,7 @@ You need to prepare 2 LMDB databases: one for training images, one for validatio
 3. Train your detector
 
 For training you need to have 3 files: train.prototxt, test.prototxt and solver.prototxt. You can find these files in the same directory as for this readme.
-Also you need to edit train.prototxt and test.prototxt to replace paths for your LMDB databases to actual databases you've crated in step 2.
+Also you need to edit train.prototxt and test.prototxt to replace paths for your LMDB databases to actual databases you've created in step 2.
 
 Now all is done for launch training process.
 Execute next lines in Terminal:
diff --git a/samples/dnn/openpose.py b/samples/dnn/openpose.py
index e6bb1ba05a..b79ccd54b8 100644
--- a/samples/dnn/openpose.py
+++ b/samples/dnn/openpose.py
@@ -88,7 +88,7 @@ while cv.waitKey(1) < 0:
 
     points = []
     for i in range(len(BODY_PARTS)):
-        # Slice heatmap of corresponging body's part.
+        # Slice heatmap of corresponding body's part.
         heatMap = out[0, i, :, :]
 
         # Originally, we try to find all the local maximums. To simplify a sample
diff --git a/samples/opencl/opencl-opencv-interop.cpp b/samples/opencl/opencl-opencv-interop.cpp
index d3b15668a9..f648f78bf8 100644
--- a/samples/opencl/opencl-opencv-interop.cpp
+++ b/samples/opencl/opencl-opencv-interop.cpp
@@ -703,7 +703,7 @@ int App::process_frame_with_open_cl(cv::Mat& frame, bool use_buffer, cl_mem* mem
     if (0 == mem || 0 == m_img_src)
     {
         // allocate/delete cl memory objects every frame for the simplicity.
-        // in real applicaton more efficient pipeline can be built.
+        // in real application more efficient pipeline can be built.
 
         if (use_buffer)
         {
diff --git a/samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py b/samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py
index d54d93c7fc..4905a2e13a 100644
--- a/samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py
+++ b/samples/python/tutorial_code/imgProc/threshold_inRange/threshold_inRange.py
@@ -66,7 +66,7 @@ def on_high_V_thresh_trackbar(val):
     cv.setTrackbarPos(high_V_name, window_detection_name, high_V)
 
 parser = argparse.ArgumentParser(description='Code for Thresholding Operations using inRange tutorial.')
-parser.add_argument('--camera', help='Camera devide number.', default=0, type=int)
+parser.add_argument('--camera', help='Camera divide number.', default=0, type=int)
 args = parser.parse_args()
 
 ## [cap]
diff --git a/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py b/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py
index 5ac5575a9e..d9fb460c0e 100644
--- a/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py
+++ b/samples/python/tutorial_code/objectDetection/cascade_classifier/objectDetection.py
@@ -25,7 +25,7 @@ def detectAndDisplay(frame):
 parser = argparse.ArgumentParser(description='Code for Cascade Classifier tutorial.')
 parser.add_argument('--face_cascade', help='Path to face cascade.', default='data/haarcascades/haarcascade_frontalface_alt.xml')
 parser.add_argument('--eyes_cascade', help='Path to eyes cascade.', default='data/haarcascades/haarcascade_eye_tree_eyeglasses.xml')
-parser.add_argument('--camera', help='Camera devide number.', type=int, default=0)
+parser.add_argument('--camera', help='Camera divide number.', type=int, default=0)
 args = parser.parse_args()
 
 face_cascade_name = args.face_cascade