diff --git a/doc/js_tutorials/js_gui/js_trackbar/js_trackbar.markdown b/doc/js_tutorials/js_gui/js_trackbar/js_trackbar.markdown
index ed1c021f62..5512d441af 100644
--- a/doc/js_tutorials/js_gui/js_trackbar/js_trackbar.markdown
+++ b/doc/js_tutorials/js_gui/js_trackbar/js_trackbar.markdown
@@ -36,7 +36,7 @@ let x = document.getElementById('myRange');
 @endcode
 
 As a trackbar, the range element need a trackbar name, the default value, minimum value, maximum value,
-step and the callback function which is executed everytime trackbar value changes. The callback function
+step and the callback function which is executed every time trackbar value changes. The callback function
 always has a default argument, which is the trackbar position. Additionally, a text element to display the
 trackbar value is fine. In our case, we can create the trackbar as below:
 @code{.html}
diff --git a/doc/py_tutorials/py_gui/py_trackbar/py_trackbar.markdown b/doc/py_tutorials/py_gui/py_trackbar/py_trackbar.markdown
index d6af059903..c55515c76d 100644
--- a/doc/py_tutorials/py_gui/py_trackbar/py_trackbar.markdown
+++ b/doc/py_tutorials/py_gui/py_trackbar/py_trackbar.markdown
@@ -16,7 +16,7 @@ correspondingly window color changes. By default, initial color will be set to B
 
 For cv.getTrackbarPos() function, first argument is the trackbar name, second one is the window name
 to which it is attached, third argument is the default value, fourth one is the maximum value
-and fifth one is the callback function which is executed everytime trackbar value changes. The
+and fifth one is the callback function which is executed every time trackbar value changes. The
 callback function always has a default argument which is the trackbar position. In our case,
 function does nothing, so we simply pass.
 
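(A note for reviewers reading these two hunks without the surrounding tutorials: the trackbar wiring they describe boils down to the short Python sketch below. It is illustrative only; the window name 'image', the trackbar name 'R' and the colour-fill loop are made up for the example, not taken from either tutorial.)

@code{.py}
import cv2 as cv
import numpy as np

def on_change(val):
    # the callback always receives the current trackbar position
    print('trackbar position:', val)

img = np.zeros((300, 512, 3), np.uint8)
cv.namedWindow('image')

# trackbar name, window name, default value, maximum value, callback
cv.createTrackbar('R', 'image', 0, 255, on_change)

while True:
    cv.imshow('image', img)
    if cv.waitKey(1) & 0xFF == 27:      # Esc quits
        break
    # poll the current position by trackbar name and window name
    r = cv.getTrackbarPos('R', 'image')
    img[:] = [0, 0, r]                  # BGR: vary only the red channel

cv.destroyAllWindows()
@endcode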
diff --git a/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown b/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown
index 1ef8443306..0b4c2bd744 100644
--- a/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown
+++ b/doc/py_tutorials/py_ml/py_knn/py_knn_opencv/py_knn_opencv.markdown
@@ -54,7 +54,7 @@ print( accuracy )
 @endcode
 So our basic OCR app is ready. This particular example gave me an accuracy of 91%. One option improve
 accuracy is to add more data for training, especially the wrong ones. So instead of finding
-this training data everytime I start application, I better save it, so that next time, I directly
+this training data every time I start application, I better save it, so that next time, I directly
 read this data from a file and start classification. You can do it with the help of some Numpy
 functions like np.savetxt, np.savez, np.load etc. Please check their docs for more details.
 @code{.py}
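(Side note on this hunk: the NumPy round trip the paragraph alludes to is short enough to sketch here. The shapes below are stand-ins chosen to match the 2500 training digits of 20x20 pixels used earlier in that tutorial; the snippet is not code copied from it.)

@code{.py}
import numpy as np

# stand-ins for the training data prepared earlier in the tutorial
train = np.random.randint(0, 255, (2500, 400)).astype(np.float32)
train_labels = np.repeat(np.arange(10), 250)[:, np.newaxis].astype(np.float32)

# save the prepared data once ...
np.savez('knn_data.npz', train=train, train_labels=train_labels)

# ... and on later runs simply load it back instead of rebuilding it
data = np.load('knn_data.npz')
train, train_labels = data['train'], data['train_labels']
@endcode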
diff --git a/doc/tutorials/imgproc/random_generator_and_text/random_generator_and_text.markdown b/doc/tutorials/imgproc/random_generator_and_text/random_generator_and_text.markdown
index d2f10214ea..f588bbc44d 100644
--- a/doc/tutorials/imgproc/random_generator_and_text/random_generator_and_text.markdown
+++ b/doc/tutorials/imgproc/random_generator_and_text/random_generator_and_text.markdown
@@ -210,12 +210,12 @@ Explanation
    @code{.cpp}
    image2 = image - Scalar::all(i)
    @endcode
-   So, **image2** is the substraction of **image** and **Scalar::all(i)**. In fact, what happens
-   here is that every pixel of **image2** will be the result of substracting every pixel of
+   So, **image2** is the subtraction of **image** and **Scalar::all(i)**. In fact, what happens
+   here is that every pixel of **image2** will be the result of subtracting every pixel of
    **image** minus the value of **i** (remember that for each pixel we are considering three values
    such as R, G and B, so each of them will be affected)
 
-   Also remember that the substraction operation *always* performs internally a **saturate**
+   Also remember that the subtraction operation *always* performs internally a **saturate**
    operation, which means that the result obtained will always be inside the allowed range (no
    negative and between 0 and 255 for our example).
 
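(The saturation behaviour this hunk describes is easy to double-check from Python, where the same saturated arithmetic is exposed through cv.subtract(). The check below is an aside for reviewers, not part of the tutorial.)

@code{.py}
import cv2 as cv
import numpy as np

x = np.uint8([[10]])
y = np.uint8([[20]])

# OpenCV subtraction saturates: 10 - 20 is clamped to 0
print(cv.subtract(x, y))   # [[0]]

# plain NumPy subtraction on uint8 wraps around modulo 256 instead
print(x - y)               # [[246]]
@endcode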
diff --git a/modules/core/include/opencv2/core/hal/msa_macros.h b/modules/core/include/opencv2/core/hal/msa_macros.h
index 97e4f4bb4a..3ed6e58d3c 100755
--- a/modules/core/include/opencv2/core/hal/msa_macros.h
+++ b/modules/core/include/opencv2/core/hal/msa_macros.h
@@ -502,7 +502,7 @@ typedef double v1f64 __attribute__ ((vector_size(8), aligned(8)));
   (v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_u_d((v2u64)__e, 31), (v4i32)__builtin_msa_sat_u_d((v2u64)__d, 31)); \
 })
 
-/* Minimum values between corresponding elements in the two vectors are written to teh returned vector. */
+/* Minimum values between corresponding elements in the two vectors are written to the returned vector. */
 #define msa_minq_s8(__a, __b)  (__builtin_msa_min_s_b(__a, __b))
 #define msa_minq_s16(__a, __b) (__builtin_msa_min_s_h(__a, __b))
 #define msa_minq_s32(__a, __b) (__builtin_msa_min_s_w(__a, __b))
@@ -514,7 +514,7 @@ typedef double v1f64 __attribute__ ((vector_size(8), aligned(8)));
 #define msa_minq_f32(__a, __b) (__builtin_msa_fmin_w(__a, __b))
 #define msa_minq_f64(__a, __b) (__builtin_msa_fmin_d(__a, __b))
 
-/* Maximum values between corresponding elements in the two vectors are written to teh returned vector. */
+/* Maximum values between corresponding elements in the two vectors are written to the returned vector. */
 #define msa_maxq_s8(__a, __b)  (__builtin_msa_max_s_b(__a, __b))
 #define msa_maxq_s16(__a, __b) (__builtin_msa_max_s_h(__a, __b))
 #define msa_maxq_s32(__a, __b) (__builtin_msa_max_s_w(__a, __b))
diff --git a/modules/dnn/src/cuda/concat.cu b/modules/dnn/src/cuda/concat.cu
index 21e542f062..87e72e1a87 100644
--- a/modules/dnn/src/cuda/concat.cu
+++ b/modules/dnn/src/cuda/concat.cu
@@ -82,7 +82,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
     {
         CV_Assert(is_fully_aligned(output, N));
         CV_Assert(is_fully_aligned(input, N));
-        /* more assertions are required to fully check for vectorization possiblity; check concat() */
+        /* more assertions are required to fully check for vectorization possibility; check concat() */
 
         auto kernel = raw::concat_vec;
         auto policy = make_policy(kernel, input.size() / N, 0, stream);
@@ -168,7 +168,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
         CV_Assert(output.rank() == input.rank());
         CV_Assert(output.rank() == offsets.size());
 
-        /* squeezable axes at the begining of both tensors can be eliminated
+        /* squeezable axes at the beginning of both tensors can be eliminated
          *
          * Reasoning:
          * ----------
diff --git a/modules/dnn/src/cuda/padding.cu b/modules/dnn/src/cuda/padding.cu
index d8f481205d..ed73b04577 100644
--- a/modules/dnn/src/cuda/padding.cu
+++ b/modules/dnn/src/cuda/padding.cu
@@ -103,7 +103,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
         CV_Assert(output.rank() == input.rank());
         CV_Assert(output.rank() == ranges.size());
 
-        /* squeezable axes at the begining of both tensors can be eliminated
+        /* squeezable axes at the beginning of both tensors can be eliminated
          *
          * Reasoning:
          * ----------
diff --git a/modules/dnn/src/cuda/permute.cu b/modules/dnn/src/cuda/permute.cu
index 7d0ffe86db..3643ad5d25 100644
--- a/modules/dnn/src/cuda/permute.cu
+++ b/modules/dnn/src/cuda/permute.cu
@@ -83,7 +83,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
         CV_Assert(input.rank() == order.size());
         CV_Assert(input.size() == output.size());
 
-        /* squeezable axes at the begining of both tensors which aren't permuted can be eliminated
+        /* squeezable axes at the beginning of both tensors which aren't permuted can be eliminated
          *
          * Reasoning:
          * ----------
diff --git a/modules/dnn/src/cuda/slice.cu b/modules/dnn/src/cuda/slice.cu
index a6e3a94e9b..27a166f36b 100644
--- a/modules/dnn/src/cuda/slice.cu
+++ b/modules/dnn/src/cuda/slice.cu
@@ -79,7 +79,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
         CV_Assert(output.rank() == input.rank());
         CV_Assert(output.rank() == offsets.size());
 
-        /* squeezable axes at the begining of both tensors can be eliminated
+        /* squeezable axes at the beginning of both tensors can be eliminated
          *
          * Reasoning:
          * ----------
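(The "squeezable axes at the beginning" comment corrected in the four CUDA kernels above refers to leading dimensions of size one: dropping them changes neither the element count nor the flat memory layout, so both tensors can be processed at a lower rank. The NumPy analogy below is purely illustrative and is not the cuda4dnn code.)

@code{.py}
import numpy as np

a = np.arange(24).reshape(1, 1, 2, 3, 4)   # two squeezable leading axes
b = a.reshape(2, 3, 4)                     # same data viewed at a lower rank

# the flat element order is untouched, so a kernel indexing the raw
# buffer behaves identically on either view
assert a.size == b.size
assert np.shares_memory(a, b)
assert np.array_equal(a.ravel(), b.ravel())
@endcode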
diff --git a/modules/dnn/src/cuda4dnn/csl/cudnn/cudnn.hpp b/modules/dnn/src/cuda4dnn/csl/cudnn/cudnn.hpp
index 59d19896af..06879448d7 100644
--- a/modules/dnn/src/cuda4dnn/csl/cudnn/cudnn.hpp
+++ b/modules/dnn/src/cuda4dnn/csl/cudnn/cudnn.hpp
@@ -218,7 +218,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cu
              *
              * cuDNN frequently assumes that the first axis is the batch axis and the
              * second axis is the channel axis; hence, we copy the shape of a lower rank
-             * tensor to the begining of `dims`
+             * tensor to the beginning of `dims`
              */
 
             std::copy(start, end, std::begin(dims));
diff --git a/modules/dnn/src/cuda4dnn/csl/tensor.hpp b/modules/dnn/src/cuda4dnn/csl/tensor.hpp
index eef69df5fe..b01d803208 100644
--- a/modules/dnn/src/cuda4dnn/csl/tensor.hpp
+++ b/modules/dnn/src/cuda4dnn/csl/tensor.hpp
@@ -53,7 +53,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
      * "TensorType" is used when only meta-information such as the size or shape is required, i.e. the data won't be touched
      */
 
-    /** if the \p axis is a negative index, the equivalent postive index is returned; otherwise, returns \p axis */
+    /** if the \p axis is a negative index, the equivalent positive index is returned; otherwise, returns \p axis */
     CUDA4DNN_HOST_DEVICE constexpr std::size_t clamp_axis(int axis, std::size_t rank) {
         return axis < 0 ? axis + rank : axis;
     }
diff --git a/modules/dnn/src/cuda4dnn/primitives/normalize_bbox.hpp b/modules/dnn/src/cuda4dnn/primitives/normalize_bbox.hpp
index a61ab99538..ecef608647 100644
--- a/modules/dnn/src/cuda4dnn/primitives/normalize_bbox.hpp
+++ b/modules/dnn/src/cuda4dnn/primitives/normalize_bbox.hpp
@@ -41,7 +41,7 @@ namespace cv { namespace dnn { namespace cuda4dnn {
         /* 1 for L1 norm, 2 for L2 norm */
         std::size_t norm;
 
-        /* epsilon to use to avoid divison by zero */
+        /* epsilon to use to avoid division by zero */
         T eps;
     };
 
diff --git a/modules/dnn/src/cuda4dnn/primitives/pooling.hpp b/modules/dnn/src/cuda4dnn/primitives/pooling.hpp
index 8b8cf37aed..544d8110cc 100644
--- a/modules/dnn/src/cuda4dnn/primitives/pooling.hpp
+++ b/modules/dnn/src/cuda4dnn/primitives/pooling.hpp
@@ -168,7 +168,7 @@ namespace cv { namespace dnn { namespace cuda4dnn {
              * copying the input to a bigger tensor and padding the ends manually
              *
              * But we first try to avoid the transformation using cuDNN's flexibility. cuDNN can accept a smaller or
-             * a bigger output shape. This effectively allows us to have arbitary padding at the right.
+             * a bigger output shape. This effectively allows us to have arbitrary padding at the right.
              */
             if (std::any_of(std::begin(padding_left), std::end(padding_left), is_not_zero))
             {
diff --git a/modules/dnn/src/op_cuda.hpp b/modules/dnn/src/op_cuda.hpp
index 5a106f352b..1cf3890fe0 100644
--- a/modules/dnn/src/op_cuda.hpp
+++ b/modules/dnn/src/op_cuda.hpp
@@ -65,7 +65,7 @@ namespace cv { namespace dnn {
      * \param[out] destTensor destination tensor
      * \param stream CUDA stream to use for the memory transfer
      *
-     * The memory copy starts from begining \p srcMat. The number of elements copied is
+     * The memory copy starts from beginning \p srcMat. The number of elements copied is
      * equal to the number of elements in \p destTensor.
      *
      * Pre-conditions:
diff --git a/modules/gapi/test/common/gapi_core_tests.hpp b/modules/gapi/test/common/gapi_core_tests.hpp
index fab4f6bbbd..d1bfa6aa9d 100644
--- a/modules/gapi/test/common/gapi_core_tests.hpp
+++ b/modules/gapi/test/common/gapi_core_tests.hpp
@@ -73,7 +73,7 @@ inline std::ostream& operator<<(std::ostream& os, bitwiseOp op)
 // 1. Default parameters: int type, cv::Size sz, int dtype, getCompileArgs() function
 //   - available in test body
 // 2. Input/output matrices will be initialized by initMatsRandU (in this fixture)
-// 3. Specific parameters: opType, testWithScalar, scale, doReverseOp of correponding types
+// 3. Specific parameters: opType, testWithScalar, scale, doReverseOp of corresponding types
 //   - created (and initialized) automatically
 //   - available in test body
 // Note: all parameter _values_ (e.g. type CV_8UC3) are set via INSTANTIATE_TEST_CASE_P macro
diff --git a/modules/gapi/test/common/gapi_imgproc_tests.hpp b/modules/gapi/test/common/gapi_imgproc_tests.hpp
index f684511f0b..11104a455b 100644
--- a/modules/gapi/test/common/gapi_imgproc_tests.hpp
+++ b/modules/gapi/test/common/gapi_imgproc_tests.hpp
@@ -25,7 +25,7 @@ namespace opencv_test
 // 1. Default parameters: int type, cv::Size sz, int dtype, getCompileArgs() function
 //   - available in test body
 // 2. Input/output matrices will be initialized by initMatrixRandN (in this fixture)
-// 3. Specific parameters: cmpF, kernSize, borderType of correponding types
+// 3. Specific parameters: cmpF, kernSize, borderType of corresponding types
 //   - created (and initialized) automatically
 //   - available in test body
 // Note: all parameter _values_ (e.g. type CV_8UC3) are set via INSTANTIATE_TEST_CASE_P macro
diff --git a/modules/gapi/test/common/gapi_operators_tests.hpp b/modules/gapi/test/common/gapi_operators_tests.hpp
index c5dc53b0ba..70bf477ac8 100644
--- a/modules/gapi/test/common/gapi_operators_tests.hpp
+++ b/modules/gapi/test/common/gapi_operators_tests.hpp
@@ -195,7 +195,7 @@ g_api_ocv_pair_mat_mat opXor = {std::string{"operator^"},
 // 1. Default parameters: int type, cv::Size sz, int dtype, getCompileArgs() function
 //   - available in test body
 // 2. Input/output matrices will be initialized by initMatsRandU (in this fixture)
-// 3. Specific parameters: cmpF, op of correponding types
+// 3. Specific parameters: cmpF, op of corresponding types
 //   - created (and initialized) automatically
 //   - available in test body
 // Note: all parameter _values_ (e.g. type CV_8UC3) are set via INSTANTIATE_TEST_CASE_P macro
diff --git a/modules/gapi/test/streaming/gapi_streaming_tests.cpp b/modules/gapi/test/streaming/gapi_streaming_tests.cpp
index 216258d524..d7fe3c05f0 100644
--- a/modules/gapi/test/streaming/gapi_streaming_tests.cpp
+++ b/modules/gapi/test/streaming/gapi_streaming_tests.cpp
@@ -540,7 +540,7 @@ TEST(GAPI_Streaming_Types, XChangeVector)
     auto fluid_kernels = cv::gapi::core::fluid::kernels();
     fluid_kernels.include();
 
-    // Here OCV takes precedense over Fluid, whith SubC & SumV remaining
+    // Here OCV takes precedence over Fluid, with SubC & SumV remaining
     // in Fluid.
     auto kernels = cv::gapi::combine(fluid_kernels, ocv_kernels);
 
diff --git a/modules/imgproc/test/test_watershed.cpp b/modules/imgproc/test/test_watershed.cpp
index 88518aa62a..90307ca30c 100644
--- a/modules/imgproc/test/test_watershed.cpp
+++ b/modules/imgproc/test/test_watershed.cpp
@@ -83,7 +83,7 @@ void CV_WatershedTest::run( int /* start_from */)
         Point* p = (Point*)cvGetSeqElem(cnts, 0);
 
         //expected image was added with 1 in order to save to png
-        //so now we substract 1 to get real color
+        //so now we subtract 1 to get real color
         if(!exp.empty())
             colors.push_back(exp.ptr(p->y)[p->x] - 1);
     }
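(Finally, on the clamp_axis() hunk in modules/dnn/src/cuda4dnn/csl/tensor.hpp above: the convention it documents, where a negative axis counts from the end, is the same one NumPy uses, and can be restated as a tiny stand-alone sketch. This is an illustration of the idea, not the C++ implementation.)

@code{.py}
def clamp_axis(axis, rank):
    """Map a possibly negative axis index to its positive equivalent."""
    return axis + rank if axis < 0 else axis

# for a rank-4 tensor, axis -1 refers to the last axis
assert clamp_axis(-1, 4) == 3
assert clamp_axis(2, 4) == 2
@endcode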