Fix some typos

Brian Wignall 2019-11-25 19:55:07 -05:00
parent ad0ab4109a
commit 9276f1910b
19 changed files with 23 additions and 23 deletions

View File

@@ -210,12 +210,12 @@ Explanation
 @code{.cpp}
 image2 = image - Scalar::all(i)
 @endcode
-So, **image2** is the substraction of **image** and **Scalar::all(i)**. In fact, what happens
-here is that every pixel of **image2** will be the result of substracting every pixel of
+So, **image2** is the subtraction of **image** and **Scalar::all(i)**. In fact, what happens
+here is that every pixel of **image2** will be the result of subtracting every pixel of
 **image** minus the value of **i** (remember that for each pixel we are considering three values
 such as R, G and B, so each of them will be affected)
-Also remember that the substraction operation *always* performs internally a **saturate**
+Also remember that the subtraction operation *always* performs internally a **saturate**
 operation, which means that the result obtained will always be inside the allowed range (no
 negative and between 0 and 255 for our example).
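
The saturate behaviour described in this hunk is easy to verify. Below is a minimal sketch (the matrix contents and the constant 100 are illustrative, not taken from the tutorial): for a CV_8UC3 image, any per-channel result below 0 clamps to 0 instead of wrapping around.

    #include <opencv2/core.hpp>
    #include <iostream>

    int main()
    {
        cv::Mat image(2, 2, CV_8UC3, cv::Scalar(10, 128, 250));
        cv::Mat image2 = image - cv::Scalar::all(100); // saturating subtraction
        // 10 - 100 saturates to 0; 128 - 100 = 28; 250 - 100 = 150
        std::cout << image2.at<cv::Vec3b>(0, 0) << std::endl; // prints [0, 28, 150]
        return 0;
    }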

View File

@@ -502,7 +502,7 @@ typedef double v1f64 __attribute__ ((vector_size(8), aligned(8)));
     (v4u32)__builtin_msa_pckev_w((v4i32)__builtin_msa_sat_u_d((v2u64)__e, 31), (v4i32)__builtin_msa_sat_u_d((v2u64)__d, 31)); \
 })
-/* Minimum values between corresponding elements in the two vectors are written to teh returned vector. */
+/* Minimum values between corresponding elements in the two vectors are written to the returned vector. */
 #define msa_minq_s8(__a, __b) (__builtin_msa_min_s_b(__a, __b))
 #define msa_minq_s16(__a, __b) (__builtin_msa_min_s_h(__a, __b))
 #define msa_minq_s32(__a, __b) (__builtin_msa_min_s_w(__a, __b))
@@ -514,7 +514,7 @@ typedef double v1f64 __attribute__ ((vector_size(8), aligned(8)));
 #define msa_minq_f32(__a, __b) (__builtin_msa_fmin_w(__a, __b))
 #define msa_minq_f64(__a, __b) (__builtin_msa_fmin_d(__a, __b))
-/* Maximum values between corresponding elements in the two vectors are written to teh returned vector. */
+/* Maximum values between corresponding elements in the two vectors are written to the returned vector. */
 #define msa_maxq_s8(__a, __b) (__builtin_msa_max_s_b(__a, __b))
 #define msa_maxq_s16(__a, __b) (__builtin_msa_max_s_h(__a, __b))
 #define msa_maxq_s32(__a, __b) (__builtin_msa_max_s_w(__a, __b))

View File

@@ -82,7 +82,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
     {
         CV_Assert(is_fully_aligned<T>(output, N));
         CV_Assert(is_fully_aligned<T>(input, N));
-        /* more assertions are required to fully check for vectorization possiblity; check concat() */
+        /* more assertions are required to fully check for vectorization possibility; check concat() */
         auto kernel = raw::concat_vec<T, N>;
         auto policy = make_policy(kernel, input.size() / N, 0, stream);
@@ -168,7 +168,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
         CV_Assert(output.rank() == input.rank());
         CV_Assert(output.rank() == offsets.size());
-        /* squeezable axes at the begining of both tensors can be eliminated
+        /* squeezable axes at the beginning of both tensors can be eliminated
          *
          * Reasoning:
          * ----------
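
The squeezing idea this comment refers to can be sketched outside the kernels. This is a hypothetical helper, not the code used by concat(): leading size-1 axes do not change the flat memory layout, so they can be dropped from both shapes before dispatch.

    #include <cstddef>
    #include <vector>

    // Drop matching leading size-1 axes from both shapes; the element order
    // in memory is unchanged, so kernels can run on the lower-rank view.
    void squeeze_common_leading_axes(std::vector<std::size_t>& a,
                                     std::vector<std::size_t>& b)
    {
        while (!a.empty() && !b.empty() && a.front() == 1 && b.front() == 1)
        {
            a.erase(a.begin());
            b.erase(b.begin());
        }
    }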

View File

@@ -103,7 +103,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
         CV_Assert(output.rank() == input.rank());
         CV_Assert(output.rank() == ranges.size());
-        /* squeezable axes at the begining of both tensors can be eliminated
+        /* squeezable axes at the beginning of both tensors can be eliminated
          *
          * Reasoning:
          * ----------

View File

@@ -83,7 +83,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
         CV_Assert(input.rank() == order.size());
         CV_Assert(input.size() == output.size());
-        /* squeezable axes at the begining of both tensors which aren't permuted can be eliminated
+        /* squeezable axes at the beginning of both tensors which aren't permuted can be eliminated
          *
          * Reasoning:
          * ----------

View File

@@ -79,7 +79,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace kernels {
         CV_Assert(output.rank() == input.rank());
         CV_Assert(output.rank() == offsets.size());
-        /* squeezable axes at the begining of both tensors can be eliminated
+        /* squeezable axes at the beginning of both tensors can be eliminated
          *
          * Reasoning:
          * ----------

View File

@@ -218,7 +218,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl { namespace cu
          *
          * cuDNN frequently assumes that the first axis is the batch axis and the
          * second axis is the channel axis; hence, we copy the shape of a lower rank
-         * tensor to the begining of `dims`
+         * tensor to the beginning of `dims`
          */
         std::copy(start, end, std::begin(dims));
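
A small sketch of the comment's intent (the shapes are made up, not from the descriptor code): pre-fill `dims` with ones, then copy the lower-rank shape to the beginning so the batch and channel axes stay where cuDNN expects them.

    #include <algorithm>
    #include <array>

    int main()
    {
        std::array<int, 4> dims;
        dims.fill(1);                      // rank-4 descriptor, all ones
        std::array<int, 2> shape = {4, 3}; // hypothetical lower-rank tensor
        std::copy(shape.begin(), shape.end(), std::begin(dims));
        // dims is now {4, 3, 1, 1}: axis 0 = batch, axis 1 = channels
        return 0;
    }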

View File

@@ -53,7 +53,7 @@ namespace cv { namespace dnn { namespace cuda4dnn { namespace csl {
      * "TensorType" is used when only meta-information such as the size or shape is required, i.e. the data won't be touched
      */
-    /** if the \p axis is a negative index, the equivalent postive index is returned; otherwise, returns \p axis */
+    /** if the \p axis is a negative index, the equivalent positive index is returned; otherwise, returns \p axis */
     CUDA4DNN_HOST_DEVICE constexpr std::size_t clamp_axis(int axis, std::size_t rank) {
         return axis < 0 ? axis + rank : axis;
     }
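
Given the constexpr definition above, the behaviour can be pinned down at compile time; these asserts are illustrative usage, not part of the header.

    static_assert(clamp_axis(-1, 4) == 3, "last axis of a rank-4 tensor");
    static_assert(clamp_axis(-4, 4) == 0, "first axis via negative index");
    static_assert(clamp_axis(2, 4) == 2, "non-negative axes pass through");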

View File

@@ -41,7 +41,7 @@ namespace cv { namespace dnn { namespace cuda4dnn {
         /* 1 for L1 norm, 2 for L2 norm */
         std::size_t norm;
-        /* epsilon to use to avoid divison by zero */
+        /* epsilon to use to avoid division by zero */
         T eps;
     };
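
A scalar sketch of why eps exists (a simplified formula, not necessarily the layer's exact one): for an all-zero input the L2 norm is 0, and dividing by norm + eps avoids producing NaN or Inf.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    std::vector<float> l2_normalize(const std::vector<float>& v, float eps)
    {
        float sum = 0.f;
        for (float x : v) sum += x * x;        // squared L2 norm
        const float norm = std::sqrt(sum);
        std::vector<float> out(v.size());
        for (std::size_t i = 0; i < v.size(); ++i)
            out[i] = v[i] / (norm + eps);      // eps guards norm == 0
        return out;
    }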

View File

@@ -168,7 +168,7 @@ namespace cv { namespace dnn { namespace cuda4dnn {
              * copying the input to a bigger tensor and padding the ends manually
              *
              * But we first try to avoid the transformation using cuDNN's flexibility. cuDNN can accept a smaller or
-             * a bigger output shape. This effectively allows us to have arbitary padding at the right.
+             * a bigger output shape. This effectively allows us to have arbitrary padding at the right.
              */
             if (std::any_of(std::begin(padding_left), std::end(padding_left), is_not_zero))
             {

View File

@@ -65,7 +65,7 @@ namespace cv { namespace dnn {
      * \param[out] destTensor destination tensor
      * \param stream CUDA stream to use for the memory transfer
      *
-     * The memory copy starts from begining \p srcMat. The number of elements copied is
+     * The memory copy starts from beginning \p srcMat. The number of elements copied is
      * equal to the number of elements in \p destTensor.
      *
      * Pre-conditions:

View File

@@ -73,7 +73,7 @@ inline std::ostream& operator<<(std::ostream& os, bitwiseOp op)
 // 1. Default parameters: int type, cv::Size sz, int dtype, getCompileArgs() function
 //    - available in test body
 // 2. Input/output matrices will be initialized by initMatsRandU (in this fixture)
-// 3. Specific parameters: opType, testWithScalar, scale, doReverseOp of correponding types
+// 3. Specific parameters: opType, testWithScalar, scale, doReverseOp of corresponding types
 //    - created (and initialized) automatically
 //    - available in test body
 // Note: all parameter _values_ (e.g. type CV_8UC3) are set via INSTANTIATE_TEST_CASE_P macro
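
For readers unfamiliar with the pattern these comments describe, here is a generic value-parameterized GoogleTest sketch, not the project's custom fixture: values are supplied through INSTANTIATE_TEST_CASE_P, and the fixture reads them via GetParam().

    #include <gtest/gtest.h>

    class SquareTest : public ::testing::TestWithParam<int> {};

    TEST_P(SquareTest, IsNonNegative)
    {
        const int v = GetParam();
        EXPECT_GE(v * v, 0); // holds for every instantiated value
    }

    INSTANTIATE_TEST_CASE_P(SquareValues, SquareTest, ::testing::Values(-2, 0, 3));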

View File

@@ -25,7 +25,7 @@ namespace opencv_test
 // 1. Default parameters: int type, cv::Size sz, int dtype, getCompileArgs() function
 //    - available in test body
 // 2. Input/output matrices will be initialized by initMatrixRandN (in this fixture)
-// 3. Specific parameters: cmpF, kernSize, borderType of correponding types
+// 3. Specific parameters: cmpF, kernSize, borderType of corresponding types
 //    - created (and initialized) automatically
 //    - available in test body
 // Note: all parameter _values_ (e.g. type CV_8UC3) are set via INSTANTIATE_TEST_CASE_P macro

View File

@@ -195,7 +195,7 @@ g_api_ocv_pair_mat_mat opXor = {std::string{"operator^"},
 // 1. Default parameters: int type, cv::Size sz, int dtype, getCompileArgs() function
 //    - available in test body
 // 2. Input/output matrices will be initialized by initMatsRandU (in this fixture)
-// 3. Specific parameters: cmpF, op of correponding types
+// 3. Specific parameters: cmpF, op of corresponding types
 //    - created (and initialized) automatically
 //    - available in test body
 // Note: all parameter _values_ (e.g. type CV_8UC3) are set via INSTANTIATE_TEST_CASE_P macro

View File

@@ -540,7 +540,7 @@ TEST(GAPI_Streaming_Types, XChangeVector)
     auto fluid_kernels = cv::gapi::core::fluid::kernels();
     fluid_kernels.include<TypesTest::FluidAddV>();
-    // Here OCV takes precedense over Fluid, whith SubC & SumV remaining
+    // Here OCV takes precedense over Fluid, with SubC & SumV remaining
     // in Fluid.
     auto kernels = cv::gapi::combine(fluid_kernels, ocv_kernels);

View File

@@ -83,7 +83,7 @@ void CV_WatershedTest::run( int /* start_from */)
         Point* p = (Point*)cvGetSeqElem(cnts, 0);
         //expected image was added with 1 in order to save to png
-        //so now we substract 1 to get real color
+        //so now we subtract 1 to get real color
         if(!exp.empty())
             colors.push_back(exp.ptr(p->y)[p->x] - 1);
     }
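
The +1/-1 trick in this comment is likely tied to how cv::watershed marks boundary pixels with the label -1, which an unsigned PNG cannot store, so the expected image is saved as label + 1 and decoded back at load time. A hypothetical sketch of the round trip, not code from the test:

    // Hypothetical helpers mirroring the comment above.
    int encode_label(int label)  { return label + 1; }  // maps -1..N to 0..N+1
    int decode_label(int stored) { return stored - 1; } // inverse when loading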