Merge pull request #13786 from ka7:spelling

This commit is contained in:
Alexander Alekhin 2019-02-11 12:38:49 +00:00
commit f8ac46ba07
36 changed files with 50 additions and 50 deletions

View File

@ -88,7 +88,7 @@ struct CvVectors
#if 0
/* A structure, representing the lattice range of statmodel parameters.
It is used for optimizing statmodel parameters by cross-validation method.
The lattice is logarithmic, so <step> must be greater then 1. */
The lattice is logarithmic, so <step> must be greater than 1. */
typedef struct CvParamLattice
{
double min_val;
@ -158,7 +158,7 @@ protected:
/* The structure, representing the grid range of statmodel parameters.
It is used for optimizing statmodel accuracy by varying model parameters,
the accuracy estimate being computed by cross-validation.
The grid is logarithmic, so <step> must be greater then 1. */
The grid is logarithmic, so <step> must be greater than 1. */
class CvMLData;

View File

@ -12,7 +12,7 @@ python gen_pattern.py -o out.svg -r 11 -c 8 -T circles -s 20.0 -R 5.0 -u mm -w 2
-u, --units - mm, inches, px, m (default mm)
-w, --page_width - page width in units (default 216)
-h, --page_height - page height in units (default 279)
-a, --page_size - page size (default A4), supercedes -h -w arguments
-a, --page_size - page size (default A4), supersedes -h -w arguments
-H, --help - show help
"""

View File

@ -176,7 +176,7 @@ public:
// You would need to provide the method body in the binder code
CV_WRAP_PHANTOM(static void* context());
//! The wrapped method become equvalent to `get(int flags = ACCESS_RW)`
//! The wrapped method becomes equivalent to `get(int flags = ACCESS_RW)`
CV_WRAP_AS(get) Mat getMat(int flags CV_WRAP_DEFAULT(ACCESS_RW)) const;
};
@endcode

View File

@ -12,7 +12,7 @@ Theory
We know SIFT uses 128-dim vector for descriptors. Since it is using floating point numbers, it takes
basically 512 bytes. Similarly SURF also takes minimum of 256 bytes (for 64-dim). Creating such a
vector for thousands of features takes a lot of memory which are not feasible for resouce-constraint
vector for thousands of features takes a lot of memory which is not feasible for resource-constrained
applications especially for embedded systems. Larger the memory, longer the time it takes for
matching.

View File

@ -262,7 +262,7 @@ Fluid backend to make our graph cache-efficient on CPU.
G-API defines _backend_ as the lower-level entity which knows how to
run kernels. Backends may have (and, in fact, do have) different
_Kernel APIs_ which are used to program and integrate kernels for that
backends. In this context, _kernel_ is an implementaion of an
backends. In this context, _kernel_ is an implementation of an
_operation_, which is defined on the top API level (see
G_TYPED_KERNEL() macro).

View File

@ -1167,7 +1167,7 @@ std::vector<cv::Point2f> Chessboard::Board::getCellCenters()const
int icols = int(colCount());
int irows = int(rowCount());
if(icols < 3 || irows < 3)
throw std::runtime_error("getCellCenters: Chessboard must be at least consist of 3 rows and cols to calcualte the cell centers");
throw std::runtime_error("getCellCenters: Chessboard must be at least consist of 3 rows and cols to calculate the cell centers");
std::vector<cv::Point2f> points;
cv::Matx33d H(estimateHomography(DUMMY_FIELD_SIZE));
@ -1626,7 +1626,7 @@ bool Chessboard::Board::init(const std::vector<cv::Point2f> points)
rows = 3;
cols = 3;
// set inital cell colors
// set initial cell colors
Point2f pt1 = *(cells[0]->top_right)-*(cells[0]->bottom_left);
pt1 /= cv::norm(pt1);
cv::Point2f pt2(cos(white_angle),-sin(white_angle));
@ -2923,7 +2923,7 @@ Chessboard::BState Chessboard::generateBoards(cv::flann::Index &flann_index,cons
points.push_back(*iter1);
}
// genreate pairs those connection goes through the center
// generate pairs whose connection goes through the center
std::vector<std::pair<cv::KeyPoint,cv::KeyPoint> > pairs;
iter1 = points.begin();
for(;iter1 != points.end();++iter1)
@ -3209,7 +3209,7 @@ bool findChessboardCornersSB(cv::InputArray image_, cv::Size pattern_size,
flags ^= CALIB_CB_ACCURACY;
}
if(flags)
CV_Error(Error::StsOutOfRange, cv::format("Invalid remaing flags %d", (int)flags));
CV_Error(Error::StsOutOfRange, cv::format("Invalid remaining flags %d", (int)flags));
std::vector<cv::KeyPoint> corners;
details::Chessboard board(para);

View File

@ -426,7 +426,7 @@ class Chessboard: public cv::Feature2D
size_t rowCount() const;
/**
* \brief Returns the inner contour of the board inlcuding only valid corners
* \brief Returns the inner contour of the board including only valid corners
*
* \info the contour might be non squared if not all points of the board are defined
*

View File

@ -2164,7 +2164,7 @@ inline void RHO_HEST_REFC::refine(void){
* order to compute a candidate homography (newH).
*
* The system above is solved by Cholesky decomposition of a
* sufficently-damped JtJ into a lower-triangular matrix (and its
* sufficiently-damped JtJ into a lower-triangular matrix (and its
* transpose), whose inverse is then computed. This inverse (and its
* transpose) then multiply Jte in order to find dH.
*/

View File

@ -105,7 +105,7 @@ double memory deallocation.
CV_EXPORTS void fastFree(void* ptr);
/*!
The STL-compilant memory Allocator based on cv::fastMalloc() and cv::fastFree()
The STL-compliant memory Allocator based on cv::fastMalloc() and cv::fastFree()
*/
template<typename _Tp> class Allocator
{

View File

@ -117,7 +117,7 @@ struct Ptr : public std::shared_ptr<T>
T* operator->() const CV_NOEXCEPT { return std::shared_ptr<T>::get();}
typename std::add_lvalue_reference<T>::type operator*() const CV_NOEXCEPT { return *std::shared_ptr<T>::get(); }
// OpenCV 3.x methods (not a part of standart C++ library)
// OpenCV 3.x methods (not a part of standard C++ library)
inline void release() { std::shared_ptr<T>::reset(); }
inline operator T* () const { return std::shared_ptr<T>::get(); }
inline bool empty() const { return std::shared_ptr<T>::get() == nullptr; }

View File

@ -2266,7 +2266,7 @@ inline v_float32x4 v_matmuladd(const v_float32x4& v, const v_float32x4& m0,
v.s[0]*m0.s[3] + v.s[1]*m1.s[3] + v.s[2]*m2.s[3] + m3.s[3]);
}
////// FP16 suport ///////
////// FP16 support ///////
inline v_reg<float, V_TypeTraits<float>::nlanes128>
v_load_expand(const float16_t* ptr)

View File

@ -1635,7 +1635,7 @@ inline void v_lut_deinterleave(const double* tab, const v_int32x4& idxvec, v_flo
}
#endif
////// FP16 suport ///////
////// FP16 support ///////
#if CV_FP16
inline v_float32x4 v_load_expand(const float16_t* ptr)
{

View File

@ -33,7 +33,7 @@ String dumpInputArray(InputArray argument)
}
catch (...)
{
ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}
@ -70,7 +70,7 @@ CV_EXPORTS_W String dumpInputArrayOfArrays(InputArrayOfArrays argument)
}
catch (...)
{
ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}
@ -100,7 +100,7 @@ CV_EXPORTS_W String dumpInputOutputArray(InputOutputArray argument)
}
catch (...)
{
ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}
@ -137,7 +137,7 @@ CV_EXPORTS_W String dumpInputOutputArrayOfArrays(InputOutputArrayOfArrays argume
}
catch (...)
{
ss << " ERROR: exception occured, dump is non-complete"; // need to properly support different kinds
ss << " ERROR: exception occurred, dump is non-complete"; // need to properly support different kinds
}
return ss.str();
}

View File

@ -246,7 +246,7 @@ BinaryFunc getCopyMaskFunc(size_t esz);
// There is some mess in code with vectors representation.
// Both vector-column / vector-rows are used with dims=2 (as Mat2D always).
// Reshape matrices if neccessary (in case of vectors) and returns size with scaled width.
// Reshape matrices if necessary (in case of vectors) and returns size with scaled width.
Size getContinuousSize2D(Mat& m1, int widthScale=1);
Size getContinuousSize2D(Mat& m1, Mat& m2, int widthScale=1);
Size getContinuousSize2D(Mat& m1, Mat& m2, Mat& m3, int widthScale=1);

View File

@ -344,7 +344,7 @@ cv::String findDataFile(const cv::String& relative_path,
#if defined OPENCV_INSTALL_PREFIX && defined OPENCV_DATA_INSTALL_PATH
cv::String install_dir(OPENCV_INSTALL_PREFIX);
// use core/world module path and verify that library is running from installation directory
// It is neccessary to avoid touching of unrelated common /usr/local path
// It is necessary to avoid touching of unrelated common /usr/local path
if (module_path.empty()) // can't determine
module_path = install_dir;
if (isSubDirectory(install_dir, module_path) || isSubDirectory(utils::fs::canonical(install_dir), utils::fs::canonical(module_path)))

View File

@ -119,7 +119,7 @@ message AttributeProto {
// implementations needed to use has_field hueristics to determine
// which value field was in use. For IR_VERSION 0.0.2 or later, this
// field MUST be set and match the f|i|s|t|... field in use. This
// change was made to accomodate proto3 implementations.
// change was made to accommodate proto3 implementations.
optional AttributeType type = 20; // discriminator that indicates which field below is in use
// Exactly ONE of the following fields must be present for this version of the IR

View File

@ -41,7 +41,7 @@ but it requires extra skills and knowledge of the target platform and
the algorithm implementation changes irrevocably -- becoming more
specific, less flexible, and harder to extend and maintain.
G-API takes this responsiblity and complexity from user and does the
G-API takes this responsibility and complexity from user and does the
majority of the work by itself, keeping the algorithm code clean from
device or optimization details. This approach has its own limitations,
though, as graph model is a _constrained_ model and not every

View File

@ -1255,7 +1255,7 @@ GAPI_EXPORTS std::tuple<GMat, GMat> integral(const GMat& src, int sdepth = -1, i
/** @brief Applies a fixed-level threshold to each matrix element.
The function applies fixed-level thresholding to a single- or multiple-channel matrix.
The function is typically used to get a bi-level (binary) image out of a grayscale image ( cmp funtions could be also used for
The function is typically used to get a bi-level (binary) image out of a grayscale image ( cmp functions could be also used for
this purpose) or for removing a noise, that is, filtering out pixels with too small or too large
values. There are several depths of thresholding supported by the function. They are determined by
depth parameter.

View File

@ -123,7 +123,7 @@ public:
*
* This may be useful since all temporary objects (cv::GMats) and
* namespaces can be localized to scope of lambda, without
* contaminating the parent scope with probably unecessary objects
* contaminating the parent scope with probably unnecessary objects
* and information.
*
* @param gen generator function which returns a cv::GComputation,

View File

@ -376,7 +376,7 @@ namespace gapi {
public:
/**
* @brief Returns total number of kernels in the package
* (accross all backends included)
* (across all backends included)
*
* @return a number of kernels in the package
*/

View File

@ -416,7 +416,7 @@ GAPI_FLUID_KERNEL(GFluidBlur, cv::gapi::imgproc::GBlur, true)
// TODO: support sizes 3, 5, 7, 9, ...
GAPI_Assert(kernelSize.width == 3 && kernelSize.height == 3);
// TODO: suport non-trivial anchor
// TODO: support non-trivial anchor
GAPI_Assert(anchor.x == -1 && anchor.y == -1);
static const bool normalize = true;
@ -488,7 +488,7 @@ GAPI_FLUID_KERNEL(GFluidBoxFilter, cv::gapi::imgproc::GBoxFilter, true)
// TODO: support sizes 3, 5, 7, 9, ...
GAPI_Assert(kernelSize.width == 3 && kernelSize.height == 3);
// TODO: suport non-trivial anchor
// TODO: support non-trivial anchor
GAPI_Assert(anchor.x == -1 && anchor.y == -1);
int width = src.length();

View File

@ -15,7 +15,7 @@
#include "executor/gexecutor.hpp"
// NB: BTW, GCompiled is the only "public API" class which
// private part (implementaion) is hosted in the "compiler/" module.
// private part (implementation) is hosted in the "compiler/" module.
//
// This file is here just to keep ADE hidden from the top-level APIs.
//

View File

@ -579,7 +579,7 @@ namespace
auto l_obj = gim.metadata(lhs_nh).get<FusedIsland>().object;
auto r_obj = gim.metadata(rhs_nh).get<FusedIsland>().object;
GAPI_LOG_INFO(NULL, r_obj->name() << " can be merged into " << l_obj->name());
// Try to do a merge. If merge was succesfull, check if the
// Try to do a merge. If merge was successful, check if the
// graph have cycles (cycles are prohibited at this point).
// If there are cycles, roll-back the merge and mark a pair of
// these Islands with a special tag - "cycle-causing".

View File

@ -101,7 +101,7 @@ void cv::gimpl::passes::checkIslands(ade::passes::PassContext &ctx)
{
GModel::ConstGraph gr(ctx.graph);
// The algorithm is teh following:
// The algorithm is the following:
//
// 1. Put all Tagged nodes (both Operations and Data) into a set
// 2. Initialize Visited set as (empty)

View File

@ -129,7 +129,7 @@ void cv::gimpl::passes::expandKernels(ade::passes::PassContext &ctx, const gapi:
GModel::Graph gr(ctx.graph);
// Repeat the loop while there are compound kernels.
// Restart procedure after every successfull unrolling
// Restart procedure after every successful unrolling
bool has_compound_kernel = true;
while (has_compound_kernel)
{

View File

@ -67,11 +67,11 @@ TEST_F(GCompiledValidateMetaTyped, InvalidMeta)
cv::Scalar sc(33);
cv::Mat out;
// 3 channels intead 1
// 3 channels instead of 1
cv::Mat in1 = cv::Mat::eye(cv::Size(64,32), CV_8UC3);
EXPECT_THROW(f(in1, sc, out), std::logic_error);
// 32f intead 8u
// 32f instead 8u
cv::Mat in2 = cv::Mat::eye(cv::Size(64,32), CV_32F);
EXPECT_THROW(f(in2, sc, out), std::logic_error);
@ -112,11 +112,11 @@ TEST_F(GCompiledValidateMetaUntyped, InvalidMetaValues)
cv::Scalar sc(33);
cv::Mat out;
// 3 channels intead 1
// 3 channels instead of 1
cv::Mat in1 = cv::Mat::eye(cv::Size(64,32), CV_8UC3);
EXPECT_THROW(f(cv::gin(in1, sc), cv::gout(out)), std::logic_error);
// 32f intead 8u
// 32f instead of 8u
cv::Mat in2 = cv::Mat::eye(cv::Size(64,32), CV_32F);
EXPECT_THROW(f(cv::gin(in2, sc), cv::gout(out)), std::logic_error);

View File

@ -73,7 +73,7 @@ TEST(GMetaArg, Traits_Are_ButLast_Positive)
using namespace cv::detail;
static_assert(are_meta_descrs_but_last<cv::GScalarDesc, int>::value,
"List is valid (int is ommitted)");
"List is valid (int is omitted)");
static_assert(are_meta_descrs_but_last<cv::GMatDesc, cv::GScalarDesc, cv::GCompileArgs>::value,
"List is valid (GCompileArgs are omitted)");

View File

@ -320,7 +320,7 @@ TEST(IslandsFusion, PartionOverlapUserIsland)
// |
// (in1) --------------------------`
// Check that internal islands does't overlap user island
// Check that internal islands don't overlap the user island
namespace J = Jupiter;
namespace S = Saturn;

View File

@ -124,7 +124,7 @@ TEST_F(Islands, TwoIslands)
}
// FIXME: Disabled since currently merge procedure merges two into one
// succesfully
// successfully
TEST_F(Islands, DISABLED_Two_Islands_With_Same_Name_Should_Fail)
{
// (in) -> Blur1 -> (tmp0) -> Blur2 -> (tmp1) -> Blur3 -> (tmp2) -> Blur4 -> (out)

View File

@ -90,7 +90,7 @@ TYPED_TEST(VectorRefT, ReadAfterWrite)
EXPECT_EQ(0u, writer.wref().size()); // Check the initial state
EXPECT_EQ(0u, reader.rref().size());
writer.wref().emplace_back(); // Check that write is successfull
writer.wref().emplace_back(); // Check that write is successful
EXPECT_EQ(1u, writer.wref().size());
EXPECT_EQ(1u, vec.size()); // Check that changes are reflected to the original container
@ -183,7 +183,7 @@ TYPED_TEST(VectorRefU, ReadAfterWrite)
EXPECT_EQ(0u, writer.wref<T>().size()); // Check the initial state
EXPECT_EQ(0u, reader.rref<T>().size());
writer.wref<T>().emplace_back(); // Check that write is successfull
writer.wref<T>().emplace_back(); // Check that write is successful
EXPECT_EQ(1u, writer.wref<T>().size());
EXPECT_EQ(1u, vec.size()); // Check that changes are reflected to the original container

View File

@ -17,8 +17,8 @@ static_assert(sizeof(float) == 4, "float must be 32 bit.");
bool is_byte_order_swapped(double scale)
{
// ".pfm" format file specifies that:
// positive scale means big endianess;
// negative scale means little endianess.
// positive scale means big endianness;
// negative scale means little endianness.
#ifdef WORDS_BIGENDIAN
return scale < 0.0;

View File

@ -120,7 +120,7 @@ public:
\f[(minVal, minVal*step, minVal*{step}^2, \dots, minVal*{logStep}^n),\f]
where \f$n\f$ is the maximal index satisfying
\f[\texttt{minVal} * \texttt{logStep} ^n < \texttt{maxVal}\f]
The grid is logarithmic, so logStep must always be greater then 1. Default value is 1.
The grid is logarithmic, so logStep must always be greater than 1. Default value is 1.
*/
CV_PROP_RW double logStep;

View File

@ -99,7 +99,7 @@ static void checkParamGrid(const ParamGrid& pg)
if( pg.minVal < DBL_EPSILON )
CV_Error( CV_StsBadArg, "Lower bound of the grid must be positive" );
if( pg.logStep < 1. + FLT_EPSILON )
CV_Error( CV_StsBadArg, "Grid step must greater then 1" );
CV_Error( CV_StsBadArg, "Grid step must greater than 1" );
}
// SVM training parameters

View File

@ -133,7 +133,7 @@ private:
};
/** @brief Exposure compensator which tries to remove exposure related artifacts by adjusting image
intensities on each channel independantly.
intensities on each channel independently.
*/
class CV_EXPORTS_W ChannelsCompensator : public ExposureCompensator
{

View File

@ -249,8 +249,8 @@ where src[i] and dst[i] are the i-th points in src and dst, respectively
\f[\begin{bmatrix} a_{11} & a_{12} & b_1 \\ -a_{12} & a_{11} & b_2 \end{bmatrix}\f]
when fullAffine=false.
@deprecated Use cv::estimateAffine2D, cv::estimateAffinePartial2D instead. If you are using this fuction
with images, extract points using cv::calcOpticalFlowPyrLK and then use the estimation fuctions.
@deprecated Use cv::estimateAffine2D, cv::estimateAffinePartial2D instead. If you are using this function
with images, extract points using cv::calcOpticalFlowPyrLK and then use the estimation functions.
@sa
estimateAffine2D, estimateAffinePartial2D, getAffineTransform, getPerspectiveTransform, findHomography

View File

@ -2178,7 +2178,7 @@ void videoInput::setPhyCon(int id, int conn){
// ----------------------------------------------------------------------
// Check that we are not trying to setup a non-existant device
// Check that we are not trying to setup a non-existent device
// Then start the graph building!
// ----------------------------------------------------------------------