Merge pull request #27338 from omahs:patch-1

Fix typos #27338

### Pull Request Readiness Checklist

See details at https://github.com/opencv/opencv/wiki/How_to_contribute#making-a-good-pull-request

- [x] I agree to contribute to the project under Apache 2 License.
- [x] To the best of my knowledge, the proposed patch is not based on code under GPL or another license that is incompatible with OpenCV
- [x] The PR is proposed to the proper branch
- [ ] There is a reference to the original bug report and related work
- [ ] There are accuracy tests, performance tests, and test data in the opencv_extra repository, if applicable
      The patch to opencv_extra has the same branch name.
- [ ] The feature is well documented and sample code can be built with the project CMake
omahs 2025-05-21 11:13:50 +02:00 committed by GitHub
parent dc610867e1
commit 0bc95d9256
14 changed files with 19 additions and 19 deletions

View File

@@ -113,7 +113,7 @@ int main( int argc, const char** argv )
int timing = 1;
// Value for cols of storing elements
-int cols_prefered = 5;
+int cols_preferred = 5;
// Open the XML model
FileStorage fs;
@@ -218,7 +218,7 @@ int main( int argc, const char** argv )
for(int sid = 0; sid < (int)stage_features.size(); sid ++){
if(draw_planes){
int features_nmbr = (int)stage_features[sid].size();
-int cols = cols_prefered;
+int cols = cols_preferred;
int rows = features_nmbr / cols;
if( (features_nmbr % cols) > 0){
rows++;
@@ -257,7 +257,7 @@ int main( int argc, const char** argv )
result_video.write(temp_window);
// Copy the feature image if needed
if(draw_planes){
-single_feature.copyTo(image_plane(Rect(0 + (fid%cols_prefered)*single_feature.cols, 0 + (fid/cols_prefered) * single_feature.rows, single_feature.cols, single_feature.rows)));
+single_feature.copyTo(image_plane(Rect(0 + (fid%cols_preferred)*single_feature.cols, 0 + (fid/cols_preferred) * single_feature.rows, single_feature.cols, single_feature.rows)));
}
putText(temp_metadata, meta1.str(), Point(15,15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255));
putText(temp_metadata, meta2.str(), Point(15,40), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255));
@@ -291,7 +291,7 @@ int main( int argc, const char** argv )
for(int sid = 0; sid < (int)stage_features.size(); sid ++){
if(draw_planes){
int features_nmbr = (int)stage_features[sid].size();
-int cols = cols_prefered;
+int cols = cols_preferred;
int rows = features_nmbr / cols;
if( (features_nmbr % cols) > 0){
rows++;
@@ -353,7 +353,7 @@ int main( int argc, const char** argv )
// Bottom right
rectangle(single_feature, Rect(resized_inner.x + 2*resized_inner.width, resized_inner.y + 2*resized_inner.height, resized_inner.width, resized_inner.height), Scalar(255), 1);
-single_feature.copyTo(image_plane(Rect(0 + (fid%cols_prefered)*single_feature.cols, 0 + (fid/cols_prefered) * single_feature.rows, single_feature.cols, single_feature.rows)));
+single_feature.copyTo(image_plane(Rect(0 + (fid%cols_preferred)*single_feature.cols, 0 + (fid/cols_preferred) * single_feature.rows, single_feature.cols, single_feature.rows)));
}
putText(temp_metadata, meta1.str(), Point(15,15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255));
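
The `fid % cols` / `fid / cols` arithmetic in the hunks above places each feature image into one grid cell of the output plane. Below is a minimal standalone sketch of that placement logic; the helper name and the assumption that all tiles share one size and type are illustrative, not part of the sample:

```cpp
#include <opencv2/core.hpp>
#include <vector>

// Hypothetical helper: tile equally sized images onto a single plane,
// cols_preferred columns wide, using the sample's Rect math.
cv::Mat tilePlanes(const std::vector<cv::Mat>& tiles, int cols_preferred)
{
    CV_Assert(!tiles.empty());
    int cols = cols_preferred;
    int rows = (int)tiles.size() / cols;
    if (((int)tiles.size() % cols) > 0)
        rows++; // partial last row, same rule as the sample
    cv::Mat plane = cv::Mat::zeros(rows * tiles[0].rows, cols * tiles[0].cols, tiles[0].type());
    for (int fid = 0; fid < (int)tiles.size(); fid++)
    {
        // fid % cols selects the column, fid / cols selects the row
        tiles[fid].copyTo(plane(cv::Rect((fid % cols) * tiles[0].cols,
                                         (fid / cols) * tiles[0].rows,
                                         tiles[0].cols, tiles[0].rows)));
    }
    return plane;
}
```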

View File

@@ -41,7 +41,7 @@ Assuming that we have successfully trained YOLOX model, the subsequent step involves
running this model with OpenCV. There are several critical considerations to address before
proceeding with this process. Let's delve into these aspects.
-### YOLO's Pre-proccessing & Output
+### YOLO's Pre-processing & Output
Understanding the nature of inputs and outputs associated with YOLO family detectors is pivotal.
These detectors, akin to most Deep Neural Networks (DNN), typically exhibit variation in input
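
As a companion to the tutorial text above, here is a minimal sketch of the usual OpenCV DNN pre-processing step. The model and image file names and the 640x640 input size are assumptions for illustration (YOLOX variants differ):

```cpp
#include <opencv2/dnn.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    // "yolox_s.onnx" and "input.jpg" are hypothetical file names
    cv::dnn::Net net = cv::dnn::readNet("yolox_s.onnx");
    cv::Mat img = cv::imread("input.jpg");
    // blobFromImage resizes, scales, and reorders channels in one call
    cv::Mat blob = cv::dnn::blobFromImage(img, 1.0, cv::Size(640, 640),
                                          cv::Scalar(), /*swapRB=*/true, /*crop=*/false);
    net.setInput(blob);
    cv::Mat out = net.forward(); // raw detections; decoding is model-specific
    return 0;
}
```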

View File

@@ -1040,7 +1040,7 @@ namespace CAROTENE_NS {
s32 maxVal, size_t * maxLocPtr, s32 & maxLocCount, s32 maxLocCapacity);
/*
-Among each pixel `p` within `src` find min and max values and its first occurences
+Among each pixel `p` within `src` find min and max values and its first occurrences
*/
void minMaxLoc(const Size2D &size,
const s8 * srcBase, ptrdiff_t srcStride,
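
carotene is OpenCV's internal ARM acceleration layer; the matching public API is cv::minMaxLoc, which likewise reports the first occurrence of each extremum. A small usage sketch:

```cpp
#include <opencv2/core.hpp>

void findExtrema(const cv::Mat& src) // src must be single-channel
{
    double minVal, maxVal;
    cv::Point minLoc, maxLoc;
    cv::minMaxLoc(src, &minVal, &maxVal, &minLoc, &maxLoc);
    // minLoc and maxLoc hold the first occurrence of each extremum
    // in row-major scan order
}
```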

View File

@@ -1535,7 +1535,7 @@ public:
return prevPtr;
}
-/// vxSwapImageHandle() wrapper for the case when no new pointers provided and previous ones are not needed (retrive memory back)
+/// vxSwapImageHandle() wrapper for the case when no new pointers provided and previous ones are not needed (retrieve memory back)
void swapHandle()
{ IVX_CHECK_STATUS( vxSwapImageHandle(ref, 0, 0, 0) ); }

View File

@@ -17,7 +17,7 @@ class Error : public Algorithm {
public:
// set model to use getError() function
virtual void setModelParameters (const Mat &model) = 0;
-// returns error of point wih @point_idx w.r.t. model
+// returns error of point with @point_idx w.r.t. model
virtual float getError (int point_idx) const = 0;
virtual const std::vector<float> &getErrors (const Mat &model) = 0;
};
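
For context, a hypothetical subclass showing how this interface is meant to be used: a squared point-to-line error. The class, its storage layout, and the unit-normal assumption are illustrative only, not an actual OpenCV implementation:

```cpp
#include <opencv2/core.hpp>
#include <vector>

// Illustrative only: error of 2D points w.r.t. a line a*x + b*y + c = 0,
// with one CV_32F (x, y) point per row and (a, b) assumed unit-length.
class LineError /* : public Error */ {
    cv::Mat points_;
    float a_ = 0, b_ = 0, c_ = 0;
    std::vector<float> errors_;
public:
    explicit LineError(const cv::Mat& points) : points_(points) {}
    void setModelParameters(const cv::Mat& model) {
        a_ = model.at<float>(0); b_ = model.at<float>(1); c_ = model.at<float>(2);
    }
    float getError(int point_idx) const {
        float x = points_.at<float>(point_idx, 0);
        float y = points_.at<float>(point_idx, 1);
        float d = a_ * x + b_ * y + c_; // signed distance for a unit normal
        return d * d;
    }
    const std::vector<float>& getErrors(const cv::Mat& model) {
        setModelParameters(model);
        errors_.resize(points_.rows);
        for (int i = 0; i < points_.rows; i++)
            errors_[i] = getError(i);
        return errors_;
    }
};
```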

View File

@@ -640,7 +640,7 @@ TEST_F(fisheyeTest, CalibrationWithFixedFocalLength)
cv::fisheye::calibrate(objectPoints, imagePoints, imageSize, theK, theD,
cv::noArray(), cv::noArray(), flag, cv::TermCriteria(3, 20, 1e-6));
-// ensure that CALIB_FIX_FOCAL_LENGTH works and focal lenght has not changed
+// ensure that CALIB_FIX_FOCAL_LENGTH works and focal length has not changed
EXPECT_EQ(theK(0,0), K(0,0));
EXPECT_EQ(theK(1,1), K(1,1));

View File

@@ -405,7 +405,7 @@ public:
//swap axis 0 and 1 input x
cv::Mat tmp;
// Since python input is 4 dimentional and C++ input 3 dimentinal
-// we need to proccess each differently
+// we need to process each differently
if (input[0].dims == 4){
// here !!!
CV_Assert(input[0].size[3] == 1);
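
A sketch of the dimensionality handling the comment describes, assuming the 4-D input merely carries a trailing singleton axis (the helper name is made up):

```cpp
#include <opencv2/core.hpp>

// Hypothetical helper: reduce a 4-D input with a trailing singleton axis
// to the 3-D shape the C++ path expects; reshape reinterprets, no copy.
cv::Mat dropTrailingAxis(const cv::Mat& input)
{
    if (input.dims == 4)
    {
        CV_Assert(input.size[3] == 1); // same precondition as above
        int shape[3] = { input.size[0], input.size[1], input.size[2] };
        return input.reshape(1, 3, shape);
    }
    return input; // already 3-D
}
```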

View File

@@ -2645,7 +2645,7 @@ void TFImporter::parsePReLU(tensorflow::GraphDef& net, const tensorflow::NodeDef
layerParams.blobs.resize(1);
if (scales.dims == 3) {
-// Considering scales from Keras wih HWC layout;
+// Considering scales from Keras with HWC layout;
transposeND(scales, {2, 0, 1}, layerParams.blobs[0]);
} else {
layerParams.blobs[0] = scales;
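
cv::transposeND with order {2, 0, 1} is what maps the HWC scales to CHW above. A tiny self-contained illustration, with shapes invented for the example:

```cpp
#include <opencv2/core.hpp>

void hwcToChw()
{
    int hwc[3] = { 4, 5, 3 }; // H x W x C
    cv::Mat scales(3, hwc, CV_32F, cv::Scalar::all(1));
    cv::Mat chw;
    // axis i of the output is axis order[i] of the input
    cv::transposeND(scales, { 2, 0, 1 }, chw);
    CV_Assert(chw.size[0] == 3 && chw.size[1] == 4 && chw.size[2] == 5);
}
```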

View File

@@ -146,7 +146,7 @@ public:
return (ptr_ - other.ptr_) / step_;
}
-/* Comparision */
+/* Comparison */
bool operator==(const ChannelsIterator<Traits>& other) const CV_NOEXCEPT
{
return ptr_ == other.ptr_;

View File

@@ -789,7 +789,7 @@ TEST(Drawing, fillpoly_fully)
cv::Mat labelImage(binary.size(), CV_32S);
cv::Mat labelCentroids;
int labels = cv::connectedComponents(binary, labelImage, 4);
-EXPECT_EQ(2, labels) << "artifacts occured";
+EXPECT_EQ(2, labels) << "artifacts occurred";
}
// check if filling went over border
@@ -878,7 +878,7 @@ PARAM_TEST_CASE(FillPolyFully, unsigned, unsigned, int, int, Point, cv::LineType
cv::Mat labelImage(binary.size(), CV_32S);
cv::Mat labelCentroids;
int labels = cv::connectedComponents(binary, labelImage, 4);
-EXPECT_EQ(2, labels) << "artifacts occured";
+EXPECT_EQ(2, labels) << "artifacts occurred";
}
void check_filling_over_border(cv::Mat& img, const std::vector<cv::Point>& polygonPoints)
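
A hedged reconstruction of the check these tests perform: after filling, the binarized image should decompose into exactly two labels (background plus one filled region), so any extra label signals a drawing artifact. The function name and binarization details here are assumptions:

```cpp
#include <opencv2/imgproc.hpp>
#include <vector>

bool fillIsClean(cv::Size size, const std::vector<cv::Point>& polygon)
{
    cv::Mat binary = cv::Mat::zeros(size, CV_8UC1);
    std::vector<std::vector<cv::Point>> pts = { polygon };
    cv::fillPoly(binary, pts, cv::Scalar(255));
    cv::Mat labelImage(binary.size(), CV_32S);
    int labels = cv::connectedComponents(binary, labelImage, 4);
    return labels == 2; // label 0: background, label 1: the filled polygon
}
```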

View File

@@ -256,7 +256,7 @@ public:
@param filename The input file name
@param headerLineCount The number of lines in the beginning to skip; besides the header, the
-function also skips empty lines and lines staring with `#`
+function also skips empty lines and lines starting with `#`
@param responseStartIdx Index of the first output variable. If -1, the function considers the
last variable as the response
@param responseEndIdx Index of the last output variable + 1. If -1, then there is single
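
A short usage sketch for the parameters documented above (the CSV file name is hypothetical); responseStartIdx = -1 selects the last column as the response:

```cpp
#include <opencv2/ml.hpp>

int main()
{
    cv::Ptr<cv::ml::TrainData> data = cv::ml::TrainData::loadFromCSV(
        "samples.csv",            // hypothetical file
        /*headerLineCount=*/1,    // skip one header line
        /*responseStartIdx=*/-1); // last variable is the response
    // empty lines and lines starting with '#' are skipped automatically
    return data.empty() ? 1 : 0;
}
```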

View File

@@ -256,7 +256,7 @@ CvGBTrees::train( const CvMat* _train_data, int _tflag,
// inside gbt learning process only regression decision trees are built
data->is_classifier = false;
-// preproccessing sample indices
+// preprocessing sample indices
if (_sample_idx)
{
int sample_idx_len = get_len(_sample_idx);

View File

@@ -1162,7 +1162,7 @@ bool GStreamerCapture::retrieveFrame(int index, OutputArray dst)
}
}
-CV_LOG_ERROR(NULL, "GStreamer(retrive): unrecognized index=" << index);
+CV_LOG_ERROR(NULL, "GStreamer(retrieve): unrecognized index=" << index);
return false;
}
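
The log above fires in the backend underneath cv::VideoCapture::retrieve(); a minimal sketch of the public call path (stream index 0 is the default; other indices are backend-specific):

```cpp
#include <opencv2/videoio.hpp>

bool grabAndRetrieve(cv::VideoCapture& cap, cv::Mat& frame)
{
    if (!cap.grab())
        return false;              // no frame could be grabbed
    return cap.retrieve(frame, 0); // an unrecognized index makes this fail
}
```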

View File

@@ -440,7 +440,7 @@ struct CvCaptureCAM_V4L CV_FINAL : public IVideoCapture
bool convertableToRgb() const;
void convertToRgb(const Buffer &currentBuffer);
-bool havePendingFrame; // true if next .grab() should be noop, .retrive() resets this flag
+bool havePendingFrame; // true if next .grab() should be noop, .retrieve() resets this flag
};
/*********************** Implementations ***************************************/
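
A hedged sketch of the pending-frame contract the comment describes: when a frame is already buffered, grab() becomes a no-op and retrieve() consumes it. The struct and the stubbed device read are illustrative, not the V4L implementation:

```cpp
struct CaptureSketch
{
    bool havePendingFrame = false;

    bool readFrameFromDevice() { return true; } // stub for illustration

    bool grab()
    {
        if (havePendingFrame)
            return true; // frame already buffered: grab() is a no-op
        havePendingFrame = readFrameFromDevice();
        return havePendingFrame;
    }

    bool retrieve()
    {
        if (!havePendingFrame)
            return false;
        havePendingFrame = false; // retrieve() resets the flag
        return true;
    }
};
```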