From dbb57cd0aefb99cf07f998da6c6dfab254c5748d Mon Sep 17 00:00:00 2001
From: "luz.paz"
Date: Thu, 8 Feb 2018 05:51:54 -0500
Subject: [PATCH] Misc. ./samples typos

Found via `codespell -q 3 --skip="./3rdparty" -I ../opencv-whitelist.txt`
---
 samples/cpp/cloning_demo.cpp                      |  2 +-
 samples/cpp/cloning_gui.cpp                       |  4 ++--
 samples/cpp/cout_mat.cpp                          |  2 +-
 samples/cpp/create_mask.cpp                       |  2 +-
 samples/cpp/delaunay2.cpp                         |  4 ++--
 samples/cpp/detect_blob.cpp                       |  2 +-
 samples/cpp/filestorage.cpp                       |  4 ++--
 samples/cpp/fitellipse.cpp                        |  6 +++---
 samples/cpp/image_alignment.cpp                   |  2 +-
 samples/cpp/intelperc_capture.cpp                 |  4 ++--
 samples/cpp/matchmethod_orb_akaze_brisk.cpp       |  2 +-
 samples/cpp/select3dobj.cpp                       |  8 ++++----
 samples/cpp/shape_example.cpp                     |  2 +-
 samples/cpp/travelsalesman.cpp                    |  4 ++--
 .../tutorial_code/ImgTrans/HoughCircle_Demo.cpp   |  2 +-
 .../TrackingMotion/cornerSubPix_Demo.cpp          |  2 +-
 .../src/main_detection.cpp                        |  2 +-
 .../interoperability_with_OpenCV_1.cpp            |  2 +-
 .../mat_the_basic_image_container.cpp             | 16 ++++++++--------
 .../photo/seamless_cloning/cloning_demo.cpp       |  2 +-
 .../photo/seamless_cloning/cloning_gui.cpp        |  4 ++--
 samples/cpp/videostab.cpp                         |  8 ++++----
 .../face_detector/how_to_train_face_detector.txt  |  2 +-
 samples/dnn/mobilenet_ssd_python.py               |  2 +-
 samples/dnn/resnet_ssd_face_python.py             |  2 +-
 samples/gpu/driver_api_multi.cpp                  |  2 +-
 samples/gpu/generalized_hough.cpp                 |  6 +++---
 samples/gpu/multi.cpp                             |  2 +-
 samples/gpu/stereo_multi.cpp                      |  2 +-
 samples/python/mouse_and_match.py                 |  2 +-
 samples/python/plane_ar.py                        |  2 +-
 samples/python/video.py                           |  2 +-
 samples/tapi/hog.cpp                              |  2 +-
 .../ImageManipulations/AdvancedCapture.xaml.cpp   |  4 ++--
 samples/winrt/ImageManipulations/MainPage.xaml    |  2 +-
 .../common/suspensionmanager.cpp                  |  2 +-
 36 files changed, 60 insertions(+), 60 deletions(-)

diff --git a/samples/cpp/cloning_demo.cpp b/samples/cpp/cloning_demo.cpp
index be5da04bea..25729db296 100644
--- a/samples/cpp/cloning_demo.cpp
+++ b/samples/cpp/cloning_demo.cpp
@@ -15,7 +15,7 @@
 * 6- Texture Flattening
 * The program takes as input a source and a destination image (for 1-3 methods)
-* and ouputs the cloned image.
+* and outputs the cloned image.
 *
 * Download test images from opencv_extra folder @github.
 *
diff --git a/samples/cpp/cloning_gui.cpp b/samples/cpp/cloning_gui.cpp
index 4358abc33f..b8ac07ff06 100644
--- a/samples/cpp/cloning_gui.cpp
+++ b/samples/cpp/cloning_gui.cpp
@@ -15,7 +15,7 @@
 * 6- Texture Flattening
 * The program takes as input a source and a destination image (for 1-3 methods)
-* and ouputs the cloned image.
+* and outputs the cloned image.
 * Step 1:
 * -> In the source image, select the region of interest by left click mouse button. A Polygon ROI will be created by left clicking mouse button.
@@ -447,7 +447,7 @@ int main()
     }
     else
     {
-        cout << "Wrong Option Choosen" << endl;
+        cout << "Wrong Option Chosen" << endl;
         exit(1);
     }
diff --git a/samples/cpp/cout_mat.cpp b/samples/cpp/cout_mat.cpp
index ed2cd71c86..8315d7b780 100644
--- a/samples/cpp/cout_mat.cpp
+++ b/samples/cpp/cout_mat.cpp
@@ -17,7 +17,7 @@ static void help()
     << "\n------------------------------------------------------------------\n"
     << " This program shows the serial out capabilities of cv::Mat\n"
     << "That is, cv::Mat M(...); cout << M; Now works.\n"
-    << "Output can be formated to OpenCV, matlab, python, numpy, csv and \n"
+    << "Output can be formatted to OpenCV, matlab, python, numpy, csv and \n"
     << "C styles Usage:\n"
     << "./cvout_sample\n"
     << "------------------------------------------------------------------\n\n"
diff --git a/samples/cpp/create_mask.cpp b/samples/cpp/create_mask.cpp
index c87d0fdaee..b925cacba7 100644
--- a/samples/cpp/create_mask.cpp
+++ b/samples/cpp/create_mask.cpp
@@ -5,7 +5,7 @@
 * Siddharth Kherada
 *
 * This tutorial demonstrates how to make mask image (black and white).
-* The program takes as input a source image and ouputs its corresponding
+* The program takes as input a source image and outputs its corresponding
 * mask image.
 */
diff --git a/samples/cpp/delaunay2.cpp b/samples/cpp/delaunay2.cpp
index 4807cd373f..925477b4a0 100644
--- a/samples/cpp/delaunay2.cpp
+++ b/samples/cpp/delaunay2.cpp
@@ -7,8 +7,8 @@ using namespace std;
 static void help()
 {
-    cout << "\nThis program demostrates iterative construction of\n"
-            "delaunay triangulation and voronoi tesselation.\n"
+    cout << "\nThis program demonstrates iterative construction of\n"
+            "delaunay triangulation and voronoi tessellation.\n"
             "It draws a random set of points in an image and then delaunay triangulates them.\n"
             "Usage: \n"
             "./delaunay \n"
diff --git a/samples/cpp/detect_blob.cpp b/samples/cpp/detect_blob.cpp
index 6abe03be4f..c240be40e2 100644
--- a/samples/cpp/detect_blob.cpp
+++ b/samples/cpp/detect_blob.cpp
@@ -118,7 +118,7 @@ int main(int argc, char *argv[])
     help();
-    // This descriptor are going to be detect and compute BLOBS with 6 differents params
+    // These descriptors are going to be detecting and computing BLOBS with 6 different params
     // Param for first BLOB detector we want all
     typeDesc.push_back("BLOB");    // see http://docs.opencv.org/trunk/d0/d7a/classcv_1_1SimpleBlobDetector.html
     pBLOB.push_back(pDefaultBLOB);
diff --git a/samples/cpp/filestorage.cpp b/samples/cpp/filestorage.cpp
index 46b4da2414..3315455f48 100644
--- a/samples/cpp/filestorage.cpp
+++ b/samples/cpp/filestorage.cpp
@@ -18,8 +18,8 @@ static void help(char** av)
     cout << "\nfilestorage_sample demonstrate the usage of the opencv serialization functionality.\n"
         << "usage:\n"
         << av[0] << " outputfile.yml.gz\n"
-        << "\n outputfile above can have many different extenstions, see below."
-        << "\nThis program demonstrates the use of FileStorage for serialization, that is use << and >> in OpenCV\n"
+        << "\n outputfile above can have many different extensions, see below."
+        << "\nThis program demonstrates the use of FileStorage for serialization, that is in use << and >> in OpenCV\n"
        << "For example, how to create a class and have it serialize, but also how to use it to read and write matrices.\n"
        << "FileStorage allows you to serialize to various formats specified by the file end type."
        << "\nYou should try using different file extensions.(e.g. yaml yml xml xml.gz yaml.gz etc...)\n" << endl;
diff --git a/samples/cpp/fitellipse.cpp b/samples/cpp/fitellipse.cpp
index 2948449b93..3ca4158c3e 100644
--- a/samples/cpp/fitellipse.cpp
+++ b/samples/cpp/fitellipse.cpp
@@ -16,7 +16,7 @@
 *
 *
 * Original Author: Denis Burenkov
-* AMS and Direct Methods Autor: Jasper Shemilt
+* AMS and Direct Methods Author: Jasper Shemilt
 *
 *
 ********************************************************************************/
@@ -219,8 +219,8 @@ int main( int argc, char** argv )
     return 0;
 }
-// Define trackbar callback functon. This function find contours,
-// draw it and approximate it by ellipses.
+// Define trackbar callback function. This function finds contours,
+// draws them, and approximates by ellipses.
 void processImage(int /*h*/, void*)
 {
     RotatedRect box, boxAMS, boxDirect;
diff --git a/samples/cpp/image_alignment.cpp b/samples/cpp/image_alignment.cpp
index 4fd32e347d..c55d1d6ac2 100644
--- a/samples/cpp/image_alignment.cpp
+++ b/samples/cpp/image_alignment.cpp
@@ -60,7 +60,7 @@ const std::string keys =
 static void help(void)
 {
-    cout << "\nThis file demostrates the use of the ECC image alignment algorithm. When one image"
+    cout << "\nThis file demonstrates the use of the ECC image alignment algorithm. When one image"
         " is given, the template image is artificially formed by a random warp. When both images"
         " are given, the initialization of the warp by command line parsing is possible. "
         "If inputWarp is missing, the identity transformation initializes the algorithm. \n" << endl;
diff --git a/samples/cpp/intelperc_capture.cpp b/samples/cpp/intelperc_capture.cpp
index d8a1c32241..b6e66745c4 100644
--- a/samples/cpp/intelperc_capture.cpp
+++ b/samples/cpp/intelperc_capture.cpp
@@ -36,7 +36,7 @@ static void printUsage(const char *arg0)
     cout << " -isp=IDX, set profile index of the image stream" << endl;
     cout << " -dsp=IDX, set profile index of the depth stream" << endl;
     cout << " -ir, show data from IR stream" << endl;
-    cout << " -imb=VAL, set brighness value for a image stream" << endl;
+    cout << " -imb=VAL, set brightness value for an image stream" << endl;
     cout << " -imc=VAL, set contrast value for a image stream" << endl;
     cout << " -pts, print frame index and frame time" << endl;
     cout << " --show-closed, print frame index and frame time" << endl;
@@ -307,7 +307,7 @@ int main(int argc, char* argv[])
         return 0;
     }
-    //Setup additional properies only after set profile of the stream
+    //Setup additional properties only after set profile of the stream
     if ( (-10000.0 < g_imageBrightness) && (g_imageBrightness < 10000.0))
         capture.set(CAP_INTELPERC_IMAGE_GENERATOR | CAP_PROP_BRIGHTNESS, g_imageBrightness);
     if ( (0 < g_imageContrast) && (g_imageContrast < 10000.0))
diff --git a/samples/cpp/matchmethod_orb_akaze_brisk.cpp b/samples/cpp/matchmethod_orb_akaze_brisk.cpp
index d96ad51fb3..3a3320d8df 100644
--- a/samples/cpp/matchmethod_orb_akaze_brisk.cpp
+++ b/samples/cpp/matchmethod_orb_akaze_brisk.cpp
@@ -164,7 +164,7 @@ int main(int argc, char *argv[])
     }
     int i=0;
     cout << "Cumulative distance between keypoint match for different algorithm and feature detector \n\t";
-    cout << "We cannot say which is the best but we can say results are differents! \n\t";
+    cout << "We cannot say which is the best but we can say results are different! \n\t";
     for (vector::iterator itMatcher = typeAlgoMatch.begin(); itMatcher != typeAlgoMatch.end(); ++itMatcher)
     {
         cout<<*itMatcher<<"\t";
diff --git a/samples/cpp/select3dobj.cpp b/samples/cpp/select3dobj.cpp
index 7df366b756..dda4e770f1 100644
--- a/samples/cpp/select3dobj.cpp
+++ b/samples/cpp/select3dobj.cpp
@@ -1,7 +1,7 @@
 /*
 *
 * select3obj.cpp With a calibration chessboard on a table, mark an object in a 3D box and
- * track that object in all subseqent frames as long as the camera can see
+ * track that object in all subsequent frames as long as the camera can see
 * the chessboard. Also segments the object using the box projection. This
 * program is useful for collecting large datasets of many views of an object
 * on a table.
@@ -42,11 +42,11 @@ const char* helphelp =
 "\n"
 "Using a camera's intrinsics (from calibrating a camera -- see calibration.cpp) and an\n"
 "image of the object sitting on a planar surface with a calibration pattern of\n"
-"(board_width x board_height) on the surface, we draw a 3D box aroung the object. From\n"
+"(board_width x board_height) on the surface, we draw a 3D box around the object. From\n"
 "then on, we can move a camera and as long as it sees the chessboard calibration pattern,\n"
-"it will store a mask of where the object is. We get succesive images using \n"
+"it will store a mask of where the object is. We get successive images using \n"
 "of the segmentation mask containing the object. This makes creating training sets easy.\n"
-"It is best of the chessboard is odd x even in dimensions to avoid amiguous poses.\n"
+"It is best if the chessboard is odd x even in dimensions to avoid ambiguous poses.\n"
 "\n"
 "The actions one can use while the program is running are:\n"
 "\n"
diff --git a/samples/cpp/shape_example.cpp b/samples/cpp/shape_example.cpp
index 0d5b7bbb6f..6269a942ef 100644
--- a/samples/cpp/shape_example.cpp
+++ b/samples/cpp/shape_example.cpp
@@ -16,7 +16,7 @@ using namespace cv;
 static void help()
 {
     printf("\n"
-           "This program demonstrates a method for shape comparisson based on Shape Context\n"
+           "This program demonstrates a method for shape comparison based on Shape Context\n"
            "You should run the program providing a number between 1 and 20 for selecting an image in the folder ../data/shape_sample.\n"
            "Call\n"
            "./shape_example [number between 1 and 20, 1 default]\n\n");
diff --git a/samples/cpp/travelsalesman.cpp b/samples/cpp/travelsalesman.cpp
index 37eea67874..58dc46eb71 100644
--- a/samples/cpp/travelsalesman.cpp
+++ b/samples/cpp/travelsalesman.cpp
@@ -16,9 +16,9 @@ public:
     {
         rng = theRNG();
     }
-    /** Give energy value for a state of system.*/
+    /** Give energy value for a state of system.*/
     double energy() const;
-    /** Function which change the state of system (random pertubation).*/
+    /** Function which change the state of system (random perturbation).*/
     void changeState();
     /** Function to reverse to the previous state.*/
     void reverseState();
diff --git a/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp b/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp
index 604b57f41e..a1c3b1eac2 100644
--- a/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp
+++ b/samples/cpp/tutorial_code/ImgTrans/HoughCircle_Demo.cpp
@@ -91,7 +91,7 @@ int main(int argc, char** argv)
     char key = 0;
     while(key != 'q' && key != 'Q')
     {
-        // those paramaters cannot be =0
+        // those parameters cannot be =0
         // so we must check here
         cannyThreshold = std::max(cannyThreshold, 1);
         accumulatorThreshold = std::max(accumulatorThreshold, 1);
diff --git a/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp b/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp
index daadf41fe9..0addc5503e 100644
--- a/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp
+++ b/samples/cpp/tutorial_code/TrackingMotion/cornerSubPix_Demo.cpp
@@ -96,7 +96,7 @@ void goodFeaturesToTrack_Demo( int, void* )
     namedWindow( source_window, WINDOW_AUTOSIZE );
     imshow( source_window, copy );
-    /// Set the neeed parameters to find the refined corners
+    /// Set the needed parameters to find the refined corners
     Size winSize = Size( 5, 5 );
     Size zeroZone = Size( -1, -1 );
     TermCriteria criteria = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 40, 0.001 );
diff --git a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp
index de6f72bdbc..90ded1a0ad 100644
--- a/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp
+++ b/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/src/main_detection.cpp
@@ -84,7 +84,7 @@ int main(int argc, char *argv[])
        "{keypoints k |2000 | number of keypoints to detect }"
        "{ratio r |0.7 | threshold for ratio test }"
        "{iterations it |500 | RANSAC maximum iterations count }"
-       "{error e |2.0 | RANSAC reprojection errror }"
+       "{error e |2.0 | RANSAC reprojection error }"
        "{confidence c |0.95 | RANSAC confidence }"
        "{inliers in |30 | minimum inliers for Kalman update }"
        "{method pnp |0 | PnP method: (0) ITERATIVE - (1) EPNP - (2) P3P - (3) DLS}"
diff --git a/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp b/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp
index 79e7c99d85..4a65456a89 100644
--- a/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp
+++ b/samples/cpp/tutorial_code/core/interoperability_with_OpenCV_1/interoperability_with_OpenCV_1.cpp
@@ -13,7 +13,7 @@ static void help( char* progName)
 {
     cout << endl << progName
         << " shows how to use cv::Mat and IplImages together (converting back and forth)." << endl
-        << "Also contains example for image read, spliting the planes, merging back and " << endl
+        << "Also contains example for image read, splitting the planes, merging back and " << endl
        << " color conversion, plus iterating through pixels. " << endl
        << "Usage:" << endl
        << progName << " [image-name Default: ../data/lena.jpg]" << endl << endl;
diff --git a/samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp b/samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
index d65e9b5585..ac1c205258 100644
--- a/samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
+++ b/samples/cpp/tutorial_code/core/mat_the_basic_image_container/mat_the_basic_image_container.cpp
@@ -9,14 +9,14 @@ using namespace cv;
 static void help()
 {
     cout
-    << "\n--------------------------------------------------------------------------" << endl
+    << "\n---------------------------------------------------------------------------" << endl
    << "This program shows how to create matrices(cv::Mat) in OpenCV and its serial"
-    << " out capabilities" << endl
-    << "That is, cv::Mat M(...); M.create and cout << M. " << endl
-    << "Shows how output can be formated to OpenCV, python, numpy, csv and C styles." << endl
-    << "Usage:" << endl
-    << "./mat_the_basic_image_container" << endl
-    << "--------------------------------------------------------------------------" << endl
+    << " out capabilities" << endl
+    << "That is, cv::Mat M(...); M.create and cout << M. " << endl
+    << "Shows how output can be formatted to OpenCV, python, numpy, csv and C styles." << endl
+    << "Usage:" << endl
+    << "./mat_the_basic_image_container" << endl
+    << "-----------------------------------------------------------------------------" << endl
     << endl;
 }
@@ -76,7 +76,7 @@ int main(int,char**)
     randu(R, Scalar::all(0), Scalar::all(255));
     //! [random]
-    // Demonstrate the output formating options
+    // Demonstrate the output formatting options
     //! [out-default]
     cout << "R (default) = " << endl << R << endl << endl;
     //! [out-default]
diff --git a/samples/cpp/tutorial_code/photo/seamless_cloning/cloning_demo.cpp b/samples/cpp/tutorial_code/photo/seamless_cloning/cloning_demo.cpp
index 24d9b7facf..b8417116ec 100644
--- a/samples/cpp/tutorial_code/photo/seamless_cloning/cloning_demo.cpp
+++ b/samples/cpp/tutorial_code/photo/seamless_cloning/cloning_demo.cpp
@@ -15,7 +15,7 @@
 * 6- Texture Flattening
 * The program takes as input a source and a destination image (for 1-3 methods)
-* and ouputs the cloned image.
+* and outputs the cloned image.
 *
 * Download test images from opencv_extra folder @github.
 *
diff --git a/samples/cpp/tutorial_code/photo/seamless_cloning/cloning_gui.cpp b/samples/cpp/tutorial_code/photo/seamless_cloning/cloning_gui.cpp
index 38ce2959a2..8e9e23fb5c 100644
--- a/samples/cpp/tutorial_code/photo/seamless_cloning/cloning_gui.cpp
+++ b/samples/cpp/tutorial_code/photo/seamless_cloning/cloning_gui.cpp
@@ -15,7 +15,7 @@
 * 6- Texture Flattening
 * The program takes as input a source and a destination image (for 1-3 methods)
-* and ouputs the cloned image.
+* and outputs the cloned image.
 * Step 1:
 * -> In the source image, select the region of interest by left click mouse button. A Polygon ROI will be created by left clicking mouse button.
@@ -446,7 +446,7 @@ int main()
     }
     else
     {
-        cout << "Wrong Option Choosen" << endl;
+        cout << "Wrong Option Chosen" << endl;
         exit(0);
     }
diff --git a/samples/cpp/videostab.cpp b/samples/cpp/videostab.cpp
index 1bb3fc270f..e9afd0f4fe 100644
--- a/samples/cpp/videostab.cpp
+++ b/samples/cpp/videostab.cpp
@@ -131,7 +131,7 @@ void printHelp()
    " --mi-dist-thresh=\n"
    " Estimated flow distance threshold for motion inpainting. The default is 5.0.\n\n"
    " -ci=, --color-inpaint=(no|average|ns|telea)\n"
-    " Do color inpainting. The defailt is no.\n"
+    " Do color inpainting. The default is no.\n"
    " --ci-radius=\n"
    " Set color inpainting radius (for ns and telea options only).\n"
    " The default is 2.0\n\n"
@@ -163,9 +163,9 @@ void printHelp()
    " -gpu=(yes|no)\n"
    " Use CUDA optimization whenever possible. The default is no.\n\n"
    " -o=, --output=(no|)\n"
-    " Set output file path explicitely. The default is stabilized.avi.\n"
+    " Set output file path explicitly. The default is stabilized.avi.\n"
    " --fps=(|auto)\n"
-    " Set output video FPS explicitely. By default the source FPS is used (auto).\n"
+    " Set output video FPS explicitly. By default the source FPS is used (auto).\n"
    " -q, --quiet\n"
    " Don't show output video frames.\n\n"
    " -h, --help\n"
@@ -487,7 +487,7 @@ int main(int argc, const char **argv)
         stabilizer->setDeblurer(deblurer);
     }
-    // set up trimming paramters
+    // set up trimming parameters
     stabilizer->setTrimRatio(argf("trim-ratio"));
     stabilizer->setCorrectionForInclusion(arg("incl-constr") == "yes");
diff --git a/samples/dnn/face_detector/how_to_train_face_detector.txt b/samples/dnn/face_detector/how_to_train_face_detector.txt
index 78789d7ed7..9c170fadd6 100644
--- a/samples/dnn/face_detector/how_to_train_face_detector.txt
+++ b/samples/dnn/face_detector/how_to_train_face_detector.txt
@@ -1,6 +1,6 @@
 This is a brief description of training process which has been used to get res10_300x300_ssd_iter_140000.caffemodel.
 The model was created with SSD framework using ResNet-10 like architecture as a backbone. Channels count in ResNet-10 convolution layers was significantly dropped (2x- or 4x- fewer channels).
-The model was trained in Caffe framework on some huge and avaliable online dataset.
+The model was trained in Caffe framework on some huge and available online dataset.
 1. Prepare training tools
 You need to use "ssd" branch from this repository https://github.com/weiliu89/caffe/tree/ssd . Checkout this branch and built it (see instructions in repo's README)
diff --git a/samples/dnn/mobilenet_ssd_python.py b/samples/dnn/mobilenet_ssd_python.py
index eb23be8a1e..839f8794ac 100644
--- a/samples/dnn/mobilenet_ssd_python.py
+++ b/samples/dnn/mobilenet_ssd_python.py
@@ -16,7 +16,7 @@ try:
     import cv2 as cv
 except ImportError:
     raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
-                      'configure environemnt variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
+                      'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
 inWidth = 300
 inHeight = 300
diff --git a/samples/dnn/resnet_ssd_face_python.py b/samples/dnn/resnet_ssd_face_python.py
index f7d88ece4b..3f040a8ae5 100644
--- a/samples/dnn/resnet_ssd_face_python.py
+++ b/samples/dnn/resnet_ssd_face_python.py
@@ -5,7 +5,7 @@ try:
     import cv2 as cv
 except ImportError:
     raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
-                      'configure environemnt variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
+                      'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
 from cv2 import dnn
diff --git a/samples/gpu/driver_api_multi.cpp b/samples/gpu/driver_api_multi.cpp
index 6d49ee9990..a82d9e67de 100644
--- a/samples/gpu/driver_api_multi.cpp
+++ b/samples/gpu/driver_api_multi.cpp
@@ -1,4 +1,4 @@
-/* This sample demonstrates the way you can perform independed tasks
+/* This sample demonstrates the way you can perform independent tasks
    on the different GPUs */
 // Disable some warnings which are caused with CUDA headers
diff --git a/samples/gpu/generalized_hough.cpp b/samples/gpu/generalized_hough.cpp
index 7b7e80ab9d..a607769c16 100644
--- a/samples/gpu/generalized_hough.cpp
+++ b/samples/gpu/generalized_hough.cpp
@@ -32,8 +32,8 @@ int main(int argc, const char* argv[])
    "{ minDist | 100 | minimum distance between the centers of the detected objects }"
    "{ levels | 360 | R-Table levels }"
    "{ votesThreshold | 30 | the accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected }"
-    "{ angleThresh | 10000 | angle votes treshold }"
-    "{ scaleThresh | 1000 | scale votes treshold }"
+    "{ angleThresh | 10000 | angle votes threshold }"
+    "{ scaleThresh | 1000 | scale votes threshold }"
    "{ posThresh | 100 | position votes threshold }"
    "{ dp | 2 | inverse ratio of the accumulator resolution to the image resolution }"
    "{ minScale | 0.5 | minimal scale to detect }"
@@ -46,7 +46,7 @@ int main(int argc, const char* argv[])
    "{ help h ? | | print help message }"
    );
-    cmd.about("This program demonstrates arbitary object finding with the Generalized Hough transform.");
+    cmd.about("This program demonstrates arbitrary object finding with the Generalized Hough transform.");
     if (cmd.has("help"))
     {
diff --git a/samples/gpu/multi.cpp b/samples/gpu/multi.cpp
index 9c0e15f213..c3aac72d50 100644
--- a/samples/gpu/multi.cpp
+++ b/samples/gpu/multi.cpp
@@ -1,4 +1,4 @@
-/* This sample demonstrates the way you can perform independed tasks
+/* This sample demonstrates the way you can perform independent tasks
    on the different GPUs */
 // Disable some warnings which are caused with CUDA headers
diff --git a/samples/gpu/stereo_multi.cpp b/samples/gpu/stereo_multi.cpp
index d64b3583a0..c132d7ffa6 100644
--- a/samples/gpu/stereo_multi.cpp
+++ b/samples/gpu/stereo_multi.cpp
@@ -371,7 +371,7 @@ int main(int argc, char** argv)
         DeviceInfo devInfo(i);
         if (!devInfo.isCompatible())
         {
-            cerr << "CUDA module was't built for GPU #" << i << " ("
+            cerr << "CUDA module wasn't built for GPU #" << i << " ("
                  << devInfo.name() << ", CC " << devInfo.majorVersion() << devInfo.minorVersion() << endl;
             return -1;
diff --git a/samples/python/mouse_and_match.py b/samples/python/mouse_and_match.py
index 1c86f74174..aebca3c32c 100755
--- a/samples/python/mouse_and_match.py
+++ b/samples/python/mouse_and_match.py
@@ -67,7 +67,7 @@ if __name__ == '__main__':
     cv.setMouseCallback("gray", onmouse)
     '''Loop through all the images in the directory'''
     for infile in glob.glob( os.path.join(path, '*.*') ):
-        ext = os.path.splitext(infile)[1][1:] #get the filename extenstion
+        ext = os.path.splitext(infile)[1][1:] #get the filename extension
         if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm":
             print(infile)
diff --git a/samples/python/plane_ar.py b/samples/python/plane_ar.py
index 82652488eb..934550c0f5 100755
--- a/samples/python/plane_ar.py
+++ b/samples/python/plane_ar.py
@@ -5,7 +5,7 @@ Planar augmented reality
 ==================
 This sample shows an example of augmented reality overlay over a planar object
-tracked by PlaneTracker from plane_tracker.py. solvePnP funciton is used to
+tracked by PlaneTracker from plane_tracker.py. solvePnP function is used to
 estimate the tracked object location in 3d space.
 video: http://www.youtube.com/watch?v=pzVbhxx6aog
diff --git a/samples/python/video.py b/samples/python/video.py
index 42a1d7704f..e4eb2d39e1 100755
--- a/samples/python/video.py
+++ b/samples/python/video.py
@@ -8,7 +8,7 @@ frames from a camera of a movie file. Also the sample provides an example of
 procedural video generation by an object, mimicking the VideoCapture interface
 (see Chess class).
-'create_capture' is a convinience function for capture creation,
+'create_capture' is a convenience function for capture creation,
 falling back to procedural video in case of error.
 Usage:
diff --git a/samples/tapi/hog.cpp b/samples/tapi/hog.cpp
index f7eed37064..4753c19312 100644
--- a/samples/tapi/hog.cpp
+++ b/samples/tapi/hog.cpp
@@ -223,7 +223,7 @@ void App::run()
             if (output!="" && write_once)
             {
-                if (img_source!="") // wirte image
+                if (img_source!="") // write image
                 {
                     write_once = false;
                     imwrite(output, img_to_show);
diff --git a/samples/winrt/ImageManipulations/AdvancedCapture.xaml.cpp b/samples/winrt/ImageManipulations/AdvancedCapture.xaml.cpp
index cff0a5a799..f345982895 100644
--- a/samples/winrt/ImageManipulations/AdvancedCapture.xaml.cpp
+++ b/samples/winrt/ImageManipulations/AdvancedCapture.xaml.cpp
@@ -295,7 +295,7 @@ void AdvancedCapture::AddEffectToImageStream()
     Windows::Media::MediaProperties::IMediaEncodingProperties ^props = mediaCapture->VideoDeviceController->GetMediaStreamProperties(Windows::Media::Capture::MediaStreamType::Photo);
     if(props->Type->Equals("Image"))
     {
-        //Switch to a video media type instead since we cant add an effect to a image media type
+        //Switch to a video media type instead since we can't add an effect to an image media type
         Windows::Foundation::Collections::IVectorView^ supportedPropsList = mediaCapture->VideoDeviceController->GetAvailableMediaStreamProperties(Windows::Media::Capture::MediaStreamType::Photo);
         {
             unsigned int i = 0;
@@ -565,7 +565,7 @@ void SDKSample::MediaCapture::AdvancedCapture::Button_Click(Platform::Object^ se
     {
         Windows::Media::MediaProperties::IMediaEncodingProperties ^props = mediaCapture->VideoDeviceController->GetMediaStreamProperties(Windows::Media::Capture::MediaStreamType::VideoRecord);
         Windows::Media::MediaProperties::VideoEncodingProperties ^videoEncodingProperties = static_cast(props);
-        if(!videoEncodingProperties->Subtype->Equals("H264")) //Cant add an effect to an H264 stream
+        if(!videoEncodingProperties->Subtype->Equals("H264")) //Can't add an effect to an H264 stream
         {
             task(mediaCapture->AddEffectAsync(Windows::Media::Capture::MediaStreamType::VideoRecord,"OcvTransform.OcvImageManipulations", nullptr)).then([this](task effectTask2)
             {
diff --git a/samples/winrt/ImageManipulations/MainPage.xaml b/samples/winrt/ImageManipulations/MainPage.xaml
index 66ce5715f9..7d86d52f45 100644
--- a/samples/winrt/ImageManipulations/MainPage.xaml
+++ b/samples/winrt/ImageManipulations/MainPage.xaml
@@ -61,7 +61,7 @@
-
+
diff --git a/samples/winrt/ImageManipulations/common/suspensionmanager.cpp b/samples/winrt/ImageManipulations/common/suspensionmanager.cpp
index c1ecf11cfd..06fd263029 100644
--- a/samples/winrt/ImageManipulations/common/suspensionmanager.cpp
+++ b/samples/winrt/ImageManipulations/common/suspensionmanager.cpp
@@ -244,7 +244,7 @@ task SuspensionManager::SaveAsync(void)
 /// state, which in turn gives their active an opportunity restore its
 /// state.
 ///
-/// A version identifer compared to the session state to prevent
+/// A version identifier compared to the session state to prevent
 /// incompatible versions of session state from reaching app code. Saved state with a
 /// different version will be ignored, resulting in an empty
 /// dictionary.