Misc. ./samples typos

Found via `codespell -q 3 --skip="./3rdparty" -I ../opencv-whitelist.txt`
luz.paz 2018-02-08 05:51:54 -05:00
parent 0c07836fba
commit dbb57cd0ae
36 changed files with 60 additions and 60 deletions

View File

@@ -15,7 +15,7 @@
* 6- Texture Flattening
* The program takes as input a source and a destination image (for 1-3 methods)
- * and ouputs the cloned image.
+ * and outputs the cloned image.
*
* Download test images from opencv_extra folder @github.
*

View File

@@ -15,7 +15,7 @@
* 6- Texture Flattening
* The program takes as input a source and a destination image (for 1-3 methods)
- * and ouputs the cloned image.
+ * and outputs the cloned image.
* Step 1:
* -> In the source image, select the region of interest by left click mouse button. A Polygon ROI will be created by left clicking mouse button.
@@ -447,7 +447,7 @@ int main()
}
else
{
- cout << "Wrong Option Choosen" << endl;
+ cout << "Wrong Option Chosen" << endl;
exit(1);
}

View File

@@ -17,7 +17,7 @@ static void help()
<< "\n------------------------------------------------------------------\n"
<< " This program shows the serial out capabilities of cv::Mat\n"
<< "That is, cv::Mat M(...); cout << M; Now works.\n"
- << "Output can be formated to OpenCV, matlab, python, numpy, csv and \n"
+ << "Output can be formatted to OpenCV, matlab, python, numpy, csv and \n"
<< "C styles Usage:\n"
<< "./cvout_sample\n"
<< "------------------------------------------------------------------\n\n"
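For reference, a minimal sketch of the formatted output the help text above describes (not part of this commit; assumes an OpenCV 3.x build):

```cpp
#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    // Small matrix to print in the styles listed in the help text.
    cv::Mat M = (cv::Mat_<double>(2, 2) << 1, 2, 3, 4);
    std::cout << "default:\n" << M << "\n";
    std::cout << "python:\n"  << cv::format(M, cv::Formatter::FMT_PYTHON) << "\n";
    std::cout << "numpy:\n"   << cv::format(M, cv::Formatter::FMT_NUMPY)  << "\n";
    std::cout << "csv:\n"     << cv::format(M, cv::Formatter::FMT_CSV)    << "\n";
    std::cout << "C:\n"       << cv::format(M, cv::Formatter::FMT_C)      << "\n";
    return 0;
}
```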

View File

@@ -5,7 +5,7 @@
* Siddharth Kherada <siddharthkherada27[at]gmail[dot]com>
*
* This tutorial demonstrates how to make mask image (black and white).
- * The program takes as input a source image and ouputs its corresponding
+ * The program takes as input a source image and outputs its corresponding
* mask image.
*/

View File

@@ -7,8 +7,8 @@ using namespace std;
static void help()
{
- cout << "\nThis program demostrates iterative construction of\n"
- "delaunay triangulation and voronoi tesselation.\n"
+ cout << "\nThis program demonstrates iterative construction of\n"
+ "delaunay triangulation and voronoi tessellation.\n"
"It draws a random set of points in an image and then delaunay triangulates them.\n"
"Usage: \n"
"./delaunay \n"

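A minimal sketch of the iterative construction this sample's help text refers to (not from this commit; the canvas size and point count are arbitrary):

```cpp
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <cstdio>
#include <vector>

int main()
{
    cv::Rect rect(0, 0, 600, 600);
    cv::Subdiv2D subdiv(rect);
    cv::RNG rng(12345);

    // Insert random points one at a time; the triangulation is updated incrementally.
    for (int i = 0; i < 50; ++i)
        subdiv.insert(cv::Point2f(rng.uniform(5.f, 595.f), rng.uniform(5.f, 595.f)));

    std::vector<cv::Vec6f> triangles;
    subdiv.getTriangleList(triangles);                 // Delaunay triangles
    std::vector<std::vector<cv::Point2f> > facets;
    std::vector<cv::Point2f> centers;
    subdiv.getVoronoiFacetList(std::vector<int>(), facets, centers); // Voronoi cells
    std::printf("%d triangles, %d facets\n", (int)triangles.size(), (int)facets.size());
    return 0;
}
```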
View File

@@ -118,7 +118,7 @@ int main(int argc, char *argv[])
help();
- // This descriptor are going to be detect and compute BLOBS with 6 differents params
+ // These descriptors are going to be detecting and computing BLOBS with 6 different params
// Param for first BLOB detector we want all
typeDesc.push_back("BLOB"); // see http://docs.opencv.org/trunk/d0/d7a/classcv_1_1SimpleBlobDetector.html
pBLOB.push_back(pDefaultBLOB);
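For context, a hedged sketch of one such detector configuration (the input file name is a placeholder, and only a single parameter set is shown, not the six the sample builds):

```cpp
#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("blobs.png", cv::IMREAD_GRAYSCALE); // placeholder image
    if (img.empty()) return 1;

    cv::SimpleBlobDetector::Params p;          // defaults, then restrict by area
    p.filterByArea = true;
    p.minArea = 30;
    cv::Ptr<cv::SimpleBlobDetector> detector = cv::SimpleBlobDetector::create(p);

    std::vector<cv::KeyPoint> keypoints;
    detector->detect(img, keypoints);          // one KeyPoint per detected blob
    return 0;
}
```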

View File

@@ -18,8 +18,8 @@ static void help(char** av)
cout << "\nfilestorage_sample demonstrate the usage of the opencv serialization functionality.\n"
<< "usage:\n"
<< av[0] << " outputfile.yml.gz\n"
- << "\n outputfile above can have many different extenstions, see below."
- << "\nThis program demonstrates the use of FileStorage for serialization, that is use << and >> in OpenCV\n"
+ << "\n outputfile above can have many different extensions, see below."
+ << "\nThis program demonstrates the use of FileStorage for serialization, that is in use << and >> in OpenCV\n"
<< "For example, how to create a class and have it serialize, but also how to use it to read and write matrices.\n"
<< "FileStorage allows you to serialize to various formats specified by the file end type."
<< "\nYou should try using different file extensions.(e.g. yaml yml xml xml.gz yaml.gz etc...)\n" << endl;
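A minimal sketch of the serialization this help text describes, assuming an OpenCV 3.x build (writing with << and reading back with >>; the file extension selects the format):

```cpp
#include <opencv2/core.hpp>

int main()
{
    cv::Mat R = cv::Mat::eye(3, 3, CV_64F);

    {   // write: .yml, .xml, .yml.gz, ... all work, chosen by the extension
        cv::FileStorage fs("outputfile.yml.gz", cv::FileStorage::WRITE);
        fs << "strings" << "[" << "image1.jpg" << "image2.jpg" << "]";
        fs << "R" << R;
    }   // the destructor releases (and flushes) the file

    {   // read back
        cv::FileStorage fs("outputfile.yml.gz", cv::FileStorage::READ);
        cv::Mat R2;
        fs["R"] >> R2;
    }
    return 0;
}
```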

View File

@@ -16,7 +16,7 @@
*
*
* Original Author: Denis Burenkov
- * AMS and Direct Methods Autor: Jasper Shemilt
+ * AMS and Direct Methods Author: Jasper Shemilt
*
*
********************************************************************************/
@@ -219,8 +219,8 @@ int main( int argc, char** argv )
return 0;
}
- // Define trackbar callback functon. This function find contours,
- // draw it and approximate it by ellipses.
+ // Define trackbar callback function. This function finds contours,
+ // draws them, and approximates by ellipses.
void processImage(int /*h*/, void*)
{
RotatedRect box, boxAMS, boxDirect;

View File

@@ -60,7 +60,7 @@ const std::string keys =
static void help(void)
{
- cout << "\nThis file demostrates the use of the ECC image alignment algorithm. When one image"
+ cout << "\nThis file demonstrates the use of the ECC image alignment algorithm. When one image"
" is given, the template image is artificially formed by a random warp. When both images"
" are given, the initialization of the warp by command line parsing is possible. "
"If inputWarp is missing, the identity transformation initializes the algorithm. \n" << endl;

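A hedged sketch of the workflow this help text describes, with one input image, a template formed by a small artificial warp, and identity initialization (the file name and warp values are placeholders):

```cpp
#include <opencv2/video.hpp>      // findTransformECC
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat img = cv::imread("fruits.jpg", cv::IMREAD_GRAYSCALE); // placeholder input
    if (img.empty()) return 1;

    // Artificial Euclidean warp standing in for the "randomly warped" template.
    cv::Mat warpGround = (cv::Mat_<float>(2, 3) << 1, 0, 5,
                                                   0, 1, -3);
    cv::Mat templ;
    cv::warpAffine(img, templ, warpGround, img.size(),
                   cv::INTER_LINEAR + cv::WARP_INVERSE_MAP);

    // Identity initialization, as when no inputWarp is supplied.
    cv::Mat warp = cv::Mat::eye(2, 3, CV_32F);
    cv::findTransformECC(templ, img, warp, cv::MOTION_EUCLIDEAN,
                         cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 50, 0.001));
    return 0;
}
```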
View File

@@ -36,7 +36,7 @@ static void printUsage(const char *arg0)
cout << " -isp=IDX, set profile index of the image stream" << endl;
cout << " -dsp=IDX, set profile index of the depth stream" << endl;
cout << " -ir, show data from IR stream" << endl;
- cout << " -imb=VAL, set brighness value for a image stream" << endl;
+ cout << " -imb=VAL, set brightness value for an image stream" << endl;
cout << " -imc=VAL, set contrast value for a image stream" << endl;
cout << " -pts, print frame index and frame time" << endl;
cout << " --show-closed, print frame index and frame time" << endl;
@@ -307,7 +307,7 @@ int main(int argc, char* argv[])
return 0;
}
- //Setup additional properies only after set profile of the stream
+ //Setup additional properties only after set profile of the stream
if ( (-10000.0 < g_imageBrightness) && (g_imageBrightness < 10000.0))
capture.set(CAP_INTELPERC_IMAGE_GENERATOR | CAP_PROP_BRIGHTNESS, g_imageBrightness);
if ( (0 < g_imageContrast) && (g_imageContrast < 10000.0))

View File

@@ -164,7 +164,7 @@ int main(int argc, char *argv[])
}
int i=0;
cout << "Cumulative distance between keypoint match for different algorithm and feature detector \n\t";
- cout << "We cannot say which is the best but we can say results are differents! \n\t";
+ cout << "We cannot say which is the best but we can say results are different! \n\t";
for (vector<String>::iterator itMatcher = typeAlgoMatch.begin(); itMatcher != typeAlgoMatch.end(); ++itMatcher)
{
cout<<*itMatcher<<"\t";

View File

@@ -1,7 +1,7 @@
/*
*
* select3obj.cpp With a calibration chessboard on a table, mark an object in a 3D box and
- * track that object in all subseqent frames as long as the camera can see
+ * track that object in all subsequent frames as long as the camera can see
* the chessboard. Also segments the object using the box projection. This
* program is useful for collecting large datasets of many views of an object
* on a table.
@@ -42,11 +42,11 @@ const char* helphelp =
"\n"
"Using a camera's intrinsics (from calibrating a camera -- see calibration.cpp) and an\n"
"image of the object sitting on a planar surface with a calibration pattern of\n"
- "(board_width x board_height) on the surface, we draw a 3D box aroung the object. From\n"
+ "(board_width x board_height) on the surface, we draw a 3D box around the object. From\n"
"then on, we can move a camera and as long as it sees the chessboard calibration pattern,\n"
- "it will store a mask of where the object is. We get succesive images using <output_prefix>\n"
+ "it will store a mask of where the object is. We get successive images using <output_prefix>\n"
"of the segmentation mask containing the object. This makes creating training sets easy.\n"
- "It is best of the chessboard is odd x even in dimensions to avoid amiguous poses.\n"
+ "It is best if the chessboard is odd x even in dimensions to avoid ambiguous poses.\n"
"\n"
"The actions one can use while the program is running are:\n"
"\n"

View File

@@ -16,7 +16,7 @@ using namespace cv;
static void help()
{
printf("\n"
- "This program demonstrates a method for shape comparisson based on Shape Context\n"
+ "This program demonstrates a method for shape comparison based on Shape Context\n"
"You should run the program providing a number between 1 and 20 for selecting an image in the folder ../data/shape_sample.\n"
"Call\n"
"./shape_example [number between 1 and 20, 1 default]\n\n");

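For context, a minimal sketch of a Shape Context comparison between two silhouettes (the file names are placeholders, and the real sample subsamples the contours before comparing):

```cpp
#include <opencv2/shape.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

// Return the first contour found in a binary image.
static std::vector<cv::Point> firstContour(cv::Mat bin)
{
    std::vector<std::vector<cv::Point> > contours;
    cv::findContours(bin, contours, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);
    return contours.empty() ? std::vector<cv::Point>() : contours[0];
}

int main()
{
    cv::Mat a = cv::imread("../data/shape_sample/1.png", cv::IMREAD_GRAYSCALE);
    cv::Mat b = cv::imread("../data/shape_sample/2.png", cv::IMREAD_GRAYSCALE);
    if (a.empty() || b.empty()) return 1;

    cv::Ptr<cv::ShapeContextDistanceExtractor> sc = cv::createShapeContextDistanceExtractor();
    float dist = sc->computeDistance(firstContour(a), firstContour(b));
    (void)dist; // smaller distance = more similar shapes
    return 0;
}
```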
View File

@@ -16,9 +16,9 @@ public:
{
rng = theRNG();
}
- /** Give energy value for a state of system.*/
+ /** Give energy value for a state of system.*/
double energy() const;
- /** Function which change the state of system (random pertubation).*/
+ /** Function which change the state of system (random perturbation).*/
void changeState();
/** Function to reverse to the previous state.*/
void reverseState();

View File

@@ -91,7 +91,7 @@ int main(int argc, char** argv)
char key = 0;
while(key != 'q' && key != 'Q')
{
- // those paramaters cannot be =0
+ // those parameters cannot be =0
// so we must check here
cannyThreshold = std::max(cannyThreshold, 1);
accumulatorThreshold = std::max(accumulatorThreshold, 1);
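A sketch of where those two trackbar values end up: they are the Canny and accumulator thresholds of cv::HoughCircles, which rejects zeros (the file name is a placeholder):

```cpp
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <algorithm>
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("../data/stuff.jpg", cv::IMREAD_GRAYSCALE); // placeholder
    if (gray.empty()) return 1;
    cv::GaussianBlur(gray, gray, cv::Size(9, 9), 2, 2);

    int cannyThreshold = 100, accumulatorThreshold = 50;   // e.g. trackbar values
    cannyThreshold = std::max(cannyThreshold, 1);          // those parameters cannot be 0
    accumulatorThreshold = std::max(accumulatorThreshold, 1);

    std::vector<cv::Vec3f> circles;
    cv::HoughCircles(gray, circles, cv::HOUGH_GRADIENT, 1, gray.rows / 8,
                     cannyThreshold, accumulatorThreshold, 0, 0);
    return 0;
}
```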

View File

@@ -96,7 +96,7 @@ void goodFeaturesToTrack_Demo( int, void* )
namedWindow( source_window, WINDOW_AUTOSIZE );
imshow( source_window, copy );
- /// Set the neeed parameters to find the refined corners
+ /// Set the needed parameters to find the refined corners
Size winSize = Size( 5, 5 );
Size zeroZone = Size( -1, -1 );
TermCriteria criteria = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 40, 0.001 );
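For reference, a minimal sketch of how those parameters are consumed: cv::cornerSubPix refines corners previously found by cv::goodFeaturesToTrack (the image path is a placeholder):

```cpp
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat gray = cv::imread("../data/pic3.png", cv::IMREAD_GRAYSCALE); // placeholder
    if (gray.empty()) return 1;

    std::vector<cv::Point2f> corners;
    cv::goodFeaturesToTrack(gray, corners, 25, 0.01, 10);
    if (corners.empty()) return 1;

    cv::Size winSize(5, 5);          // half-size of the search window
    cv::Size zeroZone(-1, -1);       // no dead region in the middle of the window
    cv::TermCriteria criteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 40, 0.001);
    cv::cornerSubPix(gray, corners, winSize, zeroZone, criteria); // refined in place
    return 0;
}
```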

View File

@@ -84,7 +84,7 @@ int main(int argc, char *argv[])
"{keypoints k |2000 | number of keypoints to detect }"
"{ratio r |0.7 | threshold for ratio test }"
"{iterations it |500 | RANSAC maximum iterations count }"
- "{error e |2.0 | RANSAC reprojection errror }"
+ "{error e |2.0 | RANSAC reprojection error }"
"{confidence c |0.95 | RANSAC confidence }"
"{inliers in |30 | minimum inliers for Kalman update }"
"{method pnp |0 | PnP method: (0) ITERATIVE - (1) EPNP - (2) P3P - (3) DLS}"

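A hedged sketch of how the listed RANSAC parameters map onto cv::solvePnPRansac; the 3D/2D correspondences and camera matrix below are made-up placeholders, only the parameter order matters here:

```cpp
#include <opencv2/calib3d.hpp>
#include <vector>

int main()
{
    // Placeholder correspondences (would come from the model and matched keypoints).
    std::vector<cv::Point3f> objectPoints = { {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0}, {0,0,1}, {1,0,1} };
    std::vector<cv::Point2f> imagePoints  = { {10,10}, {110,12}, {108,115}, {12,112}, {15,8}, {118,11} };
    cv::Mat K = (cv::Mat_<double>(3, 3) << 500, 0, 320,  0, 500, 240,  0, 0, 1);

    cv::Mat rvec, tvec;
    std::vector<int> inliers;
    cv::solvePnPRansac(objectPoints, imagePoints, K, cv::noArray(), rvec, tvec,
                       false,                    // useExtrinsicGuess
                       500,                      // RANSAC maximum iterations
                       2.0f,                     // reprojection error threshold
                       0.95,                     // confidence
                       inliers,                  // indices of inlier correspondences
                       cv::SOLVEPNP_ITERATIVE);  // PnP method 0 from the table above
    return 0;
}
```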
View File

@@ -13,7 +13,7 @@ static void help( char* progName)
{
cout << endl << progName
<< " shows how to use cv::Mat and IplImages together (converting back and forth)." << endl
- << "Also contains example for image read, spliting the planes, merging back and " << endl
+ << "Also contains example for image read, splitting the planes, merging back and " << endl
<< " color conversion, plus iterating through pixels. " << endl
<< "Usage:" << endl
<< progName << " [image-name Default: ../data/lena.jpg]" << endl << endl;

View File

@@ -9,14 +9,14 @@ using namespace cv;
static void help()
{
cout
- << "\n--------------------------------------------------------------------------" << endl
+ << "\n---------------------------------------------------------------------------" << endl
<< "This program shows how to create matrices(cv::Mat) in OpenCV and its serial"
- << " out capabilities" << endl
- << "That is, cv::Mat M(...); M.create and cout << M. " << endl
- << "Shows how output can be formated to OpenCV, python, numpy, csv and C styles." << endl
- << "Usage:" << endl
- << "./mat_the_basic_image_container" << endl
- << "--------------------------------------------------------------------------" << endl
+ << " out capabilities" << endl
+ << "That is, cv::Mat M(...); M.create and cout << M. " << endl
+ << "Shows how output can be formatted to OpenCV, python, numpy, csv and C styles." << endl
+ << "Usage:" << endl
+ << "./mat_the_basic_image_container" << endl
+ << "-----------------------------------------------------------------------------" << endl
<< endl;
}
@@ -76,7 +76,7 @@ int main(int,char**)
randu(R, Scalar::all(0), Scalar::all(255));
//! [random]
- // Demonstrate the output formating options
+ // Demonstrate the output formatting options
//! [out-default]
cout << "R (default) = " << endl << R << endl << endl;
//! [out-default]

View File

@@ -15,7 +15,7 @@
* 6- Texture Flattening
* The program takes as input a source and a destination image (for 1-3 methods)
- * and ouputs the cloned image.
+ * and outputs the cloned image.
*
* Download test images from opencv_extra folder @github.
*

View File

@@ -15,7 +15,7 @@
* 6- Texture Flattening
* The program takes as input a source and a destination image (for 1-3 methods)
- * and ouputs the cloned image.
+ * and outputs the cloned image.
* Step 1:
* -> In the source image, select the region of interest by left click mouse button. A Polygon ROI will be created by left clicking mouse button.
@@ -446,7 +446,7 @@ int main()
}
else
{
- cout << "Wrong Option Choosen" << endl;
+ cout << "Wrong Option Chosen" << endl;
exit(0);
}

View File

@@ -131,7 +131,7 @@ void printHelp()
" --mi-dist-thresh=<float_number>\n"
" Estimated flow distance threshold for motion inpainting. The default is 5.0.\n\n"
" -ci=, --color-inpaint=(no|average|ns|telea)\n"
- " Do color inpainting. The defailt is no.\n"
+ " Do color inpainting. The default is no.\n"
" --ci-radius=<float_number>\n"
" Set color inpainting radius (for ns and telea options only).\n"
" The default is 2.0\n\n"
@@ -163,9 +163,9 @@ void printHelp()
" -gpu=(yes|no)\n"
" Use CUDA optimization whenever possible. The default is no.\n\n"
" -o=, --output=(no|<file_path>)\n"
- " Set output file path explicitely. The default is stabilized.avi.\n"
+ " Set output file path explicitly. The default is stabilized.avi.\n"
" --fps=(<float_number>|auto)\n"
- " Set output video FPS explicitely. By default the source FPS is used (auto).\n"
+ " Set output video FPS explicitly. By default the source FPS is used (auto).\n"
" -q, --quiet\n"
" Don't show output video frames.\n\n"
" -h, --help\n"
@@ -487,7 +487,7 @@ int main(int argc, const char **argv)
stabilizer->setDeblurer(deblurer);
}
- // set up trimming paramters
+ // set up trimming parameters
stabilizer->setTrimRatio(argf("trim-ratio"));
stabilizer->setCorrectionForInclusion(arg("incl-constr") == "yes");

View File

@@ -1,6 +1,6 @@
This is a brief description of training process which has been used to get res10_300x300_ssd_iter_140000.caffemodel.
The model was created with SSD framework using ResNet-10 like architecture as a backbone. Channels count in ResNet-10 convolution layers was significantly dropped (2x- or 4x- fewer channels).
- The model was trained in Caffe framework on some huge and avaliable online dataset.
+ The model was trained in Caffe framework on some huge and available online dataset.
1. Prepare training tools
You need to use "ssd" branch from this repository https://github.com/weiliu89/caffe/tree/ssd . Checkout this branch and built it (see instructions in repo's README)

View File

@@ -16,7 +16,7 @@ try:
import cv2 as cv
except ImportError:
raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
- 'configure environemnt variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
+ 'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
inWidth = 300
inHeight = 300

View File

@@ -5,7 +5,7 @@ try:
import cv2 as cv
except ImportError:
raise ImportError('Can\'t find OpenCV Python module. If you\'ve built it from sources without installation, '
- 'configure environemnt variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
+ 'configure environment variable PYTHONPATH to "opencv_build_dir/lib" directory (with "python3" subdirectory if required)')
from cv2 import dnn

View File

@@ -1,4 +1,4 @@
- /* This sample demonstrates the way you can perform independed tasks
+ /* This sample demonstrates the way you can perform independent tasks
on the different GPUs */
// Disable some warnings which are caused with CUDA headers

View File

@@ -32,8 +32,8 @@ int main(int argc, const char* argv[])
"{ minDist | 100 | minimum distance between the centers of the detected objects }"
"{ levels | 360 | R-Table levels }"
"{ votesThreshold | 30 | the accumulator threshold for the template centers at the detection stage. The smaller it is, the more false positions may be detected }"
- "{ angleThresh | 10000 | angle votes treshold }"
- "{ scaleThresh | 1000 | scale votes treshold }"
+ "{ angleThresh | 10000 | angle votes threshold }"
+ "{ scaleThresh | 1000 | scale votes threshold }"
"{ posThresh | 100 | position votes threshold }"
"{ dp | 2 | inverse ratio of the accumulator resolution to the image resolution }"
"{ minScale | 0.5 | minimal scale to detect }"
@@ -46,7 +46,7 @@ int main(int argc, const char* argv[])
"{ help h ? | | print help message }"
);
- cmd.about("This program demonstrates arbitary object finding with the Generalized Hough transform.");
+ cmd.about("This program demonstrates arbitrary object finding with the Generalized Hough transform.");
if (cmd.has("help"))
{
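For context, a sketch of how those command-line values are typically fed to the Guil variant of the Generalized Hough transform (the image names are placeholders):

```cpp
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat templ = cv::imread("templ.png", cv::IMREAD_GRAYSCALE); // placeholder
    cv::Mat image = cv::imread("image.png", cv::IMREAD_GRAYSCALE); // placeholder
    if (templ.empty() || image.empty()) return 1;

    cv::Ptr<cv::GeneralizedHoughGuil> guil = cv::createGeneralizedHoughGuil();
    guil->setMinDist(100);        // minDist
    guil->setLevels(360);         // levels
    guil->setDp(2);               // dp
    guil->setAngleThresh(10000);  // angleThresh
    guil->setScaleThresh(1000);   // scaleThresh
    guil->setPosThresh(100);      // posThresh
    guil->setMinScale(0.5);       // minScale
    guil->setTemplate(templ);

    std::vector<cv::Vec4f> positions;   // (x, y, scale, angle) per detection
    guil->detect(image, positions);
    return 0;
}
```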

View File

@@ -1,4 +1,4 @@
- /* This sample demonstrates the way you can perform independed tasks
+ /* This sample demonstrates the way you can perform independent tasks
on the different GPUs */
// Disable some warnings which are caused with CUDA headers

View File

@@ -371,7 +371,7 @@ int main(int argc, char** argv)
DeviceInfo devInfo(i);
if (!devInfo.isCompatible())
{
- cerr << "CUDA module was't built for GPU #" << i << " ("
+ cerr << "CUDA module wasn't built for GPU #" << i << " ("
<< devInfo.name() << ", CC " << devInfo.majorVersion()
<< devInfo.minorVersion() << endl;
return -1;

View File

@@ -67,7 +67,7 @@ if __name__ == '__main__':
cv.setMouseCallback("gray", onmouse)
'''Loop through all the images in the directory'''
for infile in glob.glob( os.path.join(path, '*.*') ):
- ext = os.path.splitext(infile)[1][1:] #get the filename extenstion
+ ext = os.path.splitext(infile)[1][1:] #get the filename extension
if ext == "png" or ext == "jpg" or ext == "bmp" or ext == "tiff" or ext == "pbm":
print(infile)

View File

@@ -5,7 +5,7 @@ Planar augmented reality
==================
This sample shows an example of augmented reality overlay over a planar object
- tracked by PlaneTracker from plane_tracker.py. solvePnP funciton is used to
+ tracked by PlaneTracker from plane_tracker.py. solvePnP function is used to
estimate the tracked object location in 3d space.
video: http://www.youtube.com/watch?v=pzVbhxx6aog

View File

@@ -8,7 +8,7 @@ frames from a camera of a movie file. Also the sample provides
an example of procedural video generation by an object, mimicking
the VideoCapture interface (see Chess class).
- 'create_capture' is a convinience function for capture creation,
+ 'create_capture' is a convenience function for capture creation,
falling back to procedural video in case of error.
Usage:

View File

@@ -223,7 +223,7 @@ void App::run()
if (output!="" && write_once)
{
- if (img_source!="") // wirte image
+ if (img_source!="") // write image
{
write_once = false;
imwrite(output, img_to_show);

View File

@@ -295,7 +295,7 @@ void AdvancedCapture::AddEffectToImageStream()
Windows::Media::MediaProperties::IMediaEncodingProperties ^props = mediaCapture->VideoDeviceController->GetMediaStreamProperties(Windows::Media::Capture::MediaStreamType::Photo);
if(props->Type->Equals("Image"))
{
- //Switch to a video media type instead since we cant add an effect to a image media type
+ //Switch to a video media type instead since we can't add an effect to an image media type
Windows::Foundation::Collections::IVectorView<Windows::Media::MediaProperties::IMediaEncodingProperties^>^ supportedPropsList = mediaCapture->VideoDeviceController->GetAvailableMediaStreamProperties(Windows::Media::Capture::MediaStreamType::Photo);
{
unsigned int i = 0;
@@ -565,7 +565,7 @@ void SDKSample::MediaCapture::AdvancedCapture::Button_Click(Platform::Object^ se
{
Windows::Media::MediaProperties::IMediaEncodingProperties ^props = mediaCapture->VideoDeviceController->GetMediaStreamProperties(Windows::Media::Capture::MediaStreamType::VideoRecord);
Windows::Media::MediaProperties::VideoEncodingProperties ^videoEncodingProperties = static_cast<Windows::Media::MediaProperties::VideoEncodingProperties ^>(props);
- if(!videoEncodingProperties->Subtype->Equals("H264")) //Cant add an effect to an H264 stream
+ if(!videoEncodingProperties->Subtype->Equals("H264")) //Can't add an effect to an H264 stream
{
task<void>(mediaCapture->AddEffectAsync(Windows::Media::Capture::MediaStreamType::VideoRecord,"OcvTransform.OcvImageManipulations", nullptr)).then([this](task<void> effectTask2)
{

View File

@@ -61,7 +61,7 @@
<Grid Grid.Row="1">
<!-- All XAML in this section is purely for design time so you can see sample content in the designer. -->
- <!-- This will be repaced at runtime by live content. -->
+ <!-- This will be replaced at runtime by live content. -->
<Grid>
<Grid.RowDefinitions>
<RowDefinition Height="Auto"/>

View File

@@ -244,7 +244,7 @@ task<void> SuspensionManager::SaveAsync(void)
/// state, which in turn gives their active <see cref="Page"/> an opportunity restore its
/// state.
/// </summary>
- /// <param name="version">A version identifer compared to the session state to prevent
+ /// <param name="version">A version identifier compared to the session state to prevent
/// incompatible versions of session state from reaching app code. Saved state with a
/// different version will be ignored, resulting in an empty <see cref="SessionState"/>
/// dictionary.</param>