From 3876cf22e3813952baedf19cc9b7e582b66cef7f Mon Sep 17 00:00:00 2001
From: itsyplen
Date: Thu, 9 Jun 2011 12:01:47 +0000
Subject: [PATCH] reverted samples with new command argument parser. will be continued after OpenCV release.

---
 samples/c/adaptiveskindetector.cpp        |  57 +++---
 samples/c/bgfg_codebook.cpp               | 103 +++++-----
 samples/c/facedetect.cpp                  |  79 +++++---
 samples/c/find_obj.cpp                    |  41 ++--
 samples/c/find_obj_calonder.cpp           |  38 ++--
 samples/c/find_obj_ferns.cpp              |  32 ++-
 samples/c/latentsvmdetect.cpp             | 103 +++++-----
 samples/c/mser_sample.cpp                 | 164 +++++++--------
 samples/c/one_way_sample.cpp              |  36 ++--
 samples/c/tree_engine.cpp                 |  65 +++---
 samples/cpp/bagofwords_classification.cpp |  66 +++---
 samples/cpp/bgfg_segm.cpp                 |  26 +--
 samples/cpp/brief_match_test.cpp          |  38 ++--
 samples/cpp/calibration.cpp               | 232 ++++++++++++++--------
 samples/cpp/camshiftdemo.cpp              |  43 ++--
 samples/cpp/chamfer.cpp                   |  37 ++--
 16 files changed, 601 insertions(+), 559 deletions(-)

diff --git a/samples/c/adaptiveskindetector.cpp b/samples/c/adaptiveskindetector.cpp
index fdb7f178ee..6fdfa96e21 100644
--- a/samples/c/adaptiveskindetector.cpp
+++ b/samples/c/adaptiveskindetector.cpp
@@ -35,17 +35,27 @@
 //M*/
-#include "opencv2/core/core.hpp"
-#include "opencv2/contrib/contrib.hpp"
-#include "opencv2/highgui/highgui.hpp"
-
 #include
 #include
 #include
 #include
+#include "opencv2/contrib/contrib.hpp"
+#include "opencv2/highgui/highgui.hpp"
-using namespace std;
-using namespace cv;
+void help(char **argv)
+{
+ std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"
+ << "Usage: " << std::endl <<
+ argv[0] << " fileMask firstFrame lastFrame" << std::endl << std::endl <<
+ "Example: " << std::endl <<
+ argv[0] << " C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg 0 1000" << std::endl <<
+ " iterates through temp_00000.jpg to temp_01000.jpg" << std::endl << std::endl <<
+ "If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<
+ "Please note: Background should not contain large surfaces with skin tone."
<< + "\n\n ESC will stop\n" + "Using OpenCV version %s\n" << CV_VERSION << "\n" + << std::endl; +} class ASDFrameHolder { @@ -149,6 +159,7 @@ void ASDFrameHolder::setImage(IplImage *sourceImage) //-------------------- ASDFrameSequencer -----------------------// + ASDFrameSequencer::~ASDFrameSequencer() { close(); @@ -204,6 +215,7 @@ bool ASDCVFrameSequencer::isOpen() //-------------------- ASDFrameSequencerWebCam -----------------------// + bool ASDFrameSequencerWebCam::open(int cameraIndex) { close(); @@ -323,39 +335,19 @@ void displayBuffer(IplImage *rgbDestImage, IplImage *buffer, int rValue, int gVa } }; -void help() +int main(int argc, char** argv ) { - printf("\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector \n" - "which can be found in contrib.cpp \n" - "Usage: \n" - "./adaptiveskindetector [--fileMask]= \n" - "if at least one parameter doesn't specified, it will try to use default webcam \n" - "Expample: \n" - " --fileMask = /home/user_home_directory/work/opencv/samples/c/temp_%%05d.jpg --firstFrame=0 --lastFrame=1000 \n"); -} - -int main(int argc, const char** argv ) -{ - help(); - - CommandLineParser parser(argc, argv); - - string fileMask = parser.get("fileMask"); - int firstFrame = parser.get("firstFrame", 0); - int lastFrame = parser.get("lastFrame", 0); - IplImage *img, *filterMask = NULL; CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE); ASDFrameSequencer *sequencer; CvFont base_font; char caption[2048], s[256], windowName[256]; long int clockTotal = 0, numFrames = 0; - std::clock_t clock; + std::clock_t clock; if (argc < 4) { + help(argv); sequencer = new ASDFrameSequencerWebCam(); (dynamic_cast(sequencer))->open(-1); @@ -366,9 +358,8 @@ int main(int argc, const char** argv ) } else { - // A sequence of images captured from video source, is stored here sequencer = new ASDFrameSequencerImageFile(); - (dynamic_cast(sequencer))->open(fileMask.c_str(), firstFrame, lastFrame ); + (dynamic_cast(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from video source, is stored here } std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences"); @@ -376,6 +367,10 @@ int main(int argc, const char** argv ) cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE); cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5); + // Usage: + // c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000 + + std::cout << "Press ESC to stop." 
<< std::endl << std::endl; while ((img = sequencer->getNextImage()) != 0) { numFrames++; diff --git a/samples/c/bgfg_codebook.cpp b/samples/c/bgfg_codebook.cpp index 8ee5b912a7..99960581d5 100644 --- a/samples/c/bgfg_codebook.cpp +++ b/samples/c/bgfg_codebook.cpp @@ -25,14 +25,10 @@ #include #include -#include "opencv2/core/core.hpp" #include "opencv2/video/background_segm.hpp" -#include +#include "opencv2/imgproc/imgproc_c.h" #include "opencv2/highgui/highgui.hpp" -using namespace std; -using namespace cv; - //VARIABLES for CODEBOOK METHOD: CvBGCodeBookModel* model = 0; const int NCHANNELS = 3; @@ -42,28 +38,26 @@ void help(void) { printf("\nLearn background and find foreground using simple average and average difference learning method:\n" "Originally from the book: Learning OpenCV by O'Reilly press\n" - "\nUsage:\n" - "./bgfg_codebook [--nframes]= \n" - " [--input]=\n" - "***Keep the focus on the video windows, NOT the consol***\n\n" - "INTERACTIVE PARAMETERS:\n" - "\tESC,q,Q - quit the program\n" - "\th - print this help\n" - "\tp - pause toggle\n" - "\ts - single step\n" - "\tr - run mode (single step off)\n" - "=== AVG PARAMS ===\n" - "\t- - bump high threshold UP by 0.25\n" - "\t= - bump high threshold DOWN by 0.25\n" - "\t[ - bump low threshold UP by 0.25\n" - "\t] - bump low threshold DOWN by 0.25\n" - "=== CODEBOOK PARAMS ===\n" - "\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n" - "\ta - adjust all 3 channels at once\n" - "\tb - adjust both 2 and 3 at once\n" - "\ti,o - bump upper threshold up,down by 1\n" - "\tk,l - bump lower threshold up,down by 1\n" - "\tSPACE - reset the model\n" + "\nUSAGE:\nbgfg_codebook [--nframes=300] [movie filename, else from camera]\n" + "***Keep the focus on the video windows, NOT the consol***\n\n" + "INTERACTIVE PARAMETERS:\n" + "\tESC,q,Q - quit the program\n" + "\th - print this help\n" + "\tp - pause toggle\n" + "\ts - single step\n" + "\tr - run mode (single step off)\n" + "=== AVG PARAMS ===\n" + "\t- - bump high threshold UP by 0.25\n" + "\t= - bump high threshold DOWN by 0.25\n" + "\t[ - bump low threshold UP by 0.25\n" + "\t] - bump low threshold DOWN by 0.25\n" + "=== CODEBOOK PARAMS ===\n" + "\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n" + "\ta - adjust all 3 channels at once\n" + "\tb - adjust both 2 and 3 at once\n" + "\ti,o - bump upper threshold up,down by 1\n" + "\tk,l - bump lower threshold up,down by 1\n" + "\tSPACE - reset the model\n" ); } @@ -71,20 +65,15 @@ void help(void) //USAGE: ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera] //If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V // -int main(int argc, const char** argv) +int main(int argc, char** argv) { - help(); - - CommandLineParser parser(argc, argv); - - string inputName = parser.get("input", "0"); - int nframesToLearnBG = parser.get("nframes", 300); - + const char* filename = 0; IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0; CvCapture* capture = 0; - int c, n, nframes = 0; + int c, n, nframes = 0; + int nframesToLearnBG = 300; model = cvCreateBGCodeBookModel(); @@ -98,30 +87,38 @@ int main(int argc, const char** argv) bool pause = false; bool singlestep = false; - if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') ) + for( n = 1; n < argc; n++ ) { - printf("Capture from camera\n"); - capture = cvCaptureFromCAM( inputName.empty() 
? 0 : inputName.c_str()[0] - '0' ); - int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ; - if( !capture) + static const char* nframesOpt = "--nframes="; + if( strncmp(argv[n], nframesOpt, strlen(nframesOpt))==0 ) { - printf ("Capture from CAM %d", c); - printf (" didn't work\n"); - } - } - else - { - printf("Capture from file %s\n",inputName.c_str()); - capture = cvCreateFileCapture(inputName.c_str()); - if( !capture) + if( sscanf(argv[n] + strlen(nframesOpt), "%d", &nframesToLearnBG) == 0 ) { - printf ("Capture from file %s", inputName.c_str()); - printf (" didn't work\n"); help(); return -1; } - } + else + filename = argv[n]; + } + + if( !filename ) + { + printf("Capture from camera\n"); + capture = cvCaptureFromCAM( 0 ); + } + else + { + printf("Capture from file %s\n",filename); + capture = cvCreateFileCapture( filename ); + } + + if( !capture ) + { + printf( "Can not initialize video capturing\n\n" ); + help(); + return -1; + } //MAIN PROCESSING LOOP: for(;;) diff --git a/samples/c/facedetect.cpp b/samples/c/facedetect.cpp index 4dcef029a5..c173417667 100644 --- a/samples/c/facedetect.cpp +++ b/samples/c/facedetect.cpp @@ -1,4 +1,3 @@ -#include "opencv2/core/core.hpp" #include "opencv2/objdetect/objdetect.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" @@ -11,13 +10,13 @@ using namespace cv; void help() { - cout << "\nThis program demonstrates the cascade classifier. Now you can use Haar or LBP features.\n" + cout << "\nThis program demonstrates the cascade recognizer. Now you can use Haar or LBP features.\n" "This classifier can recognize many ~rigid objects, it's most known use is for faces.\n" "Usage:\n" "./facedetect [--cascade= this is the primary trained classifier such as frontal face]\n" " [--nested-cascade[=nested_cascade_path this an optional secondary classifier such as eyes]]\n" " [--scale=\n" - " [--input=filename|camera_index]\n\n" + " [filename|camera_index]\n\n" "see facedetect.cmd for one call:\n" "./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye.xml\" --scale=1.3 \n" "Hit any key to quit.\n" @@ -28,41 +27,70 @@ void detectAndDraw( Mat& img, CascadeClassifier& cascade, CascadeClassifier& nestedCascade, double scale); +String cascadeName = "../../data/haarcascades/haarcascade_frontalface_alt.xml"; +String nestedCascadeName = "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml"; + int main( int argc, const char** argv ) { - help(); - - CommandLineParser parser(argc, argv); - - string cascadeName = parser.get("cascade", "../../data/haarcascades/haarcascade_frontalface_alt.xml"); - string nestedCascadeName = parser.get("nested-cascade", "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml"); - double scale = parser.get("scale", 1.0); - string inputName = parser.get("input", "0"); //read from camera by default - - if (!cascadeName.empty()) - cout << " from which we have cascadeName= " << cascadeName << endl; - - if (!nestedCascadeName.empty()) - cout << " from which we have nestedCascadeName= " << nestedCascadeName << endl; - CvCapture* capture = 0; Mat frame, frameCopy, image; + const String scaleOpt = "--scale="; + size_t scaleOptLen = scaleOpt.length(); + const String cascadeOpt = "--cascade="; + size_t cascadeOptLen = cascadeOpt.length(); + const String nestedCascadeOpt = "--nested-cascade"; + size_t nestedCascadeOptLen = nestedCascadeOpt.length(); + String inputName; + + help(); + CascadeClassifier cascade, 
nestedCascade; + double scale = 1; + + for( int i = 1; i < argc; i++ ) + { + cout << "Processing " << i << " " << argv[i] << endl; + if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 ) + { + cascadeName.assign( argv[i] + cascadeOptLen ); + cout << " from which we have cascadeName= " << cascadeName << endl; + } + else if( nestedCascadeOpt.compare( 0, nestedCascadeOptLen, argv[i], nestedCascadeOptLen ) == 0 ) + { + if( argv[i][nestedCascadeOpt.length()] == '=' ) + nestedCascadeName.assign( argv[i] + nestedCascadeOpt.length() + 1 ); + if( !nestedCascade.load( nestedCascadeName ) ) + cerr << "WARNING: Could not load classifier cascade for nested objects" << endl; + } + else if( scaleOpt.compare( 0, scaleOptLen, argv[i], scaleOptLen ) == 0 ) + { + if( !sscanf( argv[i] + scaleOpt.length(), "%lf", &scale ) || scale < 1 ) + scale = 1; + cout << " from which we read scale = " << scale << endl; + } + else if( argv[i][0] == '-' ) + { + cerr << "WARNING: Unknown option %s" << argv[i] << endl; + } + else + inputName.assign( argv[i] ); + } if( !cascade.load( cascadeName ) ) { cerr << "ERROR: Could not load classifier cascade" << endl; + cerr << "Usage: facedetect [--cascade=]\n" + " [--nested-cascade[=nested_cascade_path]]\n" + " [--scale[=\n" + " [filename|camera_index]\n" << endl ; return -1; } - if( !nestedCascade.load( nestedCascadeName ) ) - cerr << "WARNING: Could not load classifier cascade for nested objects" << endl; - if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') ) { capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' ); int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ; - if( !capture) cout << "Capture from CAM " << c << " didn't work" << endl; + if(!capture) cout << "Capture from CAM " << c << " didn't work" << endl; } else if( inputName.size() ) { @@ -70,9 +98,14 @@ int main( int argc, const char** argv ) if( image.empty() ) { capture = cvCaptureFromAVI( inputName.c_str() ); - if( !capture ) cout << "Capture from AVI didn't work" << endl; + if(!capture) cout << "Capture from AVI didn't work" << endl; } } + else + { + image = imread( "lena.jpg", 1 ); + if(image.empty()) cout << "Couldn't read lena.jpg" << endl; + } cvNamedWindow( "result", 1 ); diff --git a/samples/c/find_obj.cpp b/samples/c/find_obj.cpp index 38906d041e..f516326791 100644 --- a/samples/c/find_obj.cpp +++ b/samples/c/find_obj.cpp @@ -4,30 +4,26 @@ * Author: Liu Liu * liuliu.1987+opencv@gmail.com */ -#include "opencv2/core/core.hpp" #include "opencv2/objdetect/objdetect.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/calib3d/calib3d.hpp" -#include +#include "opencv2/imgproc/imgproc_c.h" #include #include using namespace std; -using namespace cv; - void help() { - printf( "\n This program demonstrated the use of the SURF Detector and Descriptor using\n" - "either FLANN (fast approx nearst neighbor classification) or brute force matching\n" - "on planar objects.\n" - "Usage: \n" - "./find_obj [--object_filename]= \n" - " [--scene_filename]=] \n" - "Example: \n" - "./find_obj --object_filename=box.png --scene_filename=box_in_scene.png \n\n" - ); + printf( + "This program demonstrated the use of the SURF Detector and Descriptor using\n" + "either FLANN (fast approx nearst neighbor classification) or brute force matching\n" + "on planar objects.\n" + "Call:\n" + "./find_obj [ ]\n\n" + ); + } // define whether to use approximate nearest-neighbor search @@ -213,16 +209,13 @@ 
locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors return 1; } -int main(int argc, const char** argv) +int main(int argc, char** argv) { - help(); - - CommandLineParser parser(argc, argv); - - string objectFileName = parser.get("object_filename", "box.png"); - string sceneFileName = parser.get("scene_filename", "box_in_scene.png"); + const char* object_filename = argc == 3 ? argv[1] : "box.png"; + const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png"; CvMemStorage* storage = cvCreateMemStorage(0); + help(); cvNamedWindow("Object", 1); cvNamedWindow("Object Correspond", 1); @@ -239,11 +232,13 @@ int main(int argc, const char** argv) {{255,255,255}} }; - IplImage* object = cvLoadImage( objectFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE ); - IplImage* image = cvLoadImage( sceneFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE ); + IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE ); + IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE ); if( !object || !image ) { - fprintf( stderr, "Can not load %s and/or %s\n", objectFileName.c_str(), sceneFileName.c_str() ); + fprintf( stderr, "Can not load %s and/or %s\n" + "Usage: find_obj [ ]\n", + object_filename, scene_filename ); exit(-1); } IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3); diff --git a/samples/c/find_obj_calonder.cpp b/samples/c/find_obj_calonder.cpp index 8f84b3dbd3..1a5abe576b 100644 --- a/samples/c/find_obj_calonder.cpp +++ b/samples/c/find_obj_calonder.cpp @@ -11,17 +11,14 @@ using namespace cv; void help() { - printf("\n This program shows the use of the Calonder point descriptor classifier \n" - "SURF is used to detect interest points, Calonder is used to describe/match these points \n" - "Usage: \n" - "./find_obj_calonder --classifier_file= \n" - " --test_image= \n" - " [--train_container]= \n" - "Example: \n" - " --classifier_file=test_classifier --test_image=lena.jpg --train_container=one_way_train_images.txt \n" - " the test_classifier is created here using --train_container and tested witn --test_image at the end \n" - " --classifier_file=test_classifier --test_image=lena.jpg \n" - " the test classifier is tested here using lena.jpg \n"); + cout << "This program shows the use of the Calonder point descriptor classifier" + "SURF is used to detect interest points, Calonder is used to describe/match these points\n" + "Format:" << endl << + " classifier_file(to write) test_image file_with_train_images_filenames(txt)" << + " or" << endl << + " classifier_file(to read) test_image" + "Using OpenCV version %s\n" << CV_VERSION << "\n" + << endl; } /* * Generates random perspective transform of image @@ -147,27 +144,18 @@ void testCalonderClassifier( const string& classifierFilename, const string& img waitKey(); } -int main( int argc, const char **argv ) +int main( int argc, char **argv ) { - help(); - - CommandLineParser parser(argc, argv); - - string classifierFileName = parser.get("classifier_file"); - string testImageFileName = parser.get("test_image", "lena.jpg"); - string trainContainerFileName = parser.get("train_container"); - - if( classifierFileName.empty()) + if( argc != 4 && argc != 3 ) { - printf("\n Can't find classifier file, please select file for --classifier_file parameter \n"); help(); return -1; } - if( !trainContainerFileName.empty()) - trainCalonderClassifier( classifierFileName.c_str(), trainContainerFileName.c_str() ); + if( argc == 4 ) + trainCalonderClassifier( argv[1], argv[3] ); - testCalonderClassifier( 
classifierFileName.c_str(), testImageFileName.c_str() ); + testCalonderClassifier( argv[1], argv[2] ); return 0; } diff --git a/samples/c/find_obj_ferns.cpp b/samples/c/find_obj_ferns.cpp index 52c5d26d2f..207619f6c2 100644 --- a/samples/c/find_obj_ferns.cpp +++ b/samples/c/find_obj_ferns.cpp @@ -9,37 +9,30 @@ #include using namespace cv; - void help() { printf( "This program shows the use of the \"fern\" plannar PlanarObjectDetector point\n" - "descriptor classifier" - "Usage: \n" - "./find_obj_ferns [--object_filename]= \n" - " [--scene_filename]=] \n" - "Example: \n" - "./find_obj_ferns --object_filename=box.png --scene_filename=box_in_scene.png \n"); + "descriptor classifier" + "Usage:\n" + "./find_obj_ferns [ ]\n" + "\n"); } - -int main(int argc, const char** argv) +int main(int argc, char** argv) { + const char* object_filename = argc > 1 ? argv[1] : "box.png"; + const char* scene_filename = argc > 2 ? argv[2] : "box_in_scene.png"; + int i; help(); - - CommandLineParser parser(argc, argv); - - string objectFileName = parser.get("object_filename", "box.png"); - string sceneFileName = parser.get("scene_filename", "box_in_scene.png"); - cvNamedWindow("Object", 1); cvNamedWindow("Image", 1); cvNamedWindow("Object Correspondence", 1); - Mat object = imread( objectFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE ); + Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE ); Mat image; double imgscale = 1; - Mat _image = imread( sceneFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE ); + Mat _image = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE ); resize(_image, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC); @@ -47,7 +40,7 @@ int main(int argc, const char** argv) { fprintf( stderr, "Can not load %s and/or %s\n" "Usage: find_obj_ferns [ ]\n", - objectFileName.c_str(), sceneFileName.c_str() ); + object_filename, scene_filename ); exit(-1); } @@ -67,7 +60,7 @@ int main(int argc, const char** argv) vector objKeypoints, imgKeypoints; PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2); - string model_filename = format("%s_model.xml.gz", objectFileName.c_str()); + string model_filename = format("%s_model.xml.gz", object_filename); printf("Trying to load %s ...\n", model_filename.c_str()); FileStorage fs(model_filename, FileStorage::READ); if( fs.isOpened() ) @@ -113,7 +106,6 @@ int main(int argc, const char** argv) t = (double)getTickCount() - t; printf("%gms\n", t*1000/getTickFrequency()); - int i = 0; if( found ) { for( i = 0; i < 4; i++ ) diff --git a/samples/c/latentsvmdetect.cpp b/samples/c/latentsvmdetect.cpp index 6144e85dcf..9f0ef9ccfc 100644 --- a/samples/c/latentsvmdetect.cpp +++ b/samples/c/latentsvmdetect.cpp @@ -1,11 +1,9 @@ -#include "opencv2/core/core.hpp" #include "opencv2/objdetect/objdetect.hpp" #include "opencv2/highgui/highgui.hpp" - #include #ifdef HAVE_CONFIG_H -#include "cvconfig.h" +#include #endif #ifdef HAVE_TBB #include "tbb/task_scheduler_init.h" @@ -15,41 +13,42 @@ using namespace cv; void help() { - printf( "This program demonstrated the use of the latentSVM detector.\n" - "It reads in a trained object model and then uses that to detect the object in an image\n" - "Usage: \n" - "./latentsvmdetect [--image_filename]= \n" - " [--model_filename]= \n" - " [--threads_number]=\n" - "Example: \n" - "./latentsvmdetect --image_filename=cat.jpg --model_filename=cat.xml --threads_number=7 \n" - " Press any key to quit.\n"); + printf( "This program demonstrated the use of the latentSVM detector.\n" + "It reads in a trained object model and then uses 
that to detect the object in an image\n" + "Call:\n" + "./latentsvmdetect [ []]\n" + " The defaults for image_filename and model_filename are cat.jpg and cat.xml respectively\n" + " Press any key to quit.\n"); } +const char* model_filename = "cat.xml"; +const char* image_filename = "cat.jpg"; +int tbbNumThreads = -1; void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, int numThreads = -1) { CvMemStorage* storage = cvCreateMemStorage(0); CvSeq* detections = 0; int i = 0; - int64 start = 0, finish = 0; + int64 start = 0, finish = 0; #ifdef HAVE_TBB tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred); - if (numThreads > 0) - { - init.initialize(numThreads); + if (numThreads > 0) + { + init.initialize(numThreads); printf("Number of threads %i\n", numThreads); - } - else - { - printf("Number of threads is not correct for TBB version"); - return; - } + } + else + { + printf("Number of threads is not correct for TBB version"); + return; + } #endif - start = cvGetTickCount(); + + start = cvGetTickCount(); detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads); - finish = cvGetTickCount(); - printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0)); + finish = cvGetTickCount(); + printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0)); #ifdef HAVE_TBB init.terminate(); @@ -57,43 +56,43 @@ void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, in for( i = 0; i < detections->total; i++ ) { CvObjectDetection detection = *(CvObjectDetection*)cvGetSeqElem( detections, i ); - CvRect bounding_box = detection.rect; + CvRect bounding_box = detection.rect; cvRectangle( image, cvPoint(bounding_box.x, bounding_box.y), cvPoint(bounding_box.x + bounding_box.width, - bounding_box.y + bounding_box.height), + bounding_box.y + bounding_box.height), CV_RGB(255,0,0), 3 ); } cvReleaseMemStorage( &storage ); } -int main(int argc, const char* argv[]) +int main(int argc, char* argv[]) { - help(); - - CommandLineParser parser(argc, argv); - - string imageFileName = parser.get("image_filename", "cat.jpg"); - string modelFileName = parser.get("model_filename", "cat.xml"); - int tbbNumThreads = parser.get("threads_number", -1); - - IplImage* image = cvLoadImage(imageFileName.c_str()); - if (!image) - { - printf( "Unable to load the image\n" + help(); + if (argc > 2) + { + image_filename = argv[1]; + model_filename = argv[2]; + if (argc > 3) + { + tbbNumThreads = atoi(argv[3]); + } + } + IplImage* image = cvLoadImage(image_filename); + if (!image) + { + printf( "Unable to load the image\n" "Pass it as the first parameter: latentsvmdetect \n" ); - return -1; - } - CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(modelFileName.c_str()); - if (!detector) - { - printf( "Unable to load the model\n" + return -1; + } + CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(model_filename); + if (!detector) + { + printf( "Unable to load the model\n" "Pass it as the second parameter: latentsvmdetect \n" ); - cvReleaseImage( &image ); - return -1; - } - + cvReleaseImage( &image ); + return -1; + } detect_and_draw_objects( image, detector, tbbNumThreads ); - cvNamedWindow( "test", 0 ); cvShowImage( "test", image ); cvWaitKey(0); @@ -101,5 +100,5 @@ int main(int argc, const char* argv[]) cvReleaseImage( &image ); cvDestroyAllWindows(); - return 0; + return 0; } diff --git a/samples/c/mser_sample.cpp b/samples/c/mser_sample.cpp index 
83b3989fb4..94519d63d4 100644 --- a/samples/c/mser_sample.cpp +++ b/samples/c/mser_sample.cpp @@ -2,24 +2,17 @@ * Copyright� 2009, Liu Liu All rights reserved. */ -#include "opencv2/core/core.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/imgproc/imgproc_c.h" -#include - -using namespace std; -using namespace cv; void help() { - printf("\nThis program demonstrates the Maximal Extremal Region interest point detector.\n" - "It finds the most stable (in size) dark and white regions as a threshold is increased.\n" - "\n Usage: \n" - "./mser_sample [--image_filename] \n" - "Example: \n" - "./mser_sample --image_filename=puzzle.png \n"); + printf("\nThis program demonstrates the Maximal Extremal Region interest point detector.\n" + "It finds the most stable (in size) dark and white regions as a threshold is increased.\n" + "\nCall:\n" + "./mser_sample \n\n"); } static CvScalar colors[] = @@ -51,81 +44,90 @@ static uchar bcolors[][3] = }; -int main( int argc, const char** argv ) +int main( int argc, char** argv ) { - help(); + char path[1024]; + IplImage* img; + help(); + if (argc!=2) + { + strcpy(path,"puzzle.png"); + img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE ); + if (!img) + { + printf("\nUsage: mser_sample \n"); + return 0; + } + } + else + { + strcpy(path,argv[1]); + img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE ); + } + + if (!img) + { + printf("Unable to load image %s\n",path); + return 0; + } + IplImage* rsp = cvLoadImage( path, CV_LOAD_IMAGE_COLOR ); + IplImage* ellipses = cvCloneImage(rsp); + cvCvtColor(img,ellipses,CV_GRAY2BGR); + CvSeq* contours; + CvMemStorage* storage= cvCreateMemStorage(); + IplImage* hsv = cvCreateImage( cvGetSize( rsp ), IPL_DEPTH_8U, 3 ); + cvCvtColor( rsp, hsv, CV_BGR2YCrCb ); + CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 ); - CommandLineParser parser(argc, argv); + double t = (double)cvGetTickCount(); + cvExtractMSER( hsv, NULL, &contours, storage, params ); + t = cvGetTickCount() - t; + printf( "MSER extracted %d contours in %g ms.\n", contours->total, t/((double)cvGetTickFrequency()*1000.) ); + uchar* rsptr = (uchar*)rsp->imageData; + // draw mser with different color + for ( int i = contours->total-1; i >= 0; i-- ) + { + CvSeq* r = *(CvSeq**)cvGetSeqElem( contours, i ); + for ( int j = 0; j < r->total; j++ ) + { + CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, r, j ); + rsptr[pt->x*3+pt->y*rsp->widthStep] = bcolors[i%9][2]; + rsptr[pt->x*3+1+pt->y*rsp->widthStep] = bcolors[i%9][1]; + rsptr[pt->x*3+2+pt->y*rsp->widthStep] = bcolors[i%9][0]; + } + } + // find ellipse ( it seems cvfitellipse2 have error or sth? 
+ for ( int i = 0; i < contours->total; i++ ) + { + CvContour* r = *(CvContour**)cvGetSeqElem( contours, i ); + CvBox2D box = cvFitEllipse2( r ); + box.angle=(float)CV_PI/2-box.angle; + + if ( r->color > 0 ) + cvEllipseBox( ellipses, box, colors[9], 2 ); + else + cvEllipseBox( ellipses, box, colors[2], 2 ); + + } - string imageFileName = parser.get("image_filename", "puzzle.png"); + cvSaveImage( "rsp.png", rsp ); - IplImage* img; + cvNamedWindow( "original", 0 ); + cvShowImage( "original", img ); + + cvNamedWindow( "response", 0 ); + cvShowImage( "response", rsp ); - img = cvLoadImage( imageFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE ); - if (!img) - { - printf("Unable to load image %s\n",imageFileName.c_str()); - help(); - return 0; - } + cvNamedWindow( "ellipses", 0 ); + cvShowImage( "ellipses", ellipses ); - IplImage* rsp = cvLoadImage( imageFileName.c_str(), CV_LOAD_IMAGE_COLOR ); - IplImage* ellipses = cvCloneImage(rsp); - cvCvtColor(img,ellipses,CV_GRAY2BGR); - CvSeq* contours; - CvMemStorage* storage= cvCreateMemStorage(); - IplImage* hsv = cvCreateImage( cvGetSize( rsp ), IPL_DEPTH_8U, 3 ); - cvCvtColor( rsp, hsv, CV_BGR2YCrCb ); - CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 ); + cvWaitKey(0); - double t = (double)cvGetTickCount(); - cvExtractMSER( hsv, NULL, &contours, storage, params ); - t = cvGetTickCount() - t; - printf( "MSER extracted %d contours in %g ms.\n", contours->total, t/((double)cvGetTickFrequency()*1000.) ); - uchar* rsptr = (uchar*)rsp->imageData; - // draw mser with different color - for ( int i = contours->total-1; i >= 0; i-- ) - { - CvSeq* r = *(CvSeq**)cvGetSeqElem( contours, i ); - for ( int j = 0; j < r->total; j++ ) - { - CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, r, j ); - rsptr[pt->x*3+pt->y*rsp->widthStep] = bcolors[i%9][2]; - rsptr[pt->x*3+1+pt->y*rsp->widthStep] = bcolors[i%9][1]; - rsptr[pt->x*3+2+pt->y*rsp->widthStep] = bcolors[i%9][0]; - } - } - // find ellipse ( it seems cvfitellipse2 have error or sth? 
- for ( int i = 0; i < contours->total; i++ ) - { - CvContour* r = *(CvContour**)cvGetSeqElem( contours, i ); - CvBox2D box = cvFitEllipse2( r ); - box.angle=(float)CV_PI/2-box.angle; - - if ( r->color > 0 ) - cvEllipseBox( ellipses, box, colors[9], 2 ); - else - cvEllipseBox( ellipses, box, colors[2], 2 ); - - } - - cvSaveImage( "rsp.png", rsp ); - - cvNamedWindow( "original", 0 ); - cvShowImage( "original", img ); - - cvNamedWindow( "response", 0 ); - cvShowImage( "response", rsp ); - - cvNamedWindow( "ellipses", 0 ); - cvShowImage( "ellipses", ellipses ); - - cvWaitKey(0); - - cvDestroyWindow( "original" ); - cvDestroyWindow( "response" ); - cvDestroyWindow( "ellipses" ); - cvReleaseImage(&rsp); - cvReleaseImage(&img); - cvReleaseImage(&ellipses); + cvDestroyWindow( "original" ); + cvDestroyWindow( "response" ); + cvDestroyWindow( "ellipses" ); + cvReleaseImage(&rsp); + cvReleaseImage(&img); + cvReleaseImage(&ellipses); + } diff --git a/samples/c/one_way_sample.cpp b/samples/c/one_way_sample.cpp index 285186b46a..817a1d5816 100644 --- a/samples/c/one_way_sample.cpp +++ b/samples/c/one_way_sample.cpp @@ -7,24 +7,18 @@ * */ -#include "opencv2/core/core.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc_c.h" #include - void help() { - printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n" - "Correspondences are drawn\n" - "Usage: \n" - "./one_way_sample [--path]= \n" - " [--first_image]= \n" - " [--second_image]=\n" - "For example: \n" - " ./one_way_sample --path=../../../opencv/samples/c --first_image=scene_l.bmp --second_image=scene_r.bmp \n"); + printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n" + "Correspondences are drawn\n"); + printf("Format: \n./one_way_sample [path_to_samples] [image1] [image2]\n"); + printf("For example: ./one_way_sample ../../../opencv/samples/c scene_l.bmp scene_r.bmp\n"); } using namespace cv; @@ -32,19 +26,21 @@ using namespace cv; IplImage* DrawCorrespondences(IplImage* img1, const vector& features1, IplImage* img2, const vector& features2, const vector& desc_idx); -int main(int argc, const char** argv) +int main(int argc, char** argv) { - help(); - - CommandLineParser parser(argc, argv); - - std::string path_name = parser.get("path", "../../../opencv/samples/c"); - std::string img1_name = path_name + "/" + parser.get("first_image", "scene_l.bmp"); - std::string img2_name = path_name + "/" + parser.get("second_image", "scene_r.bmp"); - const char images_list[] = "one_way_train_images.txt"; const CvSize patch_size = cvSize(24, 24); - const int pose_count = 1; //50 + const int pose_count = 50; + + if (argc != 3 && argc != 4) + { + help(); + return 0; + } + + std::string path_name = argv[1]; + std::string img1_name = path_name + "/" + std::string(argv[2]); + std::string img2_name = path_name + "/" + std::string(argv[3]); printf("Reading the images...\n"); IplImage* img1 = cvLoadImage(img1_name.c_str(), CV_LOAD_IMAGE_GRAYSCALE); diff --git a/samples/c/tree_engine.cpp b/samples/c/tree_engine.cpp index 0a851e4b53..4f41884601 100644 --- a/samples/c/tree_engine.cpp +++ b/samples/c/tree_engine.cpp @@ -1,26 +1,21 @@ -#include "opencv2/core/core.hpp" #include "opencv2/ml/ml.hpp" #include "opencv2/core/core_c.h" #include #include -using namespace std; -using namespace cv; - void help() { - printf( - "\nThis sample demonstrates how to use different decision trees and 
forests including boosting and random trees:\n" - "CvDTree dtree;\n" - "CvBoost boost;\n" - "CvRTrees rtrees;\n" - "CvERTrees ertrees;\n" - "CvGBTrees gbtrees;\n" - "Usage: \n" - " ./tree_engine [--response_column]= \n" - "[--categorical_response]= \n" - "[--csv_filename]= \n" - ); + printf( + "\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees:\n" + "CvDTree dtree;\n" + "CvBoost boost;\n" + "CvRTrees rtrees;\n" + "CvERTrees ertrees;\n" + "CvGBTrees gbtrees;\n" + "Call:\n\t./tree_engine [-r ] [-c] \n" + "where -r specified the 0-based index of the response (0 by default)\n" + "-c specifies that the response is categorical (it's ordered by default) and\n" + " is the name of training data file in comma-separated value format\n\n"); } @@ -64,24 +59,34 @@ void print_result(float train_err, float test_err, const CvMat* _var_imp) printf("\n"); } -int main(int argc, const char** argv) +int main(int argc, char** argv) { - help(); - - CommandLineParser parser(argc, argv); - - string filename = parser.get("csv_filename"); - int response_idx = parser.get("response_column", 0); - bool categorical_response = (bool)parser.get("categorical_response", 1); - - if(filename.empty()) + if(argc < 2) { - printf("\n Please, select value for --csv_filename key \n"); help(); - return -1; + return 0; + } + const char* filename = 0; + int response_idx = 0; + bool categorical_response = false; + + for(int i = 1; i < argc; i++) + { + if(strcmp(argv[i], "-r") == 0) + sscanf(argv[++i], "%d", &response_idx); + else if(strcmp(argv[i], "-c") == 0) + categorical_response = true; + else if(argv[i][0] != '-' ) + filename = argv[i]; + else + { + printf("Error. Invalid option %s\n", argv[i]); + help(); + return -1; + } } - printf("\nReading in %s...\n\n",filename.c_str()); + printf("\nReading in %s...\n\n",filename); CvDTree dtree; CvBoost boost; CvRTrees rtrees; @@ -93,7 +98,7 @@ int main(int argc, const char** argv) CvTrainTestSplit spl( 0.5f ); - if ( data.read_csv( filename.c_str() ) == 0) + if ( data.read_csv( filename ) == 0) { data.set_response_idx( response_idx ); if(categorical_response) diff --git a/samples/cpp/bagofwords_classification.cpp b/samples/cpp/bagofwords_classification.cpp index 9e3f334476..7203ccc5a9 100644 --- a/samples/cpp/bagofwords_classification.cpp +++ b/samples/cpp/bagofwords_classification.cpp @@ -1,4 +1,3 @@ -#include "opencv2/core/core.hpp" #include "opencv2/highgui/highgui.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/features2d/features2d.hpp" @@ -28,26 +27,29 @@ const string bowImageDescriptorsDir = "/bowImageDescriptors"; const string svmsDir = "/svms"; const string plotsDir = "/plots"; -void help() +void help(char** argv) { - printf("\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n" - "It shows how to use detectors, descriptors and recognition methods \n" - "Usage: \n" - "Format:\n" - "./bagofwords_classification \n" - "--voc_path= \n" - "--result_directory= \n" - "[--descriptor_extractor]= \n" - "[--descriptor_matcher]= \n" - "\n"); + cout << "\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. 
\n" + << "It shows how to use detectors, descriptors and recognition methods \n" + "Using OpenCV version %s\n" << CV_VERSION << "\n" + << "Call: \n" + << "Format:\n ./" << argv[0] << " [VOC path] [result directory] \n" + << " or: \n" + << " ./" << argv[0] << " [VOC path] [result directory] [feature detector] [descriptor extractor] [descriptor matcher] \n" + << "\n" + << "Input parameters: \n" + << "[VOC path] Path to Pascal VOC data (e.g. /home/my/VOCdevkit/VOC2010). Note: VOC2007-VOC2010 are supported. \n" + << "[result directory] Path to result diractory. Following folders will be created in [result directory]: \n" + << " bowImageDescriptors - to store image descriptors, \n" + << " svms - to store trained svms, \n" + << " plots - to store files for plots creating. \n" + << "[feature detector] Feature detector name (e.g. SURF, FAST...) - see createFeatureDetector() function in detectors.cpp \n" + << " Currently 12/2010, this is FAST, STAR, SIFT, SURF, MSER, GFTT, HARRIS \n" + << "[descriptor extractor] Descriptor extractor name (e.g. SURF, SIFT) - see createDescriptorExtractor() function in descriptors.cpp \n" + << " Currently 12/2010, this is SURF, OpponentSIFT, SIFT, OpponentSURF, BRIEF \n" + << "[descriptor matcher] Descriptor matcher name (e.g. BruteForce) - see createDescriptorMatcher() function in matchers.cpp \n" + << " Currently 12/2010, this is BruteForce, BruteForce-L1, FlannBased, BruteForce-Hamming, BruteForce-HammingLUT \n" + << "\n"; } @@ -2505,24 +2507,16 @@ void computeGnuPlotOutput( const string& resPath, const string& objClassName, Vo -int main(int argc, const char** argv) +int main(int argc, char** argv) { - help(); - - CommandLineParser parser(argc, argv); - - const string vocPath = parser.get("--voc_path"); - const string resPath = parser.get("--result_directory"); - const string featureDetectName = parser.get("--feature_detector"); - const string descExtName = parser.get("--descriptor_extractor"); - const string descMatchName = parser.get("--descriptor_matcher"); - - if( vocPath.empty() || resPath.empty()) + if( argc != 3 && argc != 6 ) { - help(); - printf("Cannot find --voc_path=%s or --result_directory=%s\n", vocPath.c_str(), resPath.c_str()); + help(argv); return -1; } + + const string vocPath = argv[1], resPath = argv[2]; + // Read or set default parameters string vocName; DDMParams ddmParams; @@ -2540,12 +2534,12 @@ int main(int argc, const char** argv) else { vocName = getVocName(vocPath); - if( featureDetectName.empty() || descExtName.empty() || descMatchName.empty()) + if( argc!= 6 ) { cout << "Feature detector, descriptor extractor, descriptor matcher must be set" << endl; return -1; } - ddmParams = DDMParams( featureDetectName.c_str(), descExtName.c_str(), descMatchName.c_str()); // from command line + ddmParams = DDMParams( argv[3], argv[4], argv[5] ); // from command line // vocabTrainParams and svmTrainParamsExt is set by defaults paramsFS.open( resPath + "/" + paramsFile, FileStorage::WRITE ); if( paramsFS.isOpened() ) diff --git a/samples/cpp/bgfg_segm.cpp b/samples/cpp/bgfg_segm.cpp index 1bb3f5d181..c93f44c5ba 100644 --- a/samples/cpp/bgfg_segm.cpp +++ b/samples/cpp/bgfg_segm.cpp @@ -1,40 +1,32 @@ -#include "opencv2/core/core.hpp" #include "opencv2/video/background_segm.hpp" #include "opencv2/highgui/highgui.hpp" #include using namespace cv; -using namespace std; void help() { printf("\nDo background segmentation, especially demonstrating the use of cvUpdateBGStatModel().\n" -" Learns the background at the start and then segments.\n" -" Learning is 
togged by the space key. Will read from file or camera\n" -"Usage: \n" -" ./bgfg_segm [--file_name]=\n\n"); +"Learns the background at the start and then segments.\n" +"Learning is togged by the space key. Will read from file or camera\n" +"Call:\n" +"./ bgfg_segm [file name -- if no name, read from camera]\n\n"); } //this is a sample for foreground detection functions -int main(int argc, const char** argv) +int main(int argc, char** argv) { - help(); - - CommandLineParser parser(argc, argv); - - string fileName = parser.get("file_name", "0"); VideoCapture cap; bool update_bg_model = true; - - if(fileName == "0" ) + if( argc < 2 ) cap.open(0); else - cap.open(fileName.c_str()); - + cap.open(argv[1]); + help(); + if( !cap.isOpened() ) { - help(); printf("can not open camera or video file\n"); return -1; } diff --git a/samples/cpp/brief_match_test.cpp b/samples/cpp/brief_match_test.cpp index 0650e30d32..5c491dac2c 100644 --- a/samples/cpp/brief_match_test.cpp +++ b/samples/cpp/brief_match_test.cpp @@ -4,7 +4,6 @@ * Created on: Oct 17, 2010 * Author: ethan */ -#include "opencv2/core/core.hpp" #include "opencv2/calib3d/calib3d.hpp" #include "opencv2/features2d/features2d.hpp" #include "opencv2/imgproc/imgproc.hpp" @@ -12,7 +11,6 @@ #include #include -using namespace std; using namespace cv; using std::cout; @@ -20,15 +18,13 @@ using std::cerr; using std::endl; using std::vector; -void help() -{ - printf("\nThis program shows how to use BRIEF descriptor to match points in features2d\n" - "It takes in two images, finds keypoints and matches them displaying matches and final homography warped results\n" - "Usage: \n" - " ./brief_match_test [--first_file]= \n" - " [--second_file]= \n" - "Example: \n" - "./brief_match_test --first_file=left01.jpg --second_file=left02.jpg \n"); +void help(char **av) +{ + cerr << "usage: " << av[0] << " im1.jpg im2.jpg" + << "\n" + << "This program shows how to use BRIEF descriptor to match points in features2d\n" + << "It takes in two images, finds keypoints and matches them displaying matches and final homography warped results\n" + << endl; } //Copy (x,y) location of descriptor matches found from KeyPoint data structures into Point2f vectors @@ -59,22 +55,16 @@ double match(const vector& /*kpts_train*/, const vector& /*k -int main(int ac, const char ** av) +int main(int ac, char ** av) { - help(); - - CommandLineParser parser(ac, av); - - string im1_name, im2_name; - im1_name = parser.get("first_file", "left01.jpg"); - im2_name = parser.get("second_file", "left02.jpg"); - - if (im1_name.empty() || im2_name.empty()) + if (ac != 3) { - help(); - printf("\n You have to indicate two files first_file and second_file \n"); - return -1; + help(av); + return 1; } + string im1_name, im2_name; + im1_name = av[1]; + im2_name = av[2]; Mat im1 = imread(im1_name, CV_LOAD_IMAGE_GRAYSCALE); Mat im2 = imread(im2_name, CV_LOAD_IMAGE_GRAYSCALE); diff --git a/samples/cpp/calibration.cpp b/samples/cpp/calibration.cpp index 2bf6528df9..ba5c210b21 100644 --- a/samples/cpp/calibration.cpp +++ b/samples/cpp/calibration.cpp @@ -9,59 +9,69 @@ using namespace cv; using namespace std; +const char * usage = +" \nexample command line for calibration from a live feed.\n" +" calibration -w 4 -h 5 -s 0.025 -o camera.yml -op -oe\n" +" \n" +" example command line for calibration from a list of stored images:\n" +" imagelist_creator image_list.xml *.png\n" +" calibration -w 4 -h 5 -s 0.025 -o camera.yml -op -oe image_list.xml\n" +" where image_list.xml is the standard OpenCV XML/YAML\n" +" use 
imagelist_creator to create the xml or yaml list\n" +" file consisting of the list of strings, e.g.:\n" +" \n" +"\n" +"\n" +"\n" +"view000.png\n" +"view001.png\n" +"\n" +"view003.png\n" +"view010.png\n" +"one_extra_view.jpg\n" +"\n" +"\n"; + + + + +const char* liveCaptureHelp = + "When the live video from camera is used as input, the following hot-keys may be used:\n" + " , 'q' - quit the program\n" + " 'g' - start capturing images\n" + " 'u' - switch undistortion on/off\n"; + void help() { printf( "This is a camera calibration sample.\n" "Usage: calibration\n" - " -w= # the number of inner corners per one of board dimension\n" - " -h= # the number of inner corners per another board dimension\n" - " [-pt]= # the type of pattern: chessboard or circles' grid\n" - " [-n]= # the number of frames to use for calibration\n" + " -w # the number of inner corners per one of board dimension\n" + " -h # the number of inner corners per another board dimension\n" + " [-pt ] # the type of pattern: chessboard or circles' grid\n" + " [-n ] # the number of frames to use for calibration\n" " # (if not specified, it will be set to the number\n" " # of board views actually available)\n" - " [-d]= # a minimum delay in ms between subsequent attempts to capture a next view\n" + " [-d ] # a minimum delay in ms between subsequent attempts to capture a next view\n" " # (used only for video capturing)\n" - " [-s]= # square size in some user-defined units (1 by default)\n" - " [-o]= # the output filename for intrinsic [and extrinsic] parameters\n" + " [-s ] # square size in some user-defined units (1 by default)\n" + " [-o ] # the output filename for intrinsic [and extrinsic] parameters\n" " [-op] # write detected feature points\n" " [-oe] # write extrinsic parameters\n" " [-zt] # assume zero tangential distortion\n" - " [-a]= # fix aspect ratio (fx/fy)\n" + " [-a ] # fix aspect ratio (fx/fy)\n" " [-p] # fix the principal point at the center\n" " [-v] # flip the captured images around the horizontal axis\n" " [-V] # use a video file, and not an image list, uses\n" " # [input_data] string for the video file name\n" " [-su] # show undistorted images after calibration\n" - " [-input_data]= # input data, one of the following:\n" + " [input_data] # input data, one of the following:\n" " # - text file with a list of the images of the board\n" " # the text file can be generated with imagelist_creator\n" " # - name of video file with a video of the board\n" - " [-cameraId]=# if input_data not specified, a live view from the camera is used\n" - " \nExample command line for calibration from a live feed:\n" - " ./calibration -w=4 -h=5 -s=0.025 -o=camera.yml -op -oe\n" - " \n" - " Example command line for calibration from a list of stored images:\n" - " imagelist_creator image_list.xml *.png\n" - " ./calibration -w=4 -h-5 -s=0.025 -o=camera.yml -op -oe -input_data=image_list.xml\n" - " where image_list.xml is the standard OpenCV XML/YAML\n" - " use imagelist_creator to create the xml or yaml list\n" - " file consisting of the list of strings, e.g.:\n" - " \n" - "\n" - "\n" - "\n" - "view000.png\n" - "view001.png\n" - "\n" - "view003.png\n" - "view010.png\n" - "one_extra_view.jpg\n" - "\n" - "\n" - "\nWhen the live video from camera is used as input, the following hot-keys may be used:\n" - " , 'q' - quit the program\n" - " 'g' - start capturing images\n" - " 'u' - switch undistortion on/off\n"); + " # if input_data not specified, a live view from the camera is used\n" + "\n" ); + printf("\n%s",usage); + printf( "\n%s", 
liveCaptureHelp ); } enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 }; @@ -279,74 +289,126 @@ bool runAndSave(const string& outputFilename, } -int main( int argc, const char** argv ) +int main( int argc, char** argv ) { - help(); - CommandLineParser parser(argc, argv); - Size boardSize, imageSize; - boardSize.width = parser.get("w"); - boardSize.height = parser.get("h"); - float squareSize = parser.get("s", 1.f); - float aspectRatio = parser.get("a", 1.f); + float squareSize = 1.f, aspectRatio = 1.f; Mat cameraMatrix, distCoeffs; - string outputFilename = parser.get("o","out_camera_data.yml"); - string inputFilename = parser.get("input_data"); - int nframes = parser.get("n", 10); - bool writeExtrinsics = parser.get("oe"); - bool writePoints = parser.get("op"); - bool flipVertical = parser.get("v"); - bool showUndistorted = parser.get("su"); - bool videofile = parser.get("V"); - unsigned int delay = parser.get("d", 1000); - unsigned int cameraId = parser.get("cameraId",0); + const char* outputFilename = "out_camera_data.yml"; + const char* inputFilename = 0; + + int i, nframes = 10; + bool writeExtrinsics = false, writePoints = false; bool undistortImage = false; int flags = 0; VideoCapture capture; + bool flipVertical = false; + bool showUndistorted = false; + bool videofile = false; + int delay = 1000; clock_t prevTimestamp = 0; int mode = DETECTION; + int cameraId = 0; vector > imagePoints; vector imageList; Pattern pattern = CHESSBOARD; - if( (boardSize.width < 1) || (boardSize.height < 1)) + if( argc < 2 ) { help(); - return fprintf( stderr, "Invalid board width or height. It must be more than zero\n" ), -1; + return 0; } - if(parser.get("pt")=="circles") - pattern = CIRCLES_GRID; - else if(parser.get("pt")=="acircles") - pattern = ASYMMETRIC_CIRCLES_GRID; - if(squareSize <= 0) + for( i = 1; i < argc; i++ ) { - help(); - return fprintf( stderr, "Invalid board square width. It must be more than zero.\n" ), -1; - } - if(nframes < 4) - { - help(); - return printf("Invalid number of images. It must be more than 3\n" ), -1; - } - if(aspectRatio <= 0) - { - help(); - return printf("Invalid aspect ratio. 
It must be more than zero\n" ), -1; - } + const char* s = argv[i]; + if( strcmp( s, "-w" ) == 0 ) + { + if( sscanf( argv[++i], "%u", &boardSize.width ) != 1 || boardSize.width <= 0 ) + return fprintf( stderr, "Invalid board width\n" ), -1; + } + else if( strcmp( s, "-h" ) == 0 ) + { + if( sscanf( argv[++i], "%u", &boardSize.height ) != 1 || boardSize.height <= 0 ) + return fprintf( stderr, "Invalid board height\n" ), -1; + } + else if( strcmp( s, "-pt" ) == 0 ) + { + i++; + if( !strcmp( argv[i], "circles" ) ) + pattern = CIRCLES_GRID; + else if( !strcmp( argv[i], "acircles" ) ) + pattern = ASYMMETRIC_CIRCLES_GRID; + else if( !strcmp( argv[i], "chessboard" ) ) + pattern = CHESSBOARD; + else + return fprintf( stderr, "Invalid pattern type: must be chessboard or circles\n" ), -1; + } + else if( strcmp( s, "-s" ) == 0 ) + { + if( sscanf( argv[++i], "%f", &squareSize ) != 1 || squareSize <= 0 ) + return fprintf( stderr, "Invalid board square width\n" ), -1; + } + else if( strcmp( s, "-n" ) == 0 ) + { + if( sscanf( argv[++i], "%u", &nframes ) != 1 || nframes <= 3 ) + return printf("Invalid number of images\n" ), -1; + } + else if( strcmp( s, "-a" ) == 0 ) + { + if( sscanf( argv[++i], "%f", &aspectRatio ) != 1 || aspectRatio <= 0 ) + return printf("Invalid aspect ratio\n" ), -1; + flags |= CV_CALIB_FIX_ASPECT_RATIO; + } + else if( strcmp( s, "-d" ) == 0 ) + { + if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 ) + return printf("Invalid delay\n" ), -1; + } + else if( strcmp( s, "-op" ) == 0 ) + { + writePoints = true; + } + else if( strcmp( s, "-oe" ) == 0 ) + { + writeExtrinsics = true; + } + else if( strcmp( s, "-zt" ) == 0 ) + { + flags |= CV_CALIB_ZERO_TANGENT_DIST; + } + else if( strcmp( s, "-p" ) == 0 ) + { + flags |= CV_CALIB_FIX_PRINCIPAL_POINT; + } + else if( strcmp( s, "-v" ) == 0 ) + { + flipVertical = true; + } + else if( strcmp( s, "-V" ) == 0 ) + { + videofile = true; + } + else if( strcmp( s, "-o" ) == 0 ) + { + outputFilename = argv[++i]; + } + else if( strcmp( s, "-su" ) == 0 ) + { + showUndistorted = true; + } + else if( s[0] != '-' ) + { + if( isdigit(s[0]) ) + sscanf(s, "%d", &cameraId); + else + inputFilename = s; + } else - flags |= CV_CALIB_FIX_ASPECT_RATIO; - if(!delay) - { - help(); - return printf("Invalid delay. 
It must be more than zero.\n" ), -1; + return fprintf( stderr, "Unknown option %s", s ), -1; } - if(parser.get("zt")) - flags |= CV_CALIB_ZERO_TANGENT_DIST; - if(parser.get("p")) - flags |= CV_CALIB_FIX_PRINCIPAL_POINT; - if( !inputFilename.empty() ) + if( inputFilename ) { if( !videofile && readStringList(inputFilename, imageList) ) mode = CAPTURING; @@ -362,9 +424,11 @@ int main( int argc, const char** argv ) if( !imageList.empty() ) nframes = (int)imageList.size(); + if( capture.isOpened() ) + printf( "%s", liveCaptureHelp ); + namedWindow( "Image View", 1 ); - int i; for(i = 0;;i++) { Mat view, viewGray; diff --git a/samples/cpp/camshiftdemo.cpp b/samples/cpp/camshiftdemo.cpp index b21cd4c6fc..5fcd2c664d 100644 --- a/samples/cpp/camshiftdemo.cpp +++ b/samples/cpp/camshiftdemo.cpp @@ -1,9 +1,8 @@ -#include "opencv2/core/core.hpp" #include "opencv2/video/tracking.hpp" #include "opencv2/imgproc/imgproc.hpp" #include "opencv2/highgui/highgui.hpp" -#include +#include #include using namespace cv; @@ -11,17 +10,19 @@ using namespace std; void help() { - printf("\nThis is a demo that shows mean-shift based tracking\n" - "You select a color objects such as your face and it tracks it.\n" - "This reads from video camera (0 by default, or the camera number the user enters\n" - "Usage:\n" - "./camshiftdemo [--cameraIndex]=\n" - "\nHot keys: \n" - "\tESC - quit the program\n" - "\tc - stop the tracking\n" - "\tb - switch to/from backprojection view\n" - "\th - show/hide object histogram\n" - "To initialize tracking, select the object with mouse\n"); + cout << "\nThis is a demo that shows mean-shift based tracking\n" + << "You select a color objects such as your face and it tracks it.\n" + << "This reads from video camera (0 by default, or the camera number the user enters\n" + << "Call:\n" + << "\n./camshiftdemo [camera number]" + << "\n" << endl; + + cout << "\n\nHot keys: \n" + "\tESC - quit the program\n" + "\tc - stop the tracking\n" + "\tb - switch to/from backprojection view\n" + "\th - show/hide object histogram\n" + "To initialize tracking, select the object with mouse\n" << endl; } Mat image; @@ -63,13 +64,8 @@ void onMouse( int event, int x, int y, int, void* ) -int main( int argc, const char** argv ) +int main( int argc, char** argv ) { - help(); - - CommandLineParser parser(argc, argv); - - unsigned int cameraInd = parser.get("cameraIndex", 0); VideoCapture cap; Rect trackWindow; RotatedRect trackBox; @@ -77,15 +73,20 @@ int main( int argc, const char** argv ) float hranges[] = {0,180}; const float* phranges = hranges; - cap.open(cameraInd); + if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0]))) + cap.open(argc == 2 ? argv[1][0] - '0' : 0); + else if( argc == 2 ) + cap.open(argv[1]); if( !cap.isOpened() ) { help(); - printf("***Could not initialize capturing...***\n"); + cout << "***Could not initialize capturing...***\n"; return 0; } + help(); + namedWindow( "Histogram", 1 ); namedWindow( "CamShift Demo", 1 ); setMouseCallback( "CamShift Demo", onMouse, 0 ); diff --git a/samples/cpp/chamfer.cpp b/samples/cpp/chamfer.cpp index f887f539c8..3d4b5c20ff 100644 --- a/samples/cpp/chamfer.cpp +++ b/samples/cpp/chamfer.cpp @@ -2,34 +2,33 @@ #include "opencv2/highgui/highgui.hpp" #include "opencv2/contrib/contrib.hpp" -#include +#include using namespace cv; using namespace std; void help() { - printf("\nThis program demonstrates Chamfer matching -- computing a distance between an \n" - "edge template and a query edge image.\n" - "Usage:\n" - "./chamfer [\n" - "