mirror of
https://github.com/opencv/opencv.git
synced 2024-11-24 03:00:14 +08:00
reverted samples with new command argument parser. will be continued after OpenCV release.
This commit is contained in:
parent
8f4f982e5c
commit
3876cf22e3
@ -35,17 +35,27 @@
|
||||
//M*/
|
||||
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/contrib/contrib.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
|
||||
#include <iostream>
|
||||
#include <cstdio>
|
||||
#include <cstring>
|
||||
#include <ctime>
|
||||
#include "opencv2/contrib/contrib.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
void help(char **argv)
|
||||
{
|
||||
std::cout << "\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector which can be found in contrib.cpp\n"
|
||||
<< "Usage: " << std::endl <<
|
||||
argv[0] << " fileMask firstFrame lastFrame" << std::endl << std::endl <<
|
||||
"Example: " << std::endl <<
|
||||
argv[0] << " C:\\VideoSequences\\sample1\\right_view\\temp_%05d.jpg 0 1000" << std::endl <<
|
||||
" iterates through temp_00000.jpg to temp_01000.jpg" << std::endl << std::endl <<
|
||||
"If no parameter specified, this application will try to capture from the default Webcam." << std::endl <<
|
||||
"Please note: Background should not contain large surfaces with skin tone." <<
|
||||
"\n\n ESC will stop\n"
|
||||
"Using OpenCV version %s\n" << CV_VERSION << "\n"
|
||||
<< std::endl;
|
||||
}
|
||||
|
||||
class ASDFrameHolder
|
||||
{
|
||||
@ -149,6 +159,7 @@ void ASDFrameHolder::setImage(IplImage *sourceImage)
|
||||
|
||||
|
||||
//-------------------- ASDFrameSequencer -----------------------//
|
||||
|
||||
ASDFrameSequencer::~ASDFrameSequencer()
|
||||
{
|
||||
close();
|
||||
@ -204,6 +215,7 @@ bool ASDCVFrameSequencer::isOpen()
|
||||
|
||||
|
||||
//-------------------- ASDFrameSequencerWebCam -----------------------//
|
||||
|
||||
bool ASDFrameSequencerWebCam::open(int cameraIndex)
|
||||
{
|
||||
close();
|
||||
@ -323,39 +335,19 @@ void displayBuffer(IplImage *rgbDestImage, IplImage *buffer, int rValue, int gVa
|
||||
}
|
||||
};
|
||||
|
||||
void help()
|
||||
int main(int argc, char** argv )
|
||||
{
|
||||
printf("\nThis program demonstrates the contributed flesh detector CvAdaptiveSkinDetector \n"
|
||||
"which can be found in contrib.cpp \n"
|
||||
"Usage: \n"
|
||||
"./adaptiveskindetector [--fileMask]=<path to file, which are used in mask \n"
|
||||
" [--firstFrame]=<first frame number \n"
|
||||
" [--lastFrame]=<last frame number> \n"
|
||||
"if at least one parameter doesn't specified, it will try to use default webcam \n"
|
||||
"Expample: \n"
|
||||
" --fileMask = /home/user_home_directory/work/opencv/samples/c/temp_%%05d.jpg --firstFrame=0 --lastFrame=1000 \n");
|
||||
}
|
||||
|
||||
int main(int argc, const char** argv )
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
string fileMask = parser.get<string>("fileMask");
|
||||
int firstFrame = parser.get<int>("firstFrame", 0);
|
||||
int lastFrame = parser.get<int>("lastFrame", 0);
|
||||
|
||||
IplImage *img, *filterMask = NULL;
|
||||
CvAdaptiveSkinDetector filter(1, CvAdaptiveSkinDetector::MORPHING_METHOD_ERODE_DILATE);
|
||||
ASDFrameSequencer *sequencer;
|
||||
CvFont base_font;
|
||||
char caption[2048], s[256], windowName[256];
|
||||
long int clockTotal = 0, numFrames = 0;
|
||||
std::clock_t clock;
|
||||
std::clock_t clock;
|
||||
|
||||
if (argc < 4)
|
||||
{
|
||||
help(argv);
|
||||
sequencer = new ASDFrameSequencerWebCam();
|
||||
(dynamic_cast<ASDFrameSequencerWebCam*>(sequencer))->open(-1);
|
||||
|
||||
@ -366,9 +358,8 @@ int main(int argc, const char** argv )
|
||||
}
|
||||
else
|
||||
{
|
||||
// A sequence of images captured from video source, is stored here
|
||||
sequencer = new ASDFrameSequencerImageFile();
|
||||
(dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(fileMask.c_str(), firstFrame, lastFrame );
|
||||
(dynamic_cast<ASDFrameSequencerImageFile*>(sequencer))->open(argv[1], std::atoi(argv[2]), std::atoi(argv[3]) ); // A sequence of images captured from video source, is stored here
|
||||
|
||||
}
|
||||
std::sprintf(windowName, "%s", "Adaptive Skin Detection Algorithm for Video Sequences");
|
||||
@ -376,6 +367,10 @@ int main(int argc, const char** argv )
|
||||
cvNamedWindow(windowName, CV_WINDOW_AUTOSIZE);
|
||||
cvInitFont( &base_font, CV_FONT_VECTOR0, 0.5, 0.5);
|
||||
|
||||
// Usage:
|
||||
// c:\>CvASDSample "C:\VideoSequences\sample1\right_view\temp_%05d.jpg" 0 1000
|
||||
|
||||
std::cout << "Press ESC to stop." << std::endl << std::endl;
|
||||
while ((img = sequencer->getNextImage()) != 0)
|
||||
{
|
||||
numFrames++;
|
||||
|
@ -25,14 +25,10 @@
|
||||
#include <stdlib.h>
|
||||
#include <ctype.h>
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/video/background_segm.hpp"
|
||||
#include <opencv2/imgproc/imgproc_c.h>
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
//VARIABLES for CODEBOOK METHOD:
|
||||
CvBGCodeBookModel* model = 0;
|
||||
const int NCHANNELS = 3;
|
||||
@ -42,28 +38,26 @@ void help(void)
|
||||
{
|
||||
printf("\nLearn background and find foreground using simple average and average difference learning method:\n"
|
||||
"Originally from the book: Learning OpenCV by O'Reilly press\n"
|
||||
"\nUsage:\n"
|
||||
"./bgfg_codebook [--nframes]=<frames number, 300 as default> \n"
|
||||
" [--input]=<movie filename or camera index, zero camera index as default>\n"
|
||||
"***Keep the focus on the video windows, NOT the consol***\n\n"
|
||||
"INTERACTIVE PARAMETERS:\n"
|
||||
"\tESC,q,Q - quit the program\n"
|
||||
"\th - print this help\n"
|
||||
"\tp - pause toggle\n"
|
||||
"\ts - single step\n"
|
||||
"\tr - run mode (single step off)\n"
|
||||
"=== AVG PARAMS ===\n"
|
||||
"\t- - bump high threshold UP by 0.25\n"
|
||||
"\t= - bump high threshold DOWN by 0.25\n"
|
||||
"\t[ - bump low threshold UP by 0.25\n"
|
||||
"\t] - bump low threshold DOWN by 0.25\n"
|
||||
"=== CODEBOOK PARAMS ===\n"
|
||||
"\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n"
|
||||
"\ta - adjust all 3 channels at once\n"
|
||||
"\tb - adjust both 2 and 3 at once\n"
|
||||
"\ti,o - bump upper threshold up,down by 1\n"
|
||||
"\tk,l - bump lower threshold up,down by 1\n"
|
||||
"\tSPACE - reset the model\n"
|
||||
"\nUSAGE:\nbgfg_codebook [--nframes=300] [movie filename, else from camera]\n"
|
||||
"***Keep the focus on the video windows, NOT the consol***\n\n"
|
||||
"INTERACTIVE PARAMETERS:\n"
|
||||
"\tESC,q,Q - quit the program\n"
|
||||
"\th - print this help\n"
|
||||
"\tp - pause toggle\n"
|
||||
"\ts - single step\n"
|
||||
"\tr - run mode (single step off)\n"
|
||||
"=== AVG PARAMS ===\n"
|
||||
"\t- - bump high threshold UP by 0.25\n"
|
||||
"\t= - bump high threshold DOWN by 0.25\n"
|
||||
"\t[ - bump low threshold UP by 0.25\n"
|
||||
"\t] - bump low threshold DOWN by 0.25\n"
|
||||
"=== CODEBOOK PARAMS ===\n"
|
||||
"\ty,u,v- only adjust channel 0(y) or 1(u) or 2(v) respectively\n"
|
||||
"\ta - adjust all 3 channels at once\n"
|
||||
"\tb - adjust both 2 and 3 at once\n"
|
||||
"\ti,o - bump upper threshold up,down by 1\n"
|
||||
"\tk,l - bump lower threshold up,down by 1\n"
|
||||
"\tSPACE - reset the model\n"
|
||||
);
|
||||
}
|
||||
|
||||
@ -71,20 +65,15 @@ void help(void)
|
||||
//USAGE: ch9_background startFrameCollection# endFrameCollection# [movie filename, else from camera]
|
||||
//If from AVI, then optionally add HighAvg, LowAvg, HighCB_Y LowCB_Y HighCB_U LowCB_U HighCB_V LowCB_V
|
||||
//
|
||||
int main(int argc, const char** argv)
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
string inputName = parser.get<string>("input", "0");
|
||||
int nframesToLearnBG = parser.get<int>("nframes", 300);
|
||||
|
||||
const char* filename = 0;
|
||||
IplImage* rawImage = 0, *yuvImage = 0; //yuvImage is for codebook method
|
||||
IplImage *ImaskCodeBook = 0,*ImaskCodeBookCC = 0;
|
||||
CvCapture* capture = 0;
|
||||
int c, n, nframes = 0;
|
||||
|
||||
int c, n, nframes = 0;
|
||||
int nframesToLearnBG = 300;
|
||||
|
||||
model = cvCreateBGCodeBookModel();
|
||||
|
||||
@ -98,30 +87,38 @@ int main(int argc, const char** argv)
|
||||
bool pause = false;
|
||||
bool singlestep = false;
|
||||
|
||||
if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
|
||||
for( n = 1; n < argc; n++ )
|
||||
{
|
||||
printf("Capture from camera\n");
|
||||
capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
|
||||
int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ;
|
||||
if( !capture)
|
||||
static const char* nframesOpt = "--nframes=";
|
||||
if( strncmp(argv[n], nframesOpt, strlen(nframesOpt))==0 )
|
||||
{
|
||||
printf ("Capture from CAM %d", c);
|
||||
printf (" didn't work\n");
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Capture from file %s\n",inputName.c_str());
|
||||
capture = cvCreateFileCapture(inputName.c_str());
|
||||
if( !capture)
|
||||
if( sscanf(argv[n] + strlen(nframesOpt), "%d", &nframesToLearnBG) == 0 )
|
||||
{
|
||||
printf ("Capture from file %s", inputName.c_str());
|
||||
printf (" didn't work\n");
|
||||
help();
|
||||
return -1;
|
||||
}
|
||||
|
||||
}
|
||||
else
|
||||
filename = argv[n];
|
||||
}
|
||||
|
||||
if( !filename )
|
||||
{
|
||||
printf("Capture from camera\n");
|
||||
capture = cvCaptureFromCAM( 0 );
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Capture from file %s\n",filename);
|
||||
capture = cvCreateFileCapture( filename );
|
||||
}
|
||||
|
||||
if( !capture )
|
||||
{
|
||||
printf( "Can not initialize video capturing\n\n" );
|
||||
help();
|
||||
return -1;
|
||||
}
|
||||
|
||||
//MAIN PROCESSING LOOP:
|
||||
for(;;)
|
||||
|
@ -1,4 +1,3 @@
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/objdetect/objdetect.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
@ -11,13 +10,13 @@ using namespace cv;
|
||||
|
||||
void help()
|
||||
{
|
||||
cout << "\nThis program demonstrates the cascade classifier. Now you can use Haar or LBP features.\n"
|
||||
cout << "\nThis program demonstrates the cascade recognizer. Now you can use Haar or LBP features.\n"
|
||||
"This classifier can recognize many ~rigid objects, it's most known use is for faces.\n"
|
||||
"Usage:\n"
|
||||
"./facedetect [--cascade=<cascade_path> this is the primary trained classifier such as frontal face]\n"
|
||||
" [--nested-cascade[=nested_cascade_path this an optional secondary classifier such as eyes]]\n"
|
||||
" [--scale=<image scale greater or equal to 1, try 1.3 for example>\n"
|
||||
" [--input=filename|camera_index]\n\n"
|
||||
" [filename|camera_index]\n\n"
|
||||
"see facedetect.cmd for one call:\n"
|
||||
"./facedetect --cascade=\"../../data/haarcascades/haarcascade_frontalface_alt.xml\" --nested-cascade=\"../../data/haarcascades/haarcascade_eye.xml\" --scale=1.3 \n"
|
||||
"Hit any key to quit.\n"
|
||||
@ -28,41 +27,70 @@ void detectAndDraw( Mat& img,
|
||||
CascadeClassifier& cascade, CascadeClassifier& nestedCascade,
|
||||
double scale);
|
||||
|
||||
String cascadeName = "../../data/haarcascades/haarcascade_frontalface_alt.xml";
|
||||
String nestedCascadeName = "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml";
|
||||
|
||||
int main( int argc, const char** argv )
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
string cascadeName = parser.get<string>("cascade", "../../data/haarcascades/haarcascade_frontalface_alt.xml");
|
||||
string nestedCascadeName = parser.get<string>("nested-cascade", "../../data/haarcascades/haarcascade_eye_tree_eyeglasses.xml");
|
||||
double scale = parser.get<double>("scale", 1.0);
|
||||
string inputName = parser.get<string>("input", "0"); //read from camera by default
|
||||
|
||||
if (!cascadeName.empty())
|
||||
cout << " from which we have cascadeName= " << cascadeName << endl;
|
||||
|
||||
if (!nestedCascadeName.empty())
|
||||
cout << " from which we have nestedCascadeName= " << nestedCascadeName << endl;
|
||||
|
||||
CvCapture* capture = 0;
|
||||
Mat frame, frameCopy, image;
|
||||
const String scaleOpt = "--scale=";
|
||||
size_t scaleOptLen = scaleOpt.length();
|
||||
const String cascadeOpt = "--cascade=";
|
||||
size_t cascadeOptLen = cascadeOpt.length();
|
||||
const String nestedCascadeOpt = "--nested-cascade";
|
||||
size_t nestedCascadeOptLen = nestedCascadeOpt.length();
|
||||
String inputName;
|
||||
|
||||
help();
|
||||
|
||||
CascadeClassifier cascade, nestedCascade;
|
||||
double scale = 1;
|
||||
|
||||
for( int i = 1; i < argc; i++ )
|
||||
{
|
||||
cout << "Processing " << i << " " << argv[i] << endl;
|
||||
if( cascadeOpt.compare( 0, cascadeOptLen, argv[i], cascadeOptLen ) == 0 )
|
||||
{
|
||||
cascadeName.assign( argv[i] + cascadeOptLen );
|
||||
cout << " from which we have cascadeName= " << cascadeName << endl;
|
||||
}
|
||||
else if( nestedCascadeOpt.compare( 0, nestedCascadeOptLen, argv[i], nestedCascadeOptLen ) == 0 )
|
||||
{
|
||||
if( argv[i][nestedCascadeOpt.length()] == '=' )
|
||||
nestedCascadeName.assign( argv[i] + nestedCascadeOpt.length() + 1 );
|
||||
if( !nestedCascade.load( nestedCascadeName ) )
|
||||
cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
|
||||
}
|
||||
else if( scaleOpt.compare( 0, scaleOptLen, argv[i], scaleOptLen ) == 0 )
|
||||
{
|
||||
if( !sscanf( argv[i] + scaleOpt.length(), "%lf", &scale ) || scale < 1 )
|
||||
scale = 1;
|
||||
cout << " from which we read scale = " << scale << endl;
|
||||
}
|
||||
else if( argv[i][0] == '-' )
|
||||
{
|
||||
cerr << "WARNING: Unknown option %s" << argv[i] << endl;
|
||||
}
|
||||
else
|
||||
inputName.assign( argv[i] );
|
||||
}
|
||||
|
||||
if( !cascade.load( cascadeName ) )
|
||||
{
|
||||
cerr << "ERROR: Could not load classifier cascade" << endl;
|
||||
cerr << "Usage: facedetect [--cascade=<cascade_path>]\n"
|
||||
" [--nested-cascade[=nested_cascade_path]]\n"
|
||||
" [--scale[=<image scale>\n"
|
||||
" [filename|camera_index]\n" << endl ;
|
||||
return -1;
|
||||
}
|
||||
|
||||
if( !nestedCascade.load( nestedCascadeName ) )
|
||||
cerr << "WARNING: Could not load classifier cascade for nested objects" << endl;
|
||||
|
||||
if( inputName.empty() || (isdigit(inputName.c_str()[0]) && inputName.c_str()[1] == '\0') )
|
||||
{
|
||||
capture = cvCaptureFromCAM( inputName.empty() ? 0 : inputName.c_str()[0] - '0' );
|
||||
int c = inputName.empty() ? 0 : inputName.c_str()[0] - '0' ;
|
||||
if( !capture) cout << "Capture from CAM " << c << " didn't work" << endl;
|
||||
if(!capture) cout << "Capture from CAM " << c << " didn't work" << endl;
|
||||
}
|
||||
else if( inputName.size() )
|
||||
{
|
||||
@ -70,9 +98,14 @@ int main( int argc, const char** argv )
|
||||
if( image.empty() )
|
||||
{
|
||||
capture = cvCaptureFromAVI( inputName.c_str() );
|
||||
if( !capture ) cout << "Capture from AVI didn't work" << endl;
|
||||
if(!capture) cout << "Capture from AVI didn't work" << endl;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
image = imread( "lena.jpg", 1 );
|
||||
if(image.empty()) cout << "Couldn't read lena.jpg" << endl;
|
||||
}
|
||||
|
||||
cvNamedWindow( "result", 1 );
|
||||
|
||||
|
@ -4,30 +4,26 @@
|
||||
* Author: Liu Liu
|
||||
* liuliu.1987+opencv@gmail.com
|
||||
*/
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/objdetect/objdetect.hpp"
|
||||
#include "opencv2/features2d/features2d.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/calib3d/calib3d.hpp"
|
||||
#include <opencv2/imgproc/imgproc_c.h>
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
|
||||
#include <iostream>
|
||||
#include <vector>
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
void help()
|
||||
{
|
||||
printf( "\n This program demonstrated the use of the SURF Detector and Descriptor using\n"
|
||||
"either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
|
||||
"on planar objects.\n"
|
||||
"Usage: \n"
|
||||
"./find_obj [--object_filename]=<object_filename, box.png as default> \n"
|
||||
" [--scene_filename]=<scene_filename box_in_scene.png as default>] \n"
|
||||
"Example: \n"
|
||||
"./find_obj --object_filename=box.png --scene_filename=box_in_scene.png \n\n"
|
||||
);
|
||||
printf(
|
||||
"This program demonstrated the use of the SURF Detector and Descriptor using\n"
|
||||
"either FLANN (fast approx nearst neighbor classification) or brute force matching\n"
|
||||
"on planar objects.\n"
|
||||
"Call:\n"
|
||||
"./find_obj [<object_filename default box.png> <scene_filename default box_in_scene.png>]\n\n"
|
||||
);
|
||||
|
||||
}
|
||||
|
||||
// define whether to use approximate nearest-neighbor search
|
||||
@ -213,16 +209,13 @@ locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors
|
||||
return 1;
|
||||
}
|
||||
|
||||
int main(int argc, const char** argv)
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
string objectFileName = parser.get<string>("object_filename", "box.png");
|
||||
string sceneFileName = parser.get<string>("scene_filename", "box_in_scene.png");
|
||||
const char* object_filename = argc == 3 ? argv[1] : "box.png";
|
||||
const char* scene_filename = argc == 3 ? argv[2] : "box_in_scene.png";
|
||||
|
||||
CvMemStorage* storage = cvCreateMemStorage(0);
|
||||
help();
|
||||
cvNamedWindow("Object", 1);
|
||||
cvNamedWindow("Object Correspond", 1);
|
||||
|
||||
@ -239,11 +232,13 @@ int main(int argc, const char** argv)
|
||||
{{255,255,255}}
|
||||
};
|
||||
|
||||
IplImage* object = cvLoadImage( objectFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
|
||||
IplImage* image = cvLoadImage( sceneFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
|
||||
IplImage* object = cvLoadImage( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
|
||||
IplImage* image = cvLoadImage( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
|
||||
if( !object || !image )
|
||||
{
|
||||
fprintf( stderr, "Can not load %s and/or %s\n", objectFileName.c_str(), sceneFileName.c_str() );
|
||||
fprintf( stderr, "Can not load %s and/or %s\n"
|
||||
"Usage: find_obj [<object_filename> <scene_filename>]\n",
|
||||
object_filename, scene_filename );
|
||||
exit(-1);
|
||||
}
|
||||
IplImage* object_color = cvCreateImage(cvGetSize(object), 8, 3);
|
||||
|
@ -11,17 +11,14 @@ using namespace cv;
|
||||
|
||||
void help()
|
||||
{
|
||||
printf("\n This program shows the use of the Calonder point descriptor classifier \n"
|
||||
"SURF is used to detect interest points, Calonder is used to describe/match these points \n"
|
||||
"Usage: \n"
|
||||
"./find_obj_calonder --classifier_file=<classifier file, there is no default classifier file. You should create it at first and when you can use it for test> \n"
|
||||
" --test_image=<image file for test, lena.jpg as default> \n"
|
||||
" [--train_container]=<txt file with train images filenames> \n"
|
||||
"Example: \n"
|
||||
" --classifier_file=test_classifier --test_image=lena.jpg --train_container=one_way_train_images.txt \n"
|
||||
" the test_classifier is created here using --train_container and tested witn --test_image at the end \n"
|
||||
" --classifier_file=test_classifier --test_image=lena.jpg \n"
|
||||
" the test classifier is tested here using lena.jpg \n");
|
||||
cout << "This program shows the use of the Calonder point descriptor classifier"
|
||||
"SURF is used to detect interest points, Calonder is used to describe/match these points\n"
|
||||
"Format:" << endl <<
|
||||
" classifier_file(to write) test_image file_with_train_images_filenames(txt)" <<
|
||||
" or" << endl <<
|
||||
" classifier_file(to read) test_image"
|
||||
"Using OpenCV version %s\n" << CV_VERSION << "\n"
|
||||
<< endl;
|
||||
}
|
||||
/*
|
||||
* Generates random perspective transform of image
|
||||
@ -147,27 +144,18 @@ void testCalonderClassifier( const string& classifierFilename, const string& img
|
||||
waitKey();
|
||||
}
|
||||
|
||||
int main( int argc, const char **argv )
|
||||
int main( int argc, char **argv )
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
string classifierFileName = parser.get<string>("classifier_file");
|
||||
string testImageFileName = parser.get<string>("test_image", "lena.jpg");
|
||||
string trainContainerFileName = parser.get<string>("train_container");
|
||||
|
||||
if( classifierFileName.empty())
|
||||
if( argc != 4 && argc != 3 )
|
||||
{
|
||||
printf("\n Can't find classifier file, please select file for --classifier_file parameter \n");
|
||||
help();
|
||||
return -1;
|
||||
}
|
||||
|
||||
if( !trainContainerFileName.empty())
|
||||
trainCalonderClassifier( classifierFileName.c_str(), trainContainerFileName.c_str() );
|
||||
if( argc == 4 )
|
||||
trainCalonderClassifier( argv[1], argv[3] );
|
||||
|
||||
testCalonderClassifier( classifierFileName.c_str(), testImageFileName.c_str() );
|
||||
testCalonderClassifier( argv[1], argv[2] );
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -9,37 +9,30 @@
|
||||
#include <vector>
|
||||
|
||||
using namespace cv;
|
||||
|
||||
void help()
|
||||
{
|
||||
printf( "This program shows the use of the \"fern\" plannar PlanarObjectDetector point\n"
|
||||
"descriptor classifier"
|
||||
"Usage: \n"
|
||||
"./find_obj_ferns [--object_filename]=<object_filename, box.png as default> \n"
|
||||
" [--scene_filename]=<scene_filename box_in_scene.png as default>] \n"
|
||||
"Example: \n"
|
||||
"./find_obj_ferns --object_filename=box.png --scene_filename=box_in_scene.png \n");
|
||||
"descriptor classifier"
|
||||
"Usage:\n"
|
||||
"./find_obj_ferns [<object_filename default: box.png> <scene_filename default:box_in_scene.png>]\n"
|
||||
"\n");
|
||||
}
|
||||
|
||||
int main(int argc, const char** argv)
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
const char* object_filename = argc > 1 ? argv[1] : "box.png";
|
||||
const char* scene_filename = argc > 2 ? argv[2] : "box_in_scene.png";
|
||||
int i;
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
string objectFileName = parser.get<string>("object_filename", "box.png");
|
||||
string sceneFileName = parser.get<string>("scene_filename", "box_in_scene.png");
|
||||
|
||||
cvNamedWindow("Object", 1);
|
||||
cvNamedWindow("Image", 1);
|
||||
cvNamedWindow("Object Correspondence", 1);
|
||||
|
||||
Mat object = imread( objectFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
|
||||
Mat object = imread( object_filename, CV_LOAD_IMAGE_GRAYSCALE );
|
||||
Mat image;
|
||||
|
||||
double imgscale = 1;
|
||||
|
||||
Mat _image = imread( sceneFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
|
||||
Mat _image = imread( scene_filename, CV_LOAD_IMAGE_GRAYSCALE );
|
||||
resize(_image, image, Size(), 1./imgscale, 1./imgscale, INTER_CUBIC);
|
||||
|
||||
|
||||
@ -47,7 +40,7 @@ int main(int argc, const char** argv)
|
||||
{
|
||||
fprintf( stderr, "Can not load %s and/or %s\n"
|
||||
"Usage: find_obj_ferns [<object_filename> <scene_filename>]\n",
|
||||
objectFileName.c_str(), sceneFileName.c_str() );
|
||||
object_filename, scene_filename );
|
||||
exit(-1);
|
||||
}
|
||||
|
||||
@ -67,7 +60,7 @@ int main(int argc, const char** argv)
|
||||
vector<KeyPoint> objKeypoints, imgKeypoints;
|
||||
PatchGenerator gen(0,256,5,true,0.8,1.2,-CV_PI/2,CV_PI/2,-CV_PI/2,CV_PI/2);
|
||||
|
||||
string model_filename = format("%s_model.xml.gz", objectFileName.c_str());
|
||||
string model_filename = format("%s_model.xml.gz", object_filename);
|
||||
printf("Trying to load %s ...\n", model_filename.c_str());
|
||||
FileStorage fs(model_filename, FileStorage::READ);
|
||||
if( fs.isOpened() )
|
||||
@ -113,7 +106,6 @@ int main(int argc, const char** argv)
|
||||
t = (double)getTickCount() - t;
|
||||
printf("%gms\n", t*1000/getTickFrequency());
|
||||
|
||||
int i = 0;
|
||||
if( found )
|
||||
{
|
||||
for( i = 0; i < 4; i++ )
|
||||
|
@ -1,11 +1,9 @@
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/objdetect/objdetect.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
#include "cvconfig.h"
|
||||
#include <cvconfig.h>
|
||||
#endif
|
||||
#ifdef HAVE_TBB
|
||||
#include "tbb/task_scheduler_init.h"
|
||||
@ -15,41 +13,42 @@ using namespace cv;
|
||||
|
||||
void help()
|
||||
{
|
||||
printf( "This program demonstrated the use of the latentSVM detector.\n"
|
||||
"It reads in a trained object model and then uses that to detect the object in an image\n"
|
||||
"Usage: \n"
|
||||
"./latentsvmdetect [--image_filename]=<image_filename, cat.jpg as default> \n"
|
||||
" [--model_filename]=<model_filename, cat.xml as default> \n"
|
||||
" [--threads_number]=<number of threads, -1 as default>\n"
|
||||
"Example: \n"
|
||||
"./latentsvmdetect --image_filename=cat.jpg --model_filename=cat.xml --threads_number=7 \n"
|
||||
" Press any key to quit.\n");
|
||||
printf( "This program demonstrated the use of the latentSVM detector.\n"
|
||||
"It reads in a trained object model and then uses that to detect the object in an image\n"
|
||||
"Call:\n"
|
||||
"./latentsvmdetect [<image_filename> <model_filename> [<threads_number>]]\n"
|
||||
" The defaults for image_filename and model_filename are cat.jpg and cat.xml respectively\n"
|
||||
" Press any key to quit.\n");
|
||||
}
|
||||
|
||||
const char* model_filename = "cat.xml";
|
||||
const char* image_filename = "cat.jpg";
|
||||
int tbbNumThreads = -1;
|
||||
|
||||
void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, int numThreads = -1)
|
||||
{
|
||||
CvMemStorage* storage = cvCreateMemStorage(0);
|
||||
CvSeq* detections = 0;
|
||||
int i = 0;
|
||||
int64 start = 0, finish = 0;
|
||||
int64 start = 0, finish = 0;
|
||||
#ifdef HAVE_TBB
|
||||
tbb::task_scheduler_init init(tbb::task_scheduler_init::deferred);
|
||||
if (numThreads > 0)
|
||||
{
|
||||
init.initialize(numThreads);
|
||||
if (numThreads > 0)
|
||||
{
|
||||
init.initialize(numThreads);
|
||||
printf("Number of threads %i\n", numThreads);
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Number of threads is not correct for TBB version");
|
||||
return;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
printf("Number of threads is not correct for TBB version");
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
start = cvGetTickCount();
|
||||
|
||||
start = cvGetTickCount();
|
||||
detections = cvLatentSvmDetectObjects(image, detector, storage, 0.5f, numThreads);
|
||||
finish = cvGetTickCount();
|
||||
printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0));
|
||||
finish = cvGetTickCount();
|
||||
printf("detection time = %.3f\n", (float)(finish - start) / (float)(cvGetTickFrequency() * 1000000.0));
|
||||
|
||||
#ifdef HAVE_TBB
|
||||
init.terminate();
|
||||
@ -57,43 +56,43 @@ void detect_and_draw_objects( IplImage* image, CvLatentSvmDetector* detector, in
|
||||
for( i = 0; i < detections->total; i++ )
|
||||
{
|
||||
CvObjectDetection detection = *(CvObjectDetection*)cvGetSeqElem( detections, i );
|
||||
CvRect bounding_box = detection.rect;
|
||||
CvRect bounding_box = detection.rect;
|
||||
cvRectangle( image, cvPoint(bounding_box.x, bounding_box.y),
|
||||
cvPoint(bounding_box.x + bounding_box.width,
|
||||
bounding_box.y + bounding_box.height),
|
||||
bounding_box.y + bounding_box.height),
|
||||
CV_RGB(255,0,0), 3 );
|
||||
}
|
||||
cvReleaseMemStorage( &storage );
|
||||
}
|
||||
|
||||
int main(int argc, const char* argv[])
|
||||
int main(int argc, char* argv[])
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
string imageFileName = parser.get<string>("image_filename", "cat.jpg");
|
||||
string modelFileName = parser.get<string>("model_filename", "cat.xml");
|
||||
int tbbNumThreads = parser.get<int>("threads_number", -1);
|
||||
|
||||
IplImage* image = cvLoadImage(imageFileName.c_str());
|
||||
if (!image)
|
||||
{
|
||||
printf( "Unable to load the image\n"
|
||||
help();
|
||||
if (argc > 2)
|
||||
{
|
||||
image_filename = argv[1];
|
||||
model_filename = argv[2];
|
||||
if (argc > 3)
|
||||
{
|
||||
tbbNumThreads = atoi(argv[3]);
|
||||
}
|
||||
}
|
||||
IplImage* image = cvLoadImage(image_filename);
|
||||
if (!image)
|
||||
{
|
||||
printf( "Unable to load the image\n"
|
||||
"Pass it as the first parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );
|
||||
return -1;
|
||||
}
|
||||
CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(modelFileName.c_str());
|
||||
if (!detector)
|
||||
{
|
||||
printf( "Unable to load the model\n"
|
||||
return -1;
|
||||
}
|
||||
CvLatentSvmDetector* detector = cvLoadLatentSvmDetector(model_filename);
|
||||
if (!detector)
|
||||
{
|
||||
printf( "Unable to load the model\n"
|
||||
"Pass it as the second parameter: latentsvmdetect <path to cat.jpg> <path to cat.xml>\n" );
|
||||
cvReleaseImage( &image );
|
||||
return -1;
|
||||
}
|
||||
|
||||
cvReleaseImage( &image );
|
||||
return -1;
|
||||
}
|
||||
detect_and_draw_objects( image, detector, tbbNumThreads );
|
||||
|
||||
cvNamedWindow( "test", 0 );
|
||||
cvShowImage( "test", image );
|
||||
cvWaitKey(0);
|
||||
@ -101,5 +100,5 @@ int main(int argc, const char* argv[])
|
||||
cvReleaseImage( &image );
|
||||
cvDestroyAllWindows();
|
||||
|
||||
return 0;
|
||||
return 0;
|
||||
}
|
||||
|
@ -2,24 +2,17 @@
|
||||
* Copyright<EFBFBD> 2009, Liu Liu All rights reserved.
|
||||
*/
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/features2d/features2d.hpp"
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
void help()
|
||||
{
|
||||
printf("\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
|
||||
"It finds the most stable (in size) dark and white regions as a threshold is increased.\n"
|
||||
"\n Usage: \n"
|
||||
"./mser_sample [--image_filename] <path_and_image_filename, default is 'puzzle.png'> \n"
|
||||
"Example: \n"
|
||||
"./mser_sample --image_filename=puzzle.png \n");
|
||||
printf("\nThis program demonstrates the Maximal Extremal Region interest point detector.\n"
|
||||
"It finds the most stable (in size) dark and white regions as a threshold is increased.\n"
|
||||
"\nCall:\n"
|
||||
"./mser_sample <path_and_image_filename, Default is 'puzzle.png'>\n\n");
|
||||
}
|
||||
|
||||
static CvScalar colors[] =
|
||||
@ -51,81 +44,90 @@ static uchar bcolors[][3] =
|
||||
};
|
||||
|
||||
|
||||
int main( int argc, const char** argv )
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
help();
|
||||
char path[1024];
|
||||
IplImage* img;
|
||||
help();
|
||||
if (argc!=2)
|
||||
{
|
||||
strcpy(path,"puzzle.png");
|
||||
img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
|
||||
if (!img)
|
||||
{
|
||||
printf("\nUsage: mser_sample <path_to_image>\n");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
strcpy(path,argv[1]);
|
||||
img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
|
||||
}
|
||||
|
||||
if (!img)
|
||||
{
|
||||
printf("Unable to load image %s\n",path);
|
||||
return 0;
|
||||
}
|
||||
IplImage* rsp = cvLoadImage( path, CV_LOAD_IMAGE_COLOR );
|
||||
IplImage* ellipses = cvCloneImage(rsp);
|
||||
cvCvtColor(img,ellipses,CV_GRAY2BGR);
|
||||
CvSeq* contours;
|
||||
CvMemStorage* storage= cvCreateMemStorage();
|
||||
IplImage* hsv = cvCreateImage( cvGetSize( rsp ), IPL_DEPTH_8U, 3 );
|
||||
cvCvtColor( rsp, hsv, CV_BGR2YCrCb );
|
||||
CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 );
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
double t = (double)cvGetTickCount();
|
||||
cvExtractMSER( hsv, NULL, &contours, storage, params );
|
||||
t = cvGetTickCount() - t;
|
||||
printf( "MSER extracted %d contours in %g ms.\n", contours->total, t/((double)cvGetTickFrequency()*1000.) );
|
||||
uchar* rsptr = (uchar*)rsp->imageData;
|
||||
// draw mser with different color
|
||||
for ( int i = contours->total-1; i >= 0; i-- )
|
||||
{
|
||||
CvSeq* r = *(CvSeq**)cvGetSeqElem( contours, i );
|
||||
for ( int j = 0; j < r->total; j++ )
|
||||
{
|
||||
CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, r, j );
|
||||
rsptr[pt->x*3+pt->y*rsp->widthStep] = bcolors[i%9][2];
|
||||
rsptr[pt->x*3+1+pt->y*rsp->widthStep] = bcolors[i%9][1];
|
||||
rsptr[pt->x*3+2+pt->y*rsp->widthStep] = bcolors[i%9][0];
|
||||
}
|
||||
}
|
||||
// find ellipse ( it seems cvfitellipse2 have error or sth?
|
||||
for ( int i = 0; i < contours->total; i++ )
|
||||
{
|
||||
CvContour* r = *(CvContour**)cvGetSeqElem( contours, i );
|
||||
CvBox2D box = cvFitEllipse2( r );
|
||||
box.angle=(float)CV_PI/2-box.angle;
|
||||
|
||||
if ( r->color > 0 )
|
||||
cvEllipseBox( ellipses, box, colors[9], 2 );
|
||||
else
|
||||
cvEllipseBox( ellipses, box, colors[2], 2 );
|
||||
|
||||
}
|
||||
|
||||
string imageFileName = parser.get<string>("image_filename", "puzzle.png");
|
||||
cvSaveImage( "rsp.png", rsp );
|
||||
|
||||
IplImage* img;
|
||||
cvNamedWindow( "original", 0 );
|
||||
cvShowImage( "original", img );
|
||||
|
||||
cvNamedWindow( "response", 0 );
|
||||
cvShowImage( "response", rsp );
|
||||
|
||||
img = cvLoadImage( imageFileName.c_str(), CV_LOAD_IMAGE_GRAYSCALE );
|
||||
if (!img)
|
||||
{
|
||||
printf("Unable to load image %s\n",imageFileName.c_str());
|
||||
help();
|
||||
return 0;
|
||||
}
|
||||
cvNamedWindow( "ellipses", 0 );
|
||||
cvShowImage( "ellipses", ellipses );
|
||||
|
||||
IplImage* rsp = cvLoadImage( imageFileName.c_str(), CV_LOAD_IMAGE_COLOR );
|
||||
IplImage* ellipses = cvCloneImage(rsp);
|
||||
cvCvtColor(img,ellipses,CV_GRAY2BGR);
|
||||
CvSeq* contours;
|
||||
CvMemStorage* storage= cvCreateMemStorage();
|
||||
IplImage* hsv = cvCreateImage( cvGetSize( rsp ), IPL_DEPTH_8U, 3 );
|
||||
cvCvtColor( rsp, hsv, CV_BGR2YCrCb );
|
||||
CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 );
|
||||
cvWaitKey(0);
|
||||
|
||||
double t = (double)cvGetTickCount();
|
||||
cvExtractMSER( hsv, NULL, &contours, storage, params );
|
||||
t = cvGetTickCount() - t;
|
||||
printf( "MSER extracted %d contours in %g ms.\n", contours->total, t/((double)cvGetTickFrequency()*1000.) );
|
||||
uchar* rsptr = (uchar*)rsp->imageData;
|
||||
// draw mser with different color
|
||||
for ( int i = contours->total-1; i >= 0; i-- )
|
||||
{
|
||||
CvSeq* r = *(CvSeq**)cvGetSeqElem( contours, i );
|
||||
for ( int j = 0; j < r->total; j++ )
|
||||
{
|
||||
CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, r, j );
|
||||
rsptr[pt->x*3+pt->y*rsp->widthStep] = bcolors[i%9][2];
|
||||
rsptr[pt->x*3+1+pt->y*rsp->widthStep] = bcolors[i%9][1];
|
||||
rsptr[pt->x*3+2+pt->y*rsp->widthStep] = bcolors[i%9][0];
|
||||
}
|
||||
}
|
||||
// find ellipse ( it seems cvfitellipse2 have error or sth?
|
||||
for ( int i = 0; i < contours->total; i++ )
|
||||
{
|
||||
CvContour* r = *(CvContour**)cvGetSeqElem( contours, i );
|
||||
CvBox2D box = cvFitEllipse2( r );
|
||||
box.angle=(float)CV_PI/2-box.angle;
|
||||
|
||||
if ( r->color > 0 )
|
||||
cvEllipseBox( ellipses, box, colors[9], 2 );
|
||||
else
|
||||
cvEllipseBox( ellipses, box, colors[2], 2 );
|
||||
|
||||
}
|
||||
|
||||
cvSaveImage( "rsp.png", rsp );
|
||||
|
||||
cvNamedWindow( "original", 0 );
|
||||
cvShowImage( "original", img );
|
||||
|
||||
cvNamedWindow( "response", 0 );
|
||||
cvShowImage( "response", rsp );
|
||||
|
||||
cvNamedWindow( "ellipses", 0 );
|
||||
cvShowImage( "ellipses", ellipses );
|
||||
|
||||
cvWaitKey(0);
|
||||
|
||||
cvDestroyWindow( "original" );
|
||||
cvDestroyWindow( "response" );
|
||||
cvDestroyWindow( "ellipses" );
|
||||
cvReleaseImage(&rsp);
|
||||
cvReleaseImage(&img);
|
||||
cvReleaseImage(&ellipses);
|
||||
cvDestroyWindow( "original" );
|
||||
cvDestroyWindow( "response" );
|
||||
cvDestroyWindow( "ellipses" );
|
||||
cvReleaseImage(&rsp);
|
||||
cvReleaseImage(&img);
|
||||
cvReleaseImage(&ellipses);
|
||||
|
||||
}
|
||||
|
@ -7,24 +7,18 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
#include "opencv2/features2d/features2d.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/imgproc/imgproc_c.h"
|
||||
|
||||
#include <string>
|
||||
|
||||
void help()
|
||||
{
|
||||
printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n"
|
||||
"Correspondences are drawn\n"
|
||||
"Usage: \n"
|
||||
"./one_way_sample [--path]=<path_to_samples, '../../../opencv/samples/c' as default> \n"
|
||||
" [--first_image]=<first image file, scene_l.bmp as default> \n"
|
||||
" [--second_image]=<second image file, scene_r.bmp as default>\n"
|
||||
"For example: \n"
|
||||
" ./one_way_sample --path=../../../opencv/samples/c --first_image=scene_l.bmp --second_image=scene_r.bmp \n");
|
||||
printf("\nThis program demonstrates the one way interest point descriptor found in features2d.hpp\n"
|
||||
"Correspondences are drawn\n");
|
||||
printf("Format: \n./one_way_sample [path_to_samples] [image1] [image2]\n");
|
||||
printf("For example: ./one_way_sample ../../../opencv/samples/c scene_l.bmp scene_r.bmp\n");
|
||||
}
|
||||
|
||||
using namespace cv;
|
||||
@ -32,19 +26,21 @@ using namespace cv;
|
||||
IplImage* DrawCorrespondences(IplImage* img1, const vector<KeyPoint>& features1, IplImage* img2,
|
||||
const vector<KeyPoint>& features2, const vector<int>& desc_idx);
|
||||
|
||||
int main(int argc, const char** argv)
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
std::string path_name = parser.get<string>("path", "../../../opencv/samples/c");
|
||||
std::string img1_name = path_name + "/" + parser.get<string>("first_image", "scene_l.bmp");
|
||||
std::string img2_name = path_name + "/" + parser.get<string>("second_image", "scene_r.bmp");
|
||||
|
||||
const char images_list[] = "one_way_train_images.txt";
|
||||
const CvSize patch_size = cvSize(24, 24);
|
||||
const int pose_count = 1; //50
|
||||
const int pose_count = 50;
|
||||
|
||||
if (argc != 3 && argc != 4)
|
||||
{
|
||||
help();
|
||||
return 0;
|
||||
}
|
||||
|
||||
std::string path_name = argv[1];
|
||||
std::string img1_name = path_name + "/" + std::string(argv[2]);
|
||||
std::string img2_name = path_name + "/" + std::string(argv[3]);
|
||||
|
||||
printf("Reading the images...\n");
|
||||
IplImage* img1 = cvLoadImage(img1_name.c_str(), CV_LOAD_IMAGE_GRAYSCALE);
|
||||
|
@ -1,26 +1,21 @@
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/ml/ml.hpp"
|
||||
#include "opencv2/core/core_c.h"
|
||||
#include <stdio.h>
|
||||
#include <map>
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
void help()
|
||||
{
|
||||
printf(
|
||||
"\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees:\n"
|
||||
"CvDTree dtree;\n"
|
||||
"CvBoost boost;\n"
|
||||
"CvRTrees rtrees;\n"
|
||||
"CvERTrees ertrees;\n"
|
||||
"CvGBTrees gbtrees;\n"
|
||||
"Usage: \n"
|
||||
" ./tree_engine [--response_column]=<specified the 0-based index of the response, 0 as default> \n"
|
||||
"[--categorical_response]=<specifies that the response is categorical, 0-false, 1-true, 0 as default> \n"
|
||||
"[--csv_filename]=<is the name of training data file in comma-separated value format> \n"
|
||||
);
|
||||
printf(
|
||||
"\nThis sample demonstrates how to use different decision trees and forests including boosting and random trees:\n"
|
||||
"CvDTree dtree;\n"
|
||||
"CvBoost boost;\n"
|
||||
"CvRTrees rtrees;\n"
|
||||
"CvERTrees ertrees;\n"
|
||||
"CvGBTrees gbtrees;\n"
|
||||
"Call:\n\t./tree_engine [-r <response_column>] [-c] <csv filename>\n"
|
||||
"where -r <response_column> specified the 0-based index of the response (0 by default)\n"
|
||||
"-c specifies that the response is categorical (it's ordered by default) and\n"
|
||||
"<csv filename> is the name of training data file in comma-separated value format\n\n");
|
||||
}
|
||||
|
||||
|
||||
@ -64,24 +59,34 @@ void print_result(float train_err, float test_err, const CvMat* _var_imp)
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
int main(int argc, const char** argv)
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
string filename = parser.get<string>("csv_filename");
|
||||
int response_idx = parser.get<int>("response_column", 0);
|
||||
bool categorical_response = (bool)parser.get<int>("categorical_response", 1);
|
||||
|
||||
if(filename.empty())
|
||||
if(argc < 2)
|
||||
{
|
||||
printf("\n Please, select value for --csv_filename key \n");
|
||||
help();
|
||||
return -1;
|
||||
return 0;
|
||||
}
|
||||
const char* filename = 0;
|
||||
int response_idx = 0;
|
||||
bool categorical_response = false;
|
||||
|
||||
for(int i = 1; i < argc; i++)
|
||||
{
|
||||
if(strcmp(argv[i], "-r") == 0)
|
||||
sscanf(argv[++i], "%d", &response_idx);
|
||||
else if(strcmp(argv[i], "-c") == 0)
|
||||
categorical_response = true;
|
||||
else if(argv[i][0] != '-' )
|
||||
filename = argv[i];
|
||||
else
|
||||
{
|
||||
printf("Error. Invalid option %s\n", argv[i]);
|
||||
help();
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
printf("\nReading in %s...\n\n",filename.c_str());
|
||||
printf("\nReading in %s...\n\n",filename);
|
||||
CvDTree dtree;
|
||||
CvBoost boost;
|
||||
CvRTrees rtrees;
|
||||
@ -93,7 +98,7 @@ int main(int argc, const char** argv)
|
||||
|
||||
CvTrainTestSplit spl( 0.5f );
|
||||
|
||||
if ( data.read_csv( filename.c_str() ) == 0)
|
||||
if ( data.read_csv( filename ) == 0)
|
||||
{
|
||||
data.set_response_idx( response_idx );
|
||||
if(categorical_response)
|
||||
|
@ -1,4 +1,3 @@
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
#include "opencv2/features2d/features2d.hpp"
|
||||
@ -28,26 +27,29 @@ const string bowImageDescriptorsDir = "/bowImageDescriptors";
|
||||
const string svmsDir = "/svms";
|
||||
const string plotsDir = "/plots";
|
||||
|
||||
void help()
|
||||
void help(char** argv)
|
||||
{
|
||||
printf("\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n"
|
||||
"It shows how to use detectors, descriptors and recognition methods \n"
|
||||
"Usage: \n"
|
||||
"Format:\n"
|
||||
"./bagofwords_classification \n"
|
||||
"--voc_path=<Path to Pascal VOC data (e.g. /home/my/VOCdevkit/VOC2010). \n"
|
||||
" Note: VOC2007-VOC2010 are supported.> \n"
|
||||
"--result_directory=<Path to result directory. Following folders will be created in [result directory]: \n"
|
||||
" bowImageDescriptors - to store image descriptors, \n"
|
||||
" svms - to store trained svms, \n"
|
||||
" plots - to store files for plots creating. \n"
|
||||
"[--feature_detector]=<Feature detector name (e.g. SURF, FAST...) - see createFeatureDetector() function in detectors.cpp \n"
|
||||
" Currently 12/2010, this is FAST, STAR, SIFT, SURF, MSER, GFTT, HARRIS> \n"
|
||||
"[--descriptor_extractor]=<Descriptor extractor name (e.g. SURF, SIFT) - see createDescriptorExtractor() function in descriptors.cpp \n"
|
||||
" Currently 12/2010, this is SURF, OpponentSIFT, SIFT, OpponentSURF, BRIEF> \n"
|
||||
"[--descriptor_matcher]=<Descriptor matcher name (e.g. BruteForce) - see createDescriptorMatcher() function in matchers.cpp \n"
|
||||
" Currently 12/2010, this is BruteForce, BruteForce-L1, FlannBased, BruteForce-Hamming, BruteForce-HammingLUT> \n"
|
||||
"\n");
|
||||
cout << "\nThis program shows how to read in, train on and produce test results for the PASCAL VOC (Visual Object Challenge) data. \n"
|
||||
<< "It shows how to use detectors, descriptors and recognition methods \n"
|
||||
"Using OpenCV version %s\n" << CV_VERSION << "\n"
|
||||
<< "Call: \n"
|
||||
<< "Format:\n ./" << argv[0] << " [VOC path] [result directory] \n"
|
||||
<< " or: \n"
|
||||
<< " ./" << argv[0] << " [VOC path] [result directory] [feature detector] [descriptor extractor] [descriptor matcher] \n"
|
||||
<< "\n"
|
||||
<< "Input parameters: \n"
|
||||
<< "[VOC path] Path to Pascal VOC data (e.g. /home/my/VOCdevkit/VOC2010). Note: VOC2007-VOC2010 are supported. \n"
|
||||
<< "[result directory] Path to result diractory. Following folders will be created in [result directory]: \n"
|
||||
<< " bowImageDescriptors - to store image descriptors, \n"
|
||||
<< " svms - to store trained svms, \n"
|
||||
<< " plots - to store files for plots creating. \n"
|
||||
<< "[feature detector] Feature detector name (e.g. SURF, FAST...) - see createFeatureDetector() function in detectors.cpp \n"
|
||||
<< " Currently 12/2010, this is FAST, STAR, SIFT, SURF, MSER, GFTT, HARRIS \n"
|
||||
<< "[descriptor extractor] Descriptor extractor name (e.g. SURF, SIFT) - see createDescriptorExtractor() function in descriptors.cpp \n"
|
||||
<< " Currently 12/2010, this is SURF, OpponentSIFT, SIFT, OpponentSURF, BRIEF \n"
|
||||
<< "[descriptor matcher] Descriptor matcher name (e.g. BruteForce) - see createDescriptorMatcher() function in matchers.cpp \n"
|
||||
<< " Currently 12/2010, this is BruteForce, BruteForce-L1, FlannBased, BruteForce-Hamming, BruteForce-HammingLUT \n"
|
||||
<< "\n";
|
||||
}
|
||||
|
||||
|
||||
@ -2505,24 +2507,16 @@ void computeGnuPlotOutput( const string& resPath, const string& objClassName, Vo
|
||||
|
||||
|
||||
|
||||
int main(int argc, const char** argv)
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
const string vocPath = parser.get<string>("--voc_path");
|
||||
const string resPath = parser.get<string>("--result_directory");
|
||||
const string featureDetectName = parser.get<string>("--feature_detector");
|
||||
const string descExtName = parser.get<string>("--descriptor_extractor");
|
||||
const string descMatchName = parser.get<string>("--descriptor_matcher");
|
||||
|
||||
if( vocPath.empty() || resPath.empty())
|
||||
if( argc != 3 && argc != 6 )
|
||||
{
|
||||
help();
|
||||
printf("Cannot find --voc_path=%s or --result_directory=%s\n", vocPath.c_str(), resPath.c_str());
|
||||
help(argv);
|
||||
return -1;
|
||||
}
|
||||
|
||||
const string vocPath = argv[1], resPath = argv[2];
|
||||
|
||||
// Read or set default parameters
|
||||
string vocName;
|
||||
DDMParams ddmParams;
|
||||
@ -2540,12 +2534,12 @@ int main(int argc, const char** argv)
|
||||
else
|
||||
{
|
||||
vocName = getVocName(vocPath);
|
||||
if( featureDetectName.empty() || descExtName.empty() || descMatchName.empty())
|
||||
if( argc!= 6 )
|
||||
{
|
||||
cout << "Feature detector, descriptor extractor, descriptor matcher must be set" << endl;
|
||||
return -1;
|
||||
}
|
||||
ddmParams = DDMParams( featureDetectName.c_str(), descExtName.c_str(), descMatchName.c_str()); // from command line
|
||||
ddmParams = DDMParams( argv[3], argv[4], argv[5] ); // from command line
|
||||
// vocabTrainParams and svmTrainParamsExt is set by defaults
|
||||
paramsFS.open( resPath + "/" + paramsFile, FileStorage::WRITE );
|
||||
if( paramsFS.isOpened() )
|
||||
|
@ -1,40 +1,32 @@
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/video/background_segm.hpp"
|
||||
#include "opencv2/highgui/highgui.hpp"
|
||||
#include <stdio.h>
|
||||
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
void help()
|
||||
{
|
||||
printf("\nDo background segmentation, especially demonstrating the use of cvUpdateBGStatModel().\n"
|
||||
" Learns the background at the start and then segments.\n"
|
||||
" Learning is togged by the space key. Will read from file or camera\n"
|
||||
"Usage: \n"
|
||||
" ./bgfg_segm [--file_name]=<input file, camera as defautl>\n\n");
|
||||
"Learns the background at the start and then segments.\n"
|
||||
"Learning is togged by the space key. Will read from file or camera\n"
|
||||
"Call:\n"
|
||||
"./ bgfg_segm [file name -- if no name, read from camera]\n\n");
|
||||
}
|
||||
|
||||
//this is a sample for foreground detection functions
|
||||
int main(int argc, const char** argv)
|
||||
int main(int argc, char** argv)
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
string fileName = parser.get<string>("file_name", "0");
|
||||
VideoCapture cap;
|
||||
bool update_bg_model = true;
|
||||
|
||||
|
||||
if(fileName == "0" )
|
||||
if( argc < 2 )
|
||||
cap.open(0);
|
||||
else
|
||||
cap.open(fileName.c_str());
|
||||
|
||||
cap.open(argv[1]);
|
||||
help();
|
||||
|
||||
if( !cap.isOpened() )
|
||||
{
|
||||
help();
|
||||
printf("can not open camera or video file\n");
|
||||
return -1;
|
||||
}
|
||||
|
@ -4,7 +4,6 @@
|
||||
* Created on: Oct 17, 2010
|
||||
* Author: ethan
|
||||
*/
|
||||
#include "opencv2/core/core.hpp"
|
||||
#include "opencv2/calib3d/calib3d.hpp"
|
||||
#include "opencv2/features2d/features2d.hpp"
|
||||
#include "opencv2/imgproc/imgproc.hpp"
|
||||
@ -12,7 +11,6 @@
|
||||
#include <vector>
|
||||
#include <iostream>
|
||||
|
||||
using namespace std;
|
||||
using namespace cv;
|
||||
|
||||
using std::cout;
|
||||
@ -20,15 +18,13 @@ using std::cerr;
|
||||
using std::endl;
|
||||
using std::vector;
|
||||
|
||||
void help()
|
||||
{
|
||||
printf("\nThis program shows how to use BRIEF descriptor to match points in features2d\n"
|
||||
"It takes in two images, finds keypoints and matches them displaying matches and final homography warped results\n"
|
||||
"Usage: \n"
|
||||
" ./brief_match_test [--first_file]=<first file name, left01.jpg as default> \n"
|
||||
" [--second_file]=<second file name, left02.jpg as default> \n"
|
||||
"Example: \n"
|
||||
"./brief_match_test --first_file=left01.jpg --second_file=left02.jpg \n");
|
||||
void help(char **av)
|
||||
{
|
||||
cerr << "usage: " << av[0] << " im1.jpg im2.jpg"
|
||||
<< "\n"
|
||||
<< "This program shows how to use BRIEF descriptor to match points in features2d\n"
|
||||
<< "It takes in two images, finds keypoints and matches them displaying matches and final homography warped results\n"
|
||||
<< endl;
|
||||
}
|
||||
|
||||
//Copy (x,y) location of descriptor matches found from KeyPoint data structures into Point2f vectors
|
||||
@ -59,22 +55,16 @@ double match(const vector<KeyPoint>& /*kpts_train*/, const vector<KeyPoint>& /*k
|
||||
|
||||
|
||||
|
||||
int main(int ac, const char ** av)
|
||||
int main(int ac, char ** av)
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(ac, av);
|
||||
|
||||
string im1_name, im2_name;
|
||||
im1_name = parser.get<string>("first_file", "left01.jpg");
|
||||
im2_name = parser.get<string>("second_file", "left02.jpg");
|
||||
|
||||
if (im1_name.empty() || im2_name.empty())
|
||||
if (ac != 3)
|
||||
{
|
||||
help();
|
||||
printf("\n You have to indicate two files first_file and second_file \n");
|
||||
return -1;
|
||||
help(av);
|
||||
return 1;
|
||||
}
|
||||
string im1_name, im2_name;
|
||||
im1_name = av[1];
|
||||
im2_name = av[2];
|
||||
|
||||
Mat im1 = imread(im1_name, CV_LOAD_IMAGE_GRAYSCALE);
|
||||
Mat im2 = imread(im2_name, CV_LOAD_IMAGE_GRAYSCALE);
|
||||
|
@ -9,59 +9,69 @@
|
||||
using namespace cv;
|
||||
using namespace std;
|
||||
|
||||
const char * usage =
|
||||
" \nexample command line for calibration from a live feed.\n"
|
||||
" calibration -w 4 -h 5 -s 0.025 -o camera.yml -op -oe\n"
|
||||
" \n"
|
||||
" example command line for calibration from a list of stored images:\n"
|
||||
" imagelist_creator image_list.xml *.png\n"
|
||||
" calibration -w 4 -h 5 -s 0.025 -o camera.yml -op -oe image_list.xml\n"
|
||||
" where image_list.xml is the standard OpenCV XML/YAML\n"
|
||||
" use imagelist_creator to create the xml or yaml list\n"
|
||||
" file consisting of the list of strings, e.g.:\n"
|
||||
" \n"
|
||||
"<?xml version=\"1.0\"?>\n"
|
||||
"<opencv_storage>\n"
|
||||
"<images>\n"
|
||||
"view000.png\n"
|
||||
"view001.png\n"
|
||||
"<!-- view002.png -->\n"
|
||||
"view003.png\n"
|
||||
"view010.png\n"
|
||||
"one_extra_view.jpg\n"
|
||||
"</images>\n"
|
||||
"</opencv_storage>\n";
|
||||
|
||||
|
||||
|
||||
|
||||
const char* liveCaptureHelp =
|
||||
"When the live video from camera is used as input, the following hot-keys may be used:\n"
|
||||
" <ESC>, 'q' - quit the program\n"
|
||||
" 'g' - start capturing images\n"
|
||||
" 'u' - switch undistortion on/off\n";
|
||||
|
||||
void help()
|
||||
{
|
||||
printf( "This is a camera calibration sample.\n"
|
||||
"Usage: calibration\n"
|
||||
" -w=<board_width> # the number of inner corners per one of board dimension\n"
|
||||
" -h=<board_height> # the number of inner corners per another board dimension\n"
|
||||
" [-pt]=<pattern> # the type of pattern: chessboard or circles' grid\n"
|
||||
" [-n]=<number_of_frames> # the number of frames to use for calibration\n"
|
||||
" -w <board_width> # the number of inner corners per one of board dimension\n"
|
||||
" -h <board_height> # the number of inner corners per another board dimension\n"
|
||||
" [-pt <pattern>] # the type of pattern: chessboard or circles' grid\n"
|
||||
" [-n <number_of_frames>] # the number of frames to use for calibration\n"
|
||||
" # (if not specified, it will be set to the number\n"
|
||||
" # of board views actually available)\n"
|
||||
" [-d]=<delay> # a minimum delay in ms between subsequent attempts to capture a next view\n"
|
||||
" [-d <delay>] # a minimum delay in ms between subsequent attempts to capture a next view\n"
|
||||
" # (used only for video capturing)\n"
|
||||
" [-s]=<squareSize> # square size in some user-defined units (1 by default)\n"
|
||||
" [-o]=<out_camera_params> # the output filename for intrinsic [and extrinsic] parameters\n"
|
||||
" [-s <squareSize>] # square size in some user-defined units (1 by default)\n"
|
||||
" [-o <out_camera_params>] # the output filename for intrinsic [and extrinsic] parameters\n"
|
||||
" [-op] # write detected feature points\n"
|
||||
" [-oe] # write extrinsic parameters\n"
|
||||
" [-zt] # assume zero tangential distortion\n"
|
||||
" [-a]=<aspectRatio> # fix aspect ratio (fx/fy)\n"
|
||||
" [-a <aspectRatio>] # fix aspect ratio (fx/fy)\n"
|
||||
" [-p] # fix the principal point at the center\n"
|
||||
" [-v] # flip the captured images around the horizontal axis\n"
|
||||
" [-V] # use a video file, and not an image list, uses\n"
|
||||
" # [input_data] string for the video file name\n"
|
||||
" [-su] # show undistorted images after calibration\n"
|
||||
" [-input_data]=<data file> # input data, one of the following:\n"
|
||||
" [input_data] # input data, one of the following:\n"
|
||||
" # - text file with a list of the images of the board\n"
|
||||
" # the text file can be generated with imagelist_creator\n"
|
||||
" # - name of video file with a video of the board\n"
|
||||
" [-cameraId]=<camera index># if input_data not specified, a live view from the camera is used\n"
|
||||
" \nExample command line for calibration from a live feed:\n"
|
||||
" ./calibration -w=4 -h=5 -s=0.025 -o=camera.yml -op -oe\n"
|
||||
" \n"
|
||||
" Example command line for calibration from a list of stored images:\n"
|
||||
" imagelist_creator image_list.xml *.png\n"
|
||||
" ./calibration -w=4 -h-5 -s=0.025 -o=camera.yml -op -oe -input_data=image_list.xml\n"
|
||||
" where image_list.xml is the standard OpenCV XML/YAML\n"
|
||||
" use imagelist_creator to create the xml or yaml list\n"
|
||||
" file consisting of the list of strings, e.g.:\n"
|
||||
" \n"
|
||||
"<?xml version=\"1.0\"?>\n"
|
||||
"<opencv_storage>\n"
|
||||
"<images>\n"
|
||||
"view000.png\n"
|
||||
"view001.png\n"
|
||||
"<!-- view002.png -->\n"
|
||||
"view003.png\n"
|
||||
"view010.png\n"
|
||||
"one_extra_view.jpg\n"
|
||||
"</images>\n"
|
||||
"</opencv_storage>\n"
|
||||
"\nWhen the live video from camera is used as input, the following hot-keys may be used:\n"
|
||||
" <ESC>, 'q' - quit the program\n"
|
||||
" 'g' - start capturing images\n"
|
||||
" 'u' - switch undistortion on/off\n");
|
||||
" # if input_data not specified, a live view from the camera is used\n"
|
||||
"\n" );
|
||||
printf("\n%s",usage);
|
||||
printf( "\n%s", liveCaptureHelp );
|
||||
}
|
||||
|
||||
enum { DETECTION = 0, CAPTURING = 1, CALIBRATED = 2 };
@ -279,74 +289,126 @@ bool runAndSave(const string& outputFilename,
}


int main( int argc, const char** argv )
int main( int argc, char** argv )
{
help();
CommandLineParser parser(argc, argv);

Size boardSize, imageSize;
boardSize.width = parser.get<int>("w");
boardSize.height = parser.get<int>("h");
float squareSize = parser.get<float>("s", 1.f);
float aspectRatio = parser.get<float>("a", 1.f);
float squareSize = 1.f, aspectRatio = 1.f;
Mat cameraMatrix, distCoeffs;
string outputFilename = parser.get<string>("o","out_camera_data.yml");
string inputFilename = parser.get<string>("input_data");
int nframes = parser.get<int>("n", 10);
bool writeExtrinsics = parser.get<bool>("oe");
bool writePoints = parser.get<bool>("op");
bool flipVertical = parser.get<bool>("v");
bool showUndistorted = parser.get<bool>("su");
bool videofile = parser.get<bool>("V");
unsigned int delay = parser.get<unsigned int>("d", 1000);
unsigned int cameraId = parser.get<unsigned int>("cameraId",0);
const char* outputFilename = "out_camera_data.yml";
const char* inputFilename = 0;

int i, nframes = 10;
bool writeExtrinsics = false, writePoints = false;
bool undistortImage = false;
int flags = 0;
VideoCapture capture;
bool flipVertical = false;
bool showUndistorted = false;
bool videofile = false;
int delay = 1000;
clock_t prevTimestamp = 0;
int mode = DETECTION;
int cameraId = 0;
vector<vector<Point2f> > imagePoints;
vector<string> imageList;
Pattern pattern = CHESSBOARD;

if( (boardSize.width < 1) || (boardSize.height < 1))
if( argc < 2 )
{
help();
return fprintf( stderr, "Invalid board width or height. It must be more than zero\n" ), -1;
return 0;
}

if(parser.get<string>("pt")=="circles")
pattern = CIRCLES_GRID;
else if(parser.get<string>("pt")=="acircles")
pattern = ASYMMETRIC_CIRCLES_GRID;
if(squareSize <= 0)
for( i = 1; i < argc; i++ )
{
help();
return fprintf( stderr, "Invalid board square width. It must be more than zero.\n" ), -1;
}
if(nframes < 4)
{
help();
return printf("Invalid number of images. It must be more than 3\n" ), -1;
}
if(aspectRatio <= 0)
{
help();
return printf("Invalid aspect ratio. It must be more than zero\n" ), -1;
}
const char* s = argv[i];
if( strcmp( s, "-w" ) == 0 )
{
if( sscanf( argv[++i], "%u", &boardSize.width ) != 1 || boardSize.width <= 0 )
return fprintf( stderr, "Invalid board width\n" ), -1;
}
else if( strcmp( s, "-h" ) == 0 )
{
if( sscanf( argv[++i], "%u", &boardSize.height ) != 1 || boardSize.height <= 0 )
return fprintf( stderr, "Invalid board height\n" ), -1;
}
else if( strcmp( s, "-pt" ) == 0 )
{
i++;
if( !strcmp( argv[i], "circles" ) )
pattern = CIRCLES_GRID;
else if( !strcmp( argv[i], "acircles" ) )
pattern = ASYMMETRIC_CIRCLES_GRID;
else if( !strcmp( argv[i], "chessboard" ) )
pattern = CHESSBOARD;
else
return fprintf( stderr, "Invalid pattern type: must be chessboard or circles\n" ), -1;
}
else if( strcmp( s, "-s" ) == 0 )
{
if( sscanf( argv[++i], "%f", &squareSize ) != 1 || squareSize <= 0 )
return fprintf( stderr, "Invalid board square width\n" ), -1;
}
else if( strcmp( s, "-n" ) == 0 )
{
if( sscanf( argv[++i], "%u", &nframes ) != 1 || nframes <= 3 )
return printf("Invalid number of images\n" ), -1;
}
else if( strcmp( s, "-a" ) == 0 )
{
if( sscanf( argv[++i], "%f", &aspectRatio ) != 1 || aspectRatio <= 0 )
return printf("Invalid aspect ratio\n" ), -1;
flags |= CV_CALIB_FIX_ASPECT_RATIO;
}
else if( strcmp( s, "-d" ) == 0 )
{
if( sscanf( argv[++i], "%u", &delay ) != 1 || delay <= 0 )
return printf("Invalid delay\n" ), -1;
}
else if( strcmp( s, "-op" ) == 0 )
{
writePoints = true;
}
else if( strcmp( s, "-oe" ) == 0 )
{
writeExtrinsics = true;
}
else if( strcmp( s, "-zt" ) == 0 )
{
flags |= CV_CALIB_ZERO_TANGENT_DIST;
}
else if( strcmp( s, "-p" ) == 0 )
{
flags |= CV_CALIB_FIX_PRINCIPAL_POINT;
}
else if( strcmp( s, "-v" ) == 0 )
{
flipVertical = true;
}
else if( strcmp( s, "-V" ) == 0 )
{
videofile = true;
}
else if( strcmp( s, "-o" ) == 0 )
{
outputFilename = argv[++i];
}
else if( strcmp( s, "-su" ) == 0 )
{
showUndistorted = true;
}
else if( s[0] != '-' )
{
if( isdigit(s[0]) )
sscanf(s, "%d", &cameraId);
else
inputFilename = s;
}
else
flags |= CV_CALIB_FIX_ASPECT_RATIO;
if(!delay)
{
help();
return printf("Invalid delay. It must be more than zero.\n" ), -1;
return fprintf( stderr, "Unknown option %s", s ), -1;
}
if(parser.get<bool>("zt"))
flags |= CV_CALIB_ZERO_TANGENT_DIST;
if(parser.get<bool>("p"))
flags |= CV_CALIB_FIX_PRINCIPAL_POINT;

if( !inputFilename.empty() )
if( inputFilename )
{
if( !videofile && readStringList(inputFilename, imageList) )
mode = CAPTURING;
@ -362,9 +424,11 @@ int main( int argc, const char** argv )
if( !imageList.empty() )
nframes = (int)imageList.size();

if( capture.isOpened() )
printf( "%s", liveCaptureHelp );

namedWindow( "Image View", 1 );

int i;
for(i = 0;;i++)
{
Mat view, viewGray;

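The hunk above only shows how the command-line options are gathered; the detection and calibration calls they feed belong to parts of the sample not shown here. The following is a rough sketch (the helpers detectBoard and runSketchCalibration are hypothetical, and aspect-ratio handling is omitted) of how boardSize, squareSize, flags and the collected imagePoints typically reach cv::findChessboardCorners and cv::calibrateCamera:

#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <vector>
using namespace cv;
using namespace std;

// Sketch: detect the chessboard in one grayscale view and store its corners.
static bool detectBoard( const Mat& viewGray, Size boardSize,
                         vector<vector<Point2f> >& imagePoints )
{
    vector<Point2f> pointbuf;
    bool found = findChessboardCorners( viewGray, boardSize, pointbuf,
            CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE );
    if( found )
    {
        cornerSubPix( viewGray, pointbuf, Size(11,11), Size(-1,-1),
                      TermCriteria( CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1 ));
        imagePoints.push_back(pointbuf);
    }
    return found;
}

// Sketch: once enough views are collected, run the actual calibration.
static double runSketchCalibration( const vector<vector<Point2f> >& imagePoints,
                                    Size boardSize, Size imageSize, float squareSize,
                                    int flags, Mat& cameraMatrix, Mat& distCoeffs )
{
    // Build one set of 3D board corners and replicate it per view.
    vector<vector<Point3f> > objectPoints(1);
    for( int j = 0; j < boardSize.height; j++ )
        for( int k = 0; k < boardSize.width; k++ )
            objectPoints[0].push_back(Point3f(k*squareSize, j*squareSize, 0.f));
    objectPoints.resize(imagePoints.size(), objectPoints[0]);

    cameraMatrix = Mat::eye(3, 3, CV_64F);     // aspect-ratio handling omitted
    distCoeffs = Mat::zeros(8, 1, CV_64F);
    vector<Mat> rvecs, tvecs;
    return calibrateCamera( objectPoints, imagePoints, imageSize,
                            cameraMatrix, distCoeffs, rvecs, tvecs, flags );
}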
@ -1,9 +1,8 @@
#include "opencv2/core/core.hpp"
#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"

#include <stdio.h>
#include <iostream>
#include <ctype.h>

using namespace cv;
@ -11,17 +10,19 @@ using namespace std;

void help()
{
printf("\nThis is a demo that shows mean-shift based tracking\n"
"You select a color objects such as your face and it tracks it.\n"
|
||||
"This reads from video camera (0 by default, or the camera number the user enters\n"
|
||||
"Usage:\n"
|
||||
"./camshiftdemo [--cameraIndex]=<camera number, zero as default>\n"
|
||||
"\nHot keys: \n"
|
||||
"\tESC - quit the program\n"
|
||||
"\tc - stop the tracking\n"
|
||||
"\tb - switch to/from backprojection view\n"
|
||||
"\th - show/hide object histogram\n"
|
||||
"To initialize tracking, select the object with mouse\n");
|
||||
cout << "\nThis is a demo that shows mean-shift based tracking\n"
|
||||
<< "You select a color objects such as your face and it tracks it.\n"
|
||||
<< "This reads from video camera (0 by default, or the camera number the user enters\n"
|
||||
<< "Call:\n"
|
||||
<< "\n./camshiftdemo [camera number]"
|
||||
<< "\n" << endl;
|
||||
|
||||
cout << "\n\nHot keys: \n"
|
||||
"\tESC - quit the program\n"
|
||||
"\tc - stop the tracking\n"
|
||||
"\tb - switch to/from backprojection view\n"
|
||||
"\th - show/hide object histogram\n"
|
||||
"To initialize tracking, select the object with mouse\n" << endl;
|
||||
}
|
||||
|
||||
Mat image;
|
||||
@ -63,13 +64,8 @@ void onMouse( int event, int x, int y, int, void* )
|
||||
|
||||
|
||||
|
||||
int main( int argc, const char** argv )
|
||||
int main( int argc, char** argv )
|
||||
{
|
||||
help();
|
||||
|
||||
CommandLineParser parser(argc, argv);
|
||||
|
||||
unsigned int cameraInd = parser.get<unsigned int>("cameraIndex", 0);
|
||||
VideoCapture cap;
|
||||
Rect trackWindow;
|
||||
RotatedRect trackBox;
|
||||
@ -77,15 +73,20 @@ int main( int argc, const char** argv )
|
||||
float hranges[] = {0,180};
|
||||
const float* phranges = hranges;
|
||||
|
||||
cap.open(cameraInd);
|
||||
if( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
|
||||
cap.open(argc == 2 ? argv[1][0] - '0' : 0);
|
||||
else if( argc == 2 )
|
||||
cap.open(argv[1]);
|
||||
|
||||
if( !cap.isOpened() )
|
||||
{
|
||||
help();
|
||||
printf("***Could not initialize capturing...***\n");
|
||||
cout << "***Could not initialize capturing...***\n";
|
||||
return 0;
|
||||
}
|
||||
|
||||
help();
|
||||
|
||||
namedWindow( "Histogram", 1 );
|
||||
namedWindow( "CamShift Demo", 1 );
|
||||
setMouseCallback( "CamShift Demo", onMouse, 0 );
|
||||
|
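The tracking loop of this demo is not part of the hunk above. For orientation, here is a hedged sketch of a single CamShift iteration built from the same calls the demo relies on (hist and trackWindow are assumed to have been set up from the mouse-selected region, the saturation/value thresholds are illustrative, and trackOnce is a hypothetical helper, not code from the sample):

#include "opencv2/video/tracking.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;

// Sketch of one tracking iteration on a BGR frame.
static RotatedRect trackOnce( const Mat& image, const Mat& hist, Rect& trackWindow )
{
    Mat hsv, hue, mask, backproj;
    cvtColor( image, hsv, CV_BGR2HSV );
    // keep only reasonably saturated, not-too-dark pixels
    inRange( hsv, Scalar(0, 30, 10), Scalar(180, 256, 256), mask );
    int ch[] = {0, 0};
    hue.create( hsv.size(), hsv.depth() );
    mixChannels( &hsv, 1, &hue, 1, ch, 1 );

    float hranges[] = {0, 180};
    const float* phranges = hranges;
    calcBackProject( &hue, 1, 0, hist, backproj, &phranges );
    backproj &= mask;

    return CamShift( backproj, trackWindow,
                     TermCriteria( CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 10, 1 ) );
}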
@ -2,34 +2,33 @@
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/contrib/contrib.hpp"

#include <stdio.h>
#include <iostream>

using namespace cv;
using namespace std;

void help()
{
printf("\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
"edge template and a query edge image.\n"
"Usage:\n"
"./chamfer [<image edge map, logo_in_clutter.png as default>\n"
"<template edge map, logo.png as default>]\n"
"Example: \n"
" ./chamfer logo_in_clutter.png logo.png\n");
cout <<
"\nThis program demonstrates Chamfer matching -- computing a distance between an \n"
"edge template and a query edge image.\n"
"Call:\n"
"./chamfer [<image edge map> <template edge map>]\n"
"By default\n"
"the inputs are ./chamfer logo_in_clutter.png logo.png\n"<< endl;
}
int main( int argc, const char** argv )
int main( int argc, char** argv )
{
help();

CommandLineParser parser(argc, argv);

string image = parser.get<string>("0","logo_in_clutter.png");
string tempLate = parser.get<string>("1","logo.png");
Mat img = imread(image,0);
if( argc != 1 && argc != 3 )
{
help();
return 0;
}
Mat img = imread(argc == 3 ? argv[1] : "logo_in_clutter.png", 0);
Mat cimg;
cvtColor(img, cimg, CV_GRAY2BGR);
Mat tpl = imread(tempLate,0);

Mat tpl = imread(argc == 3 ? argv[2] : "logo.png", 0);

// if the image and the template are not edge maps but normal grayscale images,
// you might want to uncomment the lines below to produce the maps. You can also
// run Sobel instead of Canny.
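As a minimal illustration of the preprocessing that comment refers to (the thresholds are arbitrary; img and tpl are the Mats loaded above, and cv::Canny is declared in opencv2/imgproc/imgproc.hpp):

Canny( img, img, 50, 200, 3 );   // query image -> edge map, in place
Canny( tpl, tpl, 50, 200, 3 );   // template    -> edge map, in place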
@ -42,7 +41,7 @@ int main( int argc, const char** argv )
int best = chamerMatching( img, tpl, results, costs );
if( best < 0 )
{
printf("not found;\n");
cout << "not found;\n";
return 0;
}
