Merge pull request #10999 from mshabunin:do-more-samples

This commit is contained in:
Maksim Shabunin 2018-03-06 13:31:34 +00:00
commit 09f0ecdf7a
7 changed files with 304 additions and 426 deletions

View File

@ -1,3 +1,7 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/video.hpp"
@ -10,10 +14,10 @@ using namespace cv;
int main(int argc, const char** argv)
{
const String keys = "{c camera||use video stream from camera (default is NO)}"
"{fn file_name|../data/tree.avi|video file}"
"{m method|mog2|method: background subtraction algorithm ('knn', 'mog2')}"
"{h help||show help message}";
const String keys = "{c camera | 0 | use video stream from camera (device index starting from 0) }"
"{fn file_name | | use video file as input }"
"{m method | mog2 | method: background subtraction algorithm ('knn', 'mog2')}"
"{h help | | show help message}";
CommandLineParser parser(argc, argv, keys);
parser.about("This sample demonstrates background segmentation.");
if (parser.has("help"))
@ -21,7 +25,7 @@ int main(int argc, const char** argv)
parser.printMessage();
return 0;
}
bool useCamera = parser.has("camera");
int camera = parser.get<int>("camera");
String file = parser.get<String>("file_name");
String method = parser.get<String>("method");
if (!parser.check())
@ -31,13 +35,13 @@ int main(int argc, const char** argv)
}
VideoCapture cap;
if (useCamera)
cap.open(0);
if (file.empty())
cap.open(camera);
else
cap.open(file.c_str());
if (!cap.isOpened())
{
cout << "Can not open video stream: '" << (useCamera ? "<camera 0>" : file) << "'" << endl;
cout << "Can not open video stream: '" << (file.empty() ? "<camera>" : file) << "'" << endl;
return 2;
}

View File

@ -1,16 +1,17 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include <opencv2/core/utility.hpp>
#include <iostream>
const char* keys =
{
"{ b build | | print complete build info }"
"{ h help | | print this help }"
};
static const std::string keys = "{ b build | | print complete build info }"
"{ h help | | print this help }";
int main(int argc, const char* argv[])
{
cv::CommandLineParser parser(argc, argv, keys);
parser.about("This sample outputs OpenCV version and build configuration.");
if (parser.has("help"))
{
parser.printMessage();
@ -27,6 +28,5 @@ int main(int argc, const char* argv[])
{
std::cout << "Welcome to OpenCV " << CV_VERSION << std::endl;
}
return 0;
}

View File

@ -1,177 +1,126 @@
#include <iostream>
#include <stdexcept>
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include <opencv2/objdetect.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/video.hpp>
#include <opencv2/videoio.hpp>
#include <iostream>
#include <iomanip>
using namespace cv;
using namespace std;
const char* keys =
class Detector
{
"{ help h | | print help message }"
"{ image i | | specify input image}"
"{ camera c | | enable camera capturing }"
"{ video v | ../data/vtest.avi | use video as input }"
"{ directory d | | images directory}"
};
static void detectAndDraw(const HOGDescriptor &hog, Mat &img)
{
vector<Rect> found, found_filtered;
double t = (double) getTickCount();
// Run the detector with default parameters. to get a higher hit-rate
// (and more false alarms, respectively), decrease the hitThreshold and
// groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
t = (double) getTickCount() - t;
cout << "detection time = " << (t*1000./cv::getTickFrequency()) << " ms" << endl;
for(size_t i = 0; i < found.size(); i++ )
enum Mode { Default, Daimler } m;
HOGDescriptor hog, hog_d;
public:
Detector() : m(Default), hog(), hog_d(Size(48, 96), Size(16, 16), Size(8, 8), Size(8, 8), 9)
{
Rect r = found[i];
size_t j;
// Do not add small detections inside a bigger detection.
for ( j = 0; j < found.size(); j++ )
if ( j != i && (r & found[j]) == r )
break;
if ( j == found.size() )
found_filtered.push_back(r);
hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
hog_d.setSVMDetector(HOGDescriptor::getDaimlerPeopleDetector());
}
for (size_t i = 0; i < found_filtered.size(); i++)
void toggleMode() { m = (m == Default ? Daimler : Default); }
string modeName() const { return (m == Default ? "Default" : "Daimler"); }
vector<Rect> detect(InputArray img)
{
// Run the detector with default parameters. to get a higher hit-rate
// (and more false alarms, respectively), decrease the hitThreshold and
// groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
vector<Rect> found;
if (m == Default)
hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2, false);
else if (m == Daimler)
hog_d.detectMultiScale(img, found, 0.5, Size(8,8), Size(32,32), 1.05, 2, true);
return found;
}
void adjustRect(Rect & r) const
{
Rect r = found_filtered[i];
// The HOG detector returns slightly larger rectangles than the real objects,
// so we slightly shrink the rectangles to get a nicer output.
r.x += cvRound(r.width*0.1);
r.width = cvRound(r.width*0.8);
r.y += cvRound(r.height*0.07);
r.height = cvRound(r.height*0.8);
rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
}
}
};
static const string keys = "{ help h | | print help message }"
"{ camera c | 0 | capture video from camera (device index starting from 0) }"
"{ video v | | use video as input }";
int main(int argc, char** argv)
{
CommandLineParser parser(argc, argv, keys);
parser.about("This sample demonstrates the use ot the HoG descriptor.");
if (parser.has("help"))
{
cout << "\nThis program demonstrates the use of the HoG descriptor using\n"
" HOGDescriptor::hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());\n";
parser.printMessage();
cout << "During execution:\n\tHit q or ESC key to quit.\n"
"\tUsing OpenCV version " << CV_VERSION << "\n"
"Note: camera device number must be different from -1.\n" << endl;
return 0;
}
HOGDescriptor hog;
hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
namedWindow("people detector", 1);
string pattern_glob = "";
string video_filename = "../data/vtest.avi";
int camera_id = -1;
if (parser.has("directory"))
int camera = parser.get<int>("camera");
string file = parser.get<string>("video");
if (!parser.check())
{
pattern_glob = parser.get<string>("directory");
}
else if (parser.has("image"))
{
pattern_glob = parser.get<string>("image");
}
else if (parser.has("camera"))
{
camera_id = parser.get<int>("camera");
}
else if (parser.has("video"))
{
video_filename = parser.get<string>("video");
parser.printErrors();
return 1;
}
if (!pattern_glob.empty() || camera_id != -1 || !video_filename.empty())
VideoCapture cap;
if (file.empty())
cap.open(camera);
else
cap.open(file.c_str());
if (!cap.isOpened())
{
//Read from input image files
vector<String> filenames;
//Read from video file
VideoCapture vc;
Mat frame;
cout << "Can not open video stream: '" << (file.empty() ? "<camera>" : file) << "'" << endl;
return 2;
}
if (!pattern_glob.empty())
cout << "Press 'q' or <ESC> to quit." << endl;
cout << "Press <space> to toggle between Default and Daimler detector" << endl;
Detector detector;
Mat frame;
for (;;)
{
cap >> frame;
if (frame.empty())
{
String folder(pattern_glob);
glob(folder, filenames);
cout << "Finished reading: empty frame" << endl;
break;
}
else if (camera_id != -1)
int64 t = getTickCount();
vector<Rect> found = detector.detect(frame);
t = getTickCount() - t;
// show the window
{
vc.open(camera_id);
if (!vc.isOpened())
{
stringstream msg;
msg << "can't open camera: " << camera_id;
throw runtime_error(msg.str());
}
ostringstream buf;
buf << "Mode: " << detector.modeName() << " ||| "
<< "FPS: " << fixed << setprecision(1) << (getTickFrequency() / (double)t);
putText(frame, buf.str(), Point(10, 30), FONT_HERSHEY_PLAIN, 2.0, Scalar(0, 0, 255), 2, LINE_AA);
}
else
for (vector<Rect>::iterator i = found.begin(); i != found.end(); ++i)
{
vc.open(video_filename.c_str());
if (!vc.isOpened())
throw runtime_error(string("can't open video file: " + video_filename));
Rect &r = *i;
detector.adjustRect(r);
rectangle(frame, r.tl(), r.br(), cv::Scalar(0, 255, 0), 2);
}
imshow("People detector", frame);
vector<String>::const_iterator it_image = filenames.begin();
for (;;)
// interact with user
const char key = (char)waitKey(30);
if (key == 27 || key == 'q') // ESC
{
if (!pattern_glob.empty())
{
bool read_image_ok = false;
for (; it_image != filenames.end(); ++it_image)
{
cout << "\nRead: " << *it_image << endl;
// Read current image
frame = imread(*it_image);
if (!frame.empty())
{
++it_image;
read_image_ok = true;
break;
}
}
//No more valid images
if (!read_image_ok)
{
//Release the image in order to exit the while loop
frame.release();
}
}
else
{
vc >> frame;
}
if (frame.empty())
break;
detectAndDraw(hog, frame);
imshow("people detector", frame);
int c = waitKey( vc.isOpened() ? 30 : 0 ) & 255;
if ( c == 'q' || c == 'Q' || c == 27)
break;
cout << "Exit requested" << endl;
break;
}
else if (key == ' ')
{
detector.toggleMode();
}
}
return 0;
}

View File

@ -1,20 +1,18 @@
//
// This program is based on https://github.com/richzhang/colorization/blob/master/colorization/colorize.py
// download the caffemodel from: http://eecs.berkeley.edu/~rich.zhang/projects/2016_colorization/files/demo_v2/colorization_release_v2.caffemodel
// and the prototxt from: https://github.com/richzhang/colorization/blob/master/colorization/models/colorization_deploy_v2.prototxt
//
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include <opencv2/dnn.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
using namespace cv;
using namespace cv::dnn;
#include <iostream>
using namespace std;
// the 313 ab cluster centers from pts_in_hull.npy (already transposed)
float hull_pts[] = {
static float hull_pts[] = {
-90., -90., -90., -90., -90., -80., -80., -80., -80., -80., -80., -80., -80., -70., -70., -70., -70., -70., -70., -70., -70.,
-70., -70., -60., -60., -60., -60., -60., -60., -60., -60., -60., -60., -60., -60., -50., -50., -50., -50., -50., -50., -50., -50.,
-50., -50., -50., -50., -50., -50., -40., -40., -40., -40., -40., -40., -40., -40., -40., -40., -40., -40., -40., -40., -40., -30.,
@ -43,54 +41,61 @@ float hull_pts[] = {
-20., -10., 0., 10., 20., 30., 40., 50., 60., 70., -90., -80., -70., -60., -50., -40., -30., -20., -10., 0.
};
int main(int argc, char **argv)
{
CommandLineParser parser(argc, argv,
"{ help | false | print this help message }"
"{ proto | colorization_deploy_v2.prototxt | model configuration }"
"{ model | colorization_release_v2.caffemodel | model weights }"
"{ image | space_shuttle.jpg | path to image file }"
"{ opencl | false | enable OpenCL }"
);
String modelTxt = parser.get<string>("proto");
String modelBin = parser.get<string>("model");
String imageFile = parser.get<String>("image");
if (parser.get<bool>("help") || modelTxt.empty() || modelBin.empty() || imageFile.empty())
const string about =
"This sample demonstrates recoloring grayscale images with dnn.\n"
"This program is based on:\n"
" http://richzhang.github.io/colorization\n"
" https://github.com/richzhang/colorization\n"
"Download caffemodel and prototxt files:\n"
" http://eecs.berkeley.edu/~rich.zhang/projects/2016_colorization/files/demo_v2/colorization_release_v2.caffemodel\n"
" https://raw.githubusercontent.com/richzhang/colorization/master/colorization/models/colorization_deploy_v2.prototxt\n";
const string keys =
"{ h help | | print this help message }"
"{ proto | colorization_deploy_v2.prototxt | model configuration }"
"{ model | colorization_release_v2.caffemodel | model weights }"
"{ image | space_shuttle.jpg | path to image file }"
"{ opencl | | enable OpenCL }";
CommandLineParser parser(argc, argv, keys);
parser.about(about);
if (parser.has("help"))
{
cout << "A sample app to demonstrate recoloring grayscale images with dnn." << endl;
parser.printMessage();
return 0;
}
// fixed input size for the pretrained network
int W_in = 224;
int H_in = 224;
Net net = dnn::readNetFromCaffe(modelTxt, modelBin);
// setup additional layers:
int sz[] = {2, 313, 1, 1};
Mat pts_in_hull(4, sz, CV_32F, hull_pts);
Ptr<dnn::Layer> class8_ab = net.getLayer("class8_ab");
class8_ab->blobs.push_back(pts_in_hull);
Ptr<dnn::Layer> conv8_313_rh = net.getLayer("conv8_313_rh");
conv8_313_rh->blobs.push_back(Mat(1, 313, CV_32F, 2.606f));
if (parser.get<bool>("opencl"))
string modelTxt = parser.get<string>("proto");
string modelBin = parser.get<string>("model");
string imageFile = parser.get<string>("image");
bool useOpenCL = parser.has("opencl");
if (!parser.check())
{
net.setPreferableTarget(DNN_TARGET_OPENCL);
parser.printErrors();
return 1;
}
Mat img = imread(imageFile);
if (img.empty())
{
std::cerr << "Can't read image from the file: " << imageFile << std::endl;
exit(-1);
cout << "Can't read image from file: " << imageFile << endl;
return 2;
}
// fixed input size for the pretrained network
const int W_in = 224;
const int H_in = 224;
Net net = dnn::readNetFromCaffe(modelTxt, modelBin);
if (useOpenCL)
net.setPreferableTarget(DNN_TARGET_OPENCL);
// setup additional layers:
int sz[] = {2, 313, 1, 1};
const Mat pts_in_hull(4, sz, CV_32F, hull_pts);
Ptr<dnn::Layer> class8_ab = net.getLayer("class8_ab");
class8_ab->blobs.push_back(pts_in_hull);
Ptr<dnn::Layer> conv8_313_rh = net.getLayer("conv8_313_rh");
conv8_313_rh->blobs.push_back(Mat(1, 313, CV_32F, Scalar(2.606)));
// extract L channel and subtract mean
Mat lab, L, input;
img.convertTo(img, CV_32F, 1.0/255);
@ -111,13 +116,11 @@ int main(int argc, char **argv)
resize(a, a, img.size());
resize(b, b, img.size());
// merge, and convert back to bgr
// merge, and convert back to BGR
Mat color, chn[] = {L, a, b};
merge(chn, 3, lab);
cvtColor(lab, color, COLOR_Lab2BGR);
namedWindow("color", WINDOW_NORMAL);
namedWindow("original", WINDOW_NORMAL);
imshow("color", color);
imshow("original", img);
waitKey();

View File

@ -0,0 +1,151 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include <iostream>
#include <iomanip>
#include <vector>
#include "opencv2/core/ocl.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/video.hpp"
using namespace std;
using namespace cv;
// Convert a 2-channel dense optical flow field into a BGR image for display:
// flow direction becomes hue, flow magnitude (scaled by 0.2) becomes value,
// and saturation is fixed at 1, so faster motion appears brighter.
static Mat getVisibleFlow(InputArray flow)
{
    vector<UMat> planes;
    split(flow, planes);

    UMat mag, ang;
    // Angle in degrees (true) so it maps directly onto the HSV hue range.
    cartToPolar(planes[0], planes[1], mag, ang, true);
    mag.convertTo(mag, CV_32F, 0.2);

    vector<UMat> hsv_planes(3);
    hsv_planes[0] = ang;                                   // H: direction
    hsv_planes[1] = UMat::ones(ang.size(), ang.type());    // S: full saturation
    hsv_planes[2] = mag;                                   // V: speed
    UMat hsv_img;
    merge(hsv_planes, hsv_img);

    Mat bgr;
    cvtColor(hsv_img, bgr, COLOR_HSV2BGR);
    return bgr;
}
// Shrink sz proportionally so it fits inside bounds, keeping the aspect
// ratio. Sizes already within bounds are returned unchanged (never upscaled).
static Size fitSize(const Size & sz, const Size & bounds)
{
    CV_Assert(sz.area() > 0);
    const bool tooWide = sz.width > bounds.width;
    const bool tooTall = sz.height > bounds.height;
    if (!tooWide && !tooTall)
        return sz;
    const double sx = (double)bounds.width / sz.width;
    const double sy = (double)bounds.height / sz.height;
    const double scale = std::min(sx, sy);
    return Size(cvRound(sz.width * scale), cvRound(sz.height * scale));
}
int main(int argc, const char* argv[])
{
    // Command-line options; an empty default means the flag is off unless given.
    const char* keys =
        "{ h help | | print help message }"
        "{ c camera | 0 | capture video from camera (device index starting from 0) }"
        "{ a algorithm | fb | algorithm (supported: 'fb', 'tvl')}"
        "{ m cpu | | run without OpenCL }"
        "{ v video | | use video as input }"
        "{ o original | | use original frame size (do not resize to 640x480)}"
        ;
    CommandLineParser parser(argc, argv, keys);
    parser.about("This sample demonstrates using of dense optical flow algorithms.");
    if (parser.has("help"))
    {
        parser.printMessage();
        return 0;
    }
    // Read all options before check() so conversion errors are reported together.
    int camera = parser.get<int>("camera");
    string algorithm = parser.get<string>("algorithm");
    bool useCPU = parser.has("cpu");
    string filename = parser.get<string>("video");
    bool useOriginalSize = parser.has("original");
    if (!parser.check())
    {
        parser.printErrors();
        return 1;
    }
    // A video file takes precedence; otherwise open the camera device index.
    VideoCapture cap;
    if(filename.empty())
        cap.open(camera);
    else
        cap.open(filename);
    if (!cap.isOpened())
    {
        cout << "Can not open video stream: '" << (filename.empty() ? "<camera>" : filename) << "'" << endl;
        return 2;
    }
    // Select the dense optical flow implementation by its short name.
    cv::Ptr<cv::DenseOpticalFlow> alg;
    if (algorithm == "fb")
        alg = cv::FarnebackOpticalFlow::create();
    else if (algorithm == "tvl")
        alg = cv::DualTVL1OpticalFlow::create();
    else
    {
        cout << "Invalid algorithm: " << algorithm << endl;
        return 3;
    }
    // OpenCL use is a global toggle; UMat operations below pick it up.
    ocl::setUseOpenCL(!useCPU);
    cout << "Press 'm' to toggle CPU/GPU processing mode" << endl;
    cout << "Press ESC or 'q' to exit" << endl;
    UMat prevFrame, frame, input_frame, flow;
    for(;;)
    {
        if (!cap.read(input_frame) || input_frame.empty())
        {
            cout << "Finished reading: empty frame" << endl;
            break;
        }
        // Downscale to fit within 640x480 (aspect preserved) unless disabled.
        Size small_size = fitSize(input_frame.size(), Size(640, 480));
        if (!useOriginalSize && small_size != input_frame.size())
            resize(input_frame, frame, small_size);
        else
            frame = input_frame;
        cvtColor(frame, frame, COLOR_BGR2GRAY);
        imshow("frame", frame);
        // Flow needs two consecutive frames, so the very first iteration only
        // primes prevFrame and skips the computation.
        if (!prevFrame.empty())
        {
            int64 t = getTickCount();
            alg->calc(prevFrame, frame, flow);
            t = getTickCount() - t;
            // show the window
            {
                Mat img = getVisibleFlow(flow);
                ostringstream buf;
                buf << "Algo: " << algorithm << " | "
                    << "Mode: " << (useCPU ? "CPU" : "GPU") << " | "
                    << "FPS: " << fixed << setprecision(1) << (getTickFrequency() / (double)t);
                putText(img, buf.str(), Point(10, 30), FONT_HERSHEY_PLAIN, 2.0, Scalar(0, 0, 255), 2, LINE_AA);
                imshow("Dense optical flow field", img);
            }
        }
        frame.copyTo(prevFrame);
        // interact with user
        const char key = (char)waitKey(30);
        if (key == 27 || key == 'q') // ESC
        {
            cout << "Exit requested" << endl;
            break;
        }
        else if (key == 'm')
        {
            // Toggle OpenCL at runtime; takes effect on the next calc() call.
            useCPU = !useCPU;
            ocl::setUseOpenCL(!useCPU);
            cout << "Set processing mode to: " << (useCPU ? "CPU" : "GPU") << endl;
        }
    }
    return 0;
}

View File

@ -1,3 +1,7 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "opencv2/core.hpp"
#include "opencv2/core/ocl.hpp"
#include "opencv2/highgui.hpp"

View File

@ -1,233 +0,0 @@
#include <iostream>
#include <vector>
#include <iomanip>
#include "opencv2/core/ocl.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/imgcodecs.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/video.hpp"
using namespace std;
using namespace cv;
typedef unsigned char uchar;
#define LOOP_NUM 10
// Accumulated timing state for the benchmark loop in main():
// work_begin holds the start tick of the current measured section;
// work_end accumulates total elapsed ticks across all measured sections.
int64 work_begin = 0;
int64 work_end = 0;

// Mark the start of a measured section.
static void workBegin()
{
    work_begin = getTickCount();
}

// Add the elapsed time of the current section to the running total.
static void workEnd()
{
    work_end += (getTickCount() - work_begin);
}

// Total accumulated measured time, converted from ticks to milliseconds.
static double getTime()
{
    return work_end * 1000. / getTickFrequency();
}
// Clamp x to the range [a, b]: values at or below a map to a,
// values at or above b map to b, everything else passes through.
template <typename T> inline T clamp (T x, T a, T b)
{
    if (x <= a)
        return a;
    return (x < b) ? x : b;
}
// Linearly map x from the source range [a, b] onto the target range [c, d],
// clamping x into [a, b] first so the result always lies within [c, d].
template <typename T> inline T mapValue(T x, T a, T b, T c, T d)
{
    // Inline clamp: x <= a -> a, x >= b -> b, otherwise unchanged.
    if (x <= a)
        x = a;
    else if (x >= b)
        x = b;
    return c + (d - c) * (x - a) / (b - a);
}
static void getFlowField(const Mat& u, const Mat& v, Mat& flowField)
{
float maxDisplacement = 1.0f;
for (int i = 0; i < u.rows; ++i)
{
const float* ptr_u = u.ptr<float>(i);
const float* ptr_v = v.ptr<float>(i);
for (int j = 0; j < u.cols; ++j)
{
float d = max(fabsf(ptr_u[j]), fabsf(ptr_v[j]));
if (d > maxDisplacement)
maxDisplacement = d;
}
}
flowField.create(u.size(), CV_8UC4);
for (int i = 0; i < flowField.rows; ++i)
{
const float* ptr_u = u.ptr<float>(i);
const float* ptr_v = v.ptr<float>(i);
Vec4b* row = flowField.ptr<Vec4b>(i);
for (int j = 0; j < flowField.cols; ++j)
{
row[j][0] = 0;
row[j][1] = static_cast<unsigned char> (mapValue (-ptr_v[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
row[j][2] = static_cast<unsigned char> (mapValue ( ptr_u[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
row[j][3] = 255;
}
}
}
int main(int argc, const char* argv[])
{
    const char* keys =
        "{ h help | | print help message }"
        "{ l left | | specify left image }"
        "{ r right | | specify right image }"
        "{ o output | tvl1_output.jpg | specify output save path }"
        "{ c camera | 0 | enable camera capturing }"
        "{ m cpu_mode | | run without OpenCL }"
        "{ v video | | use video as input }";
    CommandLineParser cmd(argc, argv, keys);
    if (cmd.has("help"))
    {
        cout << "Usage: pyrlk_optical_flow [options]" << endl;
        cout << "Available options:" << endl;
        cmd.printMessage();
        return EXIT_SUCCESS;
    }
    string fname0 = cmd.get<string>("l");
    string fname1 = cmd.get<string>("r");
    string vdofile = cmd.get<string>("v");
    string outpath = cmd.get<string>("o");
    bool useCPU = cmd.get<bool>("m");
    // NOTE(review): "c" is read twice — as a bool (was the flag given) and as
    // an int (the device index).
    bool useCamera = cmd.get<bool>("c");
    int inputName = cmd.get<int>("c");
    UMat frame0, frame1;
    imread(fname0, cv::IMREAD_GRAYSCALE).copyTo(frame0);
    imread(fname1, cv::IMREAD_GRAYSCALE).copyTo(frame1);
    cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
    UMat flow;
    Mat show_flow;
    vector<UMat> flow_vec;
    // Fall back to camera mode when the left/right still images are missing.
    if (frame0.empty() || frame1.empty())
        useCamera = true;
    if (useCamera)
    {
        VideoCapture capture;
        UMat frame, frameCopy;
        UMat frame0Gray, frame1Gray;
        UMat ptr0, ptr1;
        if(vdofile.empty())
            capture.open( inputName );
        else
            capture.open(vdofile.c_str());
        if(!capture.isOpened())
        {
            if(vdofile.empty())
                cout << "Capture from CAM " << inputName << " didn't work" << endl;
            else
                cout << "Capture from file " << vdofile << " failed" <<endl;
            // Jump into the still-image benchmark branch below when capture
            // cannot be opened (frame0/frame1 may still be empty here).
            goto nocamera;
        }
        cout << "In capture ..." << endl;
        // Alternate frames between the frame0/frame1 slots so each iteration
        // computes flow between the two most recent frames.
        for(int i = 0;; i++)
        {
            if( !capture.read(frame) )
                break;
            if (i == 0)
            {
                frame.copyTo( frame0 );
                cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
            }
            else
            {
                if (i%2 == 1)
                {
                    frame.copyTo(frame1);
                    cvtColor(frame1, frame1Gray, COLOR_BGR2GRAY);
                    ptr0 = frame0Gray;
                    ptr1 = frame1Gray;
                }
                else
                {
                    frame.copyTo(frame0);
                    cvtColor(frame0, frame0Gray, COLOR_BGR2GRAY);
                    ptr0 = frame1Gray;
                    ptr1 = frame0Gray;
                }
                alg->calc(ptr0, ptr1, flow);
                split(flow, flow_vec);
                if (i%2 == 1)
                    frame1.copyTo(frameCopy);
                else
                    frame0.copyTo(frameCopy);
                // Visualize u/v components of the computed flow.
                getFlowField(flow_vec[0].getMat(ACCESS_READ), flow_vec[1].getMat(ACCESS_READ), show_flow);
                imshow("tvl1 optical flow field", show_flow);
            }
            char key = (char)waitKey(10);
            if (key == 27)
                break;
            else if (key == 'm' || key == 'M')
            {
                // Toggle OpenCL dispatch at runtime.
                ocl::setUseOpenCL(!cv::ocl::useOpenCL());
                cout << "Switched to " << (ocl::useOpenCL() ? "OpenCL" : "CPU") << " mode\n";
            }
        }
        capture.release();
    }
    else
    {
nocamera:
        if (cmd.has("cpu_mode"))
        {
            ocl::setUseOpenCL(false);
            std::cout << "OpenCL was disabled" << std::endl;
        }
        // Benchmark loop over the two still images; iteration 0 is a warm-up
        // excluded from timing (workBegin/workEnd only run for i > 0).
        for(int i = 0; i <= LOOP_NUM; i ++)
        {
            cout << "loop" << i << endl;
            if (i > 0) workBegin();
            alg->calc(frame0, frame1, flow);
            split(flow, flow_vec);
            if (i > 0 && i <= LOOP_NUM)
                workEnd();
            if (i == LOOP_NUM)
            {
                if (useCPU)
                    cout << "average CPU time (noCamera) : ";
                else
                    cout << "average GPU time (noCamera) : ";
                cout << getTime() / LOOP_NUM << " ms" << endl;
                getFlowField(flow_vec[0].getMat(ACCESS_READ), flow_vec[1].getMat(ACCESS_READ), show_flow);
                imshow("PyrLK [Sparse]", show_flow);
                imwrite(outpath, show_flow);
            }
        }
    }
    waitKey();
    return EXIT_SUCCESS;
}