#include <iostream>
#include <iomanip>
#include <string>
#include <vector>

#include "opencv2/core.hpp"
#include "opencv2/core/utility.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/cudaoptflow.hpp"
#include "opencv2/cudaarithm.hpp"

using namespace std;
using namespace cv;
using namespace cv::cuda;

void getFlowField(const Mat& u, const Mat& v, Mat& flowField);

int main(int argc, const char* argv[])
{
    try
    {
        const char* keys =
            "{ h  help      |       | print help message }"
            "{ l  left      |       | specify left image }"
            "{ r  right     |       | specify right image }"
            "{ s  scale     | 0.8   | set pyramid scale factor }"
            "{ a  alpha     | 0.197 | set alpha }"
            "{ g  gamma     | 50.0  | set gamma }"
            "{ i  inner     | 10    | set number of inner iterations }"
            "{ o  outer     | 77    | set number of outer iterations }"
            "{ si solver    | 10    | set number of basic solver iterations }"
            "{ t  time_step | 0.1   | set frame interpolation time step }";

        CommandLineParser cmd(argc, argv, keys);

        if (cmd.has("help") || !cmd.check())
        {
            cmd.printMessage();
            cmd.printErrors();
            return 0;
        }

        string frame0Name = cmd.get<string>("left");
        string frame1Name = cmd.get<string>("right");
        float scale = cmd.get<float>("scale");
        float alpha = cmd.get<float>("alpha");
        float gamma = cmd.get<float>("gamma");
        int inner_iterations = cmd.get<int>("inner");
        int outer_iterations = cmd.get<int>("outer");
        int solver_iterations = cmd.get<int>("solver");
        float timeStep = cmd.get<float>("time_step");

        if (frame0Name.empty() || frame1Name.empty())
        {
            cerr << "Missing input file names" << endl;
            return -1;
        }

        Mat frame0Color = imread(frame0Name);
        Mat frame1Color = imread(frame1Name);

        if (frame0Color.empty() || frame1Color.empty())
        {
            cerr << "Can't load input images" << endl;
            return -1;
        }

        cv::cuda::printShortCudaDeviceInfo(cv::cuda::getDevice());

        cout << "OpenCV / NVIDIA Computer Vision" << endl;
        cout << "Optical Flow Demo: Frame Interpolation" << endl;
        cout << "=========================================" << endl;

        namedWindow("Forward flow");
        namedWindow("Backward flow");
        namedWindow("Interpolated frame");

        cout << "Press:" << endl;
        cout << "\tESC to quit" << endl;
        cout << "\t'a' to move to the previous frame" << endl;
        cout << "\t's' to move to the next frame\n" << endl;

        // Brox optical flow expects single-channel float input in [0, 1]
        frame0Color.convertTo(frame0Color, CV_32F, 1.0 / 255.0);
        frame1Color.convertTo(frame1Color, CV_32F, 1.0 / 255.0);

        Mat frame0Gray, frame1Gray;

        cv::cvtColor(frame0Color, frame0Gray, COLOR_BGR2GRAY);
        cv::cvtColor(frame1Color, frame1Gray, COLOR_BGR2GRAY);

        GpuMat d_frame0(frame0Gray);
        GpuMat d_frame1(frame1Gray);

        cout << "Estimating optical flow" << endl;

        BroxOpticalFlow d_flow(alpha, gamma, scale, inner_iterations, outer_iterations, solver_iterations);

        cout << "\tForward..." << endl;

        GpuMat d_fu, d_fv;

        d_flow(d_frame0, d_frame1, d_fu, d_fv);

        Mat flowFieldForward;
        getFlowField(Mat(d_fu), Mat(d_fv), flowFieldForward);

        cout << "\tBackward..." << endl;

        GpuMat d_bu, d_bv;

        d_flow(d_frame1, d_frame0, d_bu, d_bv);

        Mat flowFieldBackward;
        getFlowField(Mat(d_bu), Mat(d_bv), flowFieldBackward);

        cout << "Interpolating..." << endl;

        // first frame color components
        GpuMat d_b, d_g, d_r;

        // second frame color components
        GpuMat d_bt, d_gt, d_rt;

        // prepare color components on host and copy them to device memory
        Mat channels[3];

        cv::split(frame0Color, channels);

        d_b.upload(channels[0]);
        d_g.upload(channels[1]);
        d_r.upload(channels[2]);

        cv::split(frame1Color, channels);

        d_bt.upload(channels[0]);
        d_gt.upload(channels[1]);
        d_rt.upload(channels[2]);

        // temporary buffer
        GpuMat d_buf;

        // intermediate frame color components (GPU memory)
        GpuMat d_rNew, d_gNew, d_bNew;

        GpuMat d_newFrame;

        vector<Mat> frames;
        frames.reserve(static_cast<int>(1.0f / timeStep) + 2);

        frames.push_back(frame0Color);

        // compute interpolated frames
        for (float timePos = timeStep; timePos < 1.0f; timePos += timeStep)
        {
            // interpolate blue channel
            interpolateFrames(d_b, d_bt, d_fu, d_fv, d_bu, d_bv, timePos, d_bNew, d_buf);
            // interpolate green channel
            interpolateFrames(d_g, d_gt, d_fu, d_fv, d_bu, d_bv, timePos, d_gNew, d_buf);
            // interpolate red channel
            interpolateFrames(d_r, d_rt, d_fu, d_fv, d_bu, d_bv, timePos, d_rNew, d_buf);

            GpuMat channels3[] = {d_bNew, d_gNew, d_rNew};
            cuda::merge(channels3, 3, d_newFrame);

            frames.push_back(Mat(d_newFrame));

            cout << setprecision(4) << timePos * 100.0f << "%\r";
        }

        frames.push_back(frame1Color);

        cout << setw(5) << "100%" << endl;
        cout << "Done" << endl;

        imshow("Forward flow", flowFieldForward);
        imshow("Backward flow", flowFieldBackward);

        int currentFrame = 0;

        imshow("Interpolated frame", frames[currentFrame]);

        for (;;)
        {
            int key = toupper(waitKey(10) & 0xff);

            switch (key)
            {
            case 27:
                return 0;

            case 'A':
                if (currentFrame > 0)
                    --currentFrame;

                imshow("Interpolated frame", frames[currentFrame]);
                break;

            case 'S':
                if (currentFrame < static_cast<int>(frames.size()) - 1)
                    ++currentFrame;

                imshow("Interpolated frame", frames[currentFrame]);
                break;
            }
        }
    }
    catch (const exception& ex)
    {
        cerr << ex.what() << endl;
        return -1;
    }
    catch (...)
    {
        cerr << "Unknown error" << endl;
        return -1;
    }
}

template <typename T> inline T clamp(T x, T a, T b)
{
    return ((x) > (a) ? ((x) < (b) ? (x) : (b)) : (a));
}

template <typename T> inline T mapValue(T x, T a, T b, T c, T d)
{
    x = clamp(x, a, b);
    return c + (d - c) * (x - a) / (b - a);
}

// visualize the two flow components as a BGRA image, scaled by the maximum displacement
void getFlowField(const Mat& u, const Mat& v, Mat& flowField)
{
    float maxDisplacement = 1.0f;

    for (int i = 0; i < u.rows; ++i)
    {
        const float* ptr_u = u.ptr<float>(i);
        const float* ptr_v = v.ptr<float>(i);

        for (int j = 0; j < u.cols; ++j)
        {
            float d = max(fabsf(ptr_u[j]), fabsf(ptr_v[j]));

            if (d > maxDisplacement)
                maxDisplacement = d;
        }
    }

    flowField.create(u.size(), CV_8UC4);

    for (int i = 0; i < flowField.rows; ++i)
    {
        const float* ptr_u = u.ptr<float>(i);
        const float* ptr_v = v.ptr<float>(i);

        Vec4b* row = flowField.ptr<Vec4b>(i);

        for (int j = 0; j < flowField.cols; ++j)
        {
            row[j][0] = 0;
            row[j][1] = static_cast<unsigned char>(mapValue(-ptr_v[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
            row[j][2] = static_cast<unsigned char>(mapValue( ptr_u[j], -maxDisplacement, maxDisplacement, 0.0f, 255.0f));
            row[j][3] = 255;
        }
    }
}
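A minimal way to exercise the sample once built (the executable name optical_flow_demo is illustrative, and frame0.png / frame1.png stand for any two consecutive frames of a sequence). OpenCV's CommandLineParser takes its switches in the --key=value form, matching the names declared in the keys string above:

    ./optical_flow_demo --left=frame0.png --right=frame1.png --scale=0.8 --time_step=0.1

Switches that are omitted (--alpha, --gamma, --inner, --outer, --solver) fall back to the defaults given in the keys table.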