diff --git a/modules/core/src/matrix_sparse.cpp b/modules/core/src/matrix_sparse.cpp
index a2f061cab3..a37967c222 100644
--- a/modules/core/src/matrix_sparse.cpp
+++ b/modules/core/src/matrix_sparse.cpp
@@ -228,7 +228,7 @@ void SparseMat::create(int d, const int* _sizes, int _type)
         }
     }
     int _sizes_backup[CV_MAX_DIM]; // #5991
-    if (_sizes == hdr->size)
+    if (hdr && _sizes == hdr->size)
     {
         for(int i = 0; i < d; i++ )
             _sizes_backup[i] = _sizes[i];
diff --git a/modules/dnn/test/test_halide_layers.cpp b/modules/dnn/test/test_halide_layers.cpp
index 445f85b376..e9b219fafe 100644
--- a/modules/dnn/test/test_halide_layers.cpp
+++ b/modules/dnn/test/test_halide_layers.cpp
@@ -93,8 +93,10 @@ TEST_P(Convolution, Accuracy)
     Backend backendId = get<0>(get<7>(GetParam()));
     Target targetId = get<1>(get<7>(GetParam()));
 
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starting from OpenVINO 2018R3");
+#endif
 
     bool skipCheck = false;
     if (cvtest::skipUnstableTests && backendId == DNN_BACKEND_OPENCV &&
@@ -274,7 +276,8 @@ TEST_P(AvePooling, Accuracy)
     Size stride = get<3>(GetParam());
     Backend backendId = get<0>(get<4>(GetParam()));
     Target targetId = get<1>(get<4>(GetParam()));
-    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD)
+    if (backendId == DNN_BACKEND_INFERENCE_ENGINE && targetId == DNN_TARGET_MYRIAD &&
+        stride == Size(3, 2) && kernel == Size(3, 3) && outSize != Size(1, 1))
         throw SkipTestException("");
 
     const int inWidth = (outSize.width - 1) * stride.width + kernel.width;
diff --git a/modules/dnn/test/test_layers.cpp b/modules/dnn/test/test_layers.cpp
index a31ce85d0f..28f1167dc5 100644
--- a/modules/dnn/test/test_layers.cpp
+++ b/modules/dnn/test/test_layers.cpp
@@ -215,8 +215,10 @@ TEST(Layer_Test_Reshape, Accuracy)
 
 TEST_P(Test_Caffe_layers, BatchNorm)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starting from OpenVINO 2018R3");
+#endif
     testLayerUsingCaffeModels("layer_batch_norm", true);
     testLayerUsingCaffeModels("layer_batch_norm_local_stats", true, false);
 }
@@ -729,8 +731,10 @@ INSTANTIATE_TEST_CASE_P(Layer_Test, Crop, Combine(
 // into the normalization area.
 TEST_P(Test_Caffe_layers, Average_pooling_kernel_area)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starting from OpenVINO 2018R3");
+#endif
     LayerParams lp;
     lp.name = "testAvePool";
     lp.type = "Pooling";
diff --git a/modules/dnn/test/test_precomp.hpp b/modules/dnn/test/test_precomp.hpp
index 6c1fbd64b1..46299908d8 100644
--- a/modules/dnn/test/test_precomp.hpp
+++ b/modules/dnn/test/test_precomp.hpp
@@ -111,6 +111,7 @@ public:
         {
             throw SkipTestException("Myriad is not available/disabled in OpenCV");
         }
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
         if (inp && ref && inp->size[0] != 1)
         {
             // Myriad plugin supports only batch size 1. Slice a single sample.
@@ -127,6 +128,12 @@ public:
             else
                 throw SkipTestException("Myriad plugin supports only batch size 1");
         }
+#else
+        if (inp && ref && inp->dims == 4 && ref->dims == 4 &&
+            inp->size[0] != 1 && inp->size[0] != ref->size[0])
+            throw SkipTestException("Inconsistent batch size of input and output blobs for Myriad plugin");
+
+#endif
     }
 }
diff --git a/modules/dnn/test/test_tf_importer.cpp b/modules/dnn/test/test_tf_importer.cpp
index 71aa4e7461..b05d1f5440 100644
--- a/modules/dnn/test/test_tf_importer.cpp
+++ b/modules/dnn/test/test_tf_importer.cpp
@@ -144,8 +144,10 @@ TEST_P(Test_TensorFlow_layers, eltwise_add_mul)
 
 TEST_P(Test_TensorFlow_layers, pad_and_concat)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starting from OpenVINO 2018R3");
+#endif
     runTensorFlowNet("pad_and_concat");
 }
 
@@ -180,8 +182,10 @@ TEST_P(Test_TensorFlow_layers, pooling)
 // TODO: fix tests and replace to pooling
 TEST_P(Test_TensorFlow_layers, ave_pool_same)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starting from OpenVINO 2018R3");
+#endif
     runTensorFlowNet("ave_pool_same");
 }
 
@@ -218,9 +222,16 @@ TEST_P(Test_TensorFlow_layers, reshape)
 TEST_P(Test_TensorFlow_layers, flatten)
 {
     if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
-        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD))
         throw SkipTestException("");
     runTensorFlowNet("flatten", true);
+}
+
+TEST_P(Test_TensorFlow_layers, unfused_flatten)
+{
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE &&
+        (target == DNN_TARGET_OPENCL || target == DNN_TARGET_OPENCL_FP16))
+        throw SkipTestException("");
     runTensorFlowNet("unfused_flatten");
     runTensorFlowNet("unfused_flatten_unknown_batch");
 }
@@ -500,8 +511,10 @@ TEST_P(Test_TensorFlow_layers, fp16_pad_and_concat)
 {
     const float l1 = 0.00071;
     const float lInf = 0.012;
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE < 2018030000
     if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
-        throw SkipTestException("");
+        throw SkipTestException("Test is enabled starting from OpenVINO 2018R3");
+#endif
     runTensorFlowNet("fp16_pad_and_concat", false, l1, lInf);
 }
diff --git a/modules/dnn/test/test_torch_importer.cpp b/modules/dnn/test/test_torch_importer.cpp
index b6583da7ef..88742c68cc 100644
--- a/modules/dnn/test/test_torch_importer.cpp
+++ b/modules/dnn/test/test_torch_importer.cpp
@@ -111,10 +111,10 @@ public:
 TEST_P(Test_Torch_layers, run_convolution)
 {
-    if ((backend == DNN_BACKEND_INFERENCE_ENGINE && target != DNN_TARGET_CPU) ||
-        (backend == DNN_BACKEND_OPENCV && target == DNN_TARGET_OPENCL_FP16))
-        throw SkipTestException("");
-    runTorchNet("net_conv", "", false, true);
+    // Output reference values are in range [23.4018, 72.0181]
+    double l1 = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.08 : default_l1;
+    double lInf = (target == DNN_TARGET_OPENCL_FP16 || target == DNN_TARGET_MYRIAD) ? 0.42 : default_lInf;
+    runTorchNet("net_conv", "", false, true, l1, lInf);
 }
 
 TEST_P(Test_Torch_layers, run_pool_max)
 {
@@ -129,19 +129,23 @@ TEST_P(Test_Torch_layers, run_pool_ave)
     runTorchNet("net_pool_ave");
 }
 
-TEST_P(Test_Torch_layers, run_reshape)
+TEST_P(Test_Torch_layers, run_reshape_change_batch_size)
 {
     runTorchNet("net_reshape");
+}
+
+TEST_P(Test_Torch_layers, run_reshape)
+{
     runTorchNet("net_reshape_batch");
     runTorchNet("net_reshape_channels", "", false, true);
 }
 
 TEST_P(Test_Torch_layers, run_reshape_single_sample)
 {
-    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_OPENCL_FP16)
-        throw SkipTestException("");
+    // Reference output values in range [14.4586, 18.4492].
     runTorchNet("net_reshape_single_sample", "", false, false,
-                (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.0052 : 0.0);
+                (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.0073 : default_l1,
+                (target == DNN_TARGET_MYRIAD || target == DNN_TARGET_OPENCL_FP16) ? 0.025 : default_lInf);
 }
 
 TEST_P(Test_Torch_layers, run_linear)
@@ -154,6 +158,10 @@ TEST_P(Test_Torch_layers, run_linear)
 TEST_P(Test_Torch_layers, run_concat)
 {
     runTorchNet("net_concat", "l5_torchMerge");
+}
+
+TEST_P(Test_Torch_layers, run_depth_concat)
+{
     runTorchNet("net_depth_concat", "", false, true, 0.0,
                 target == DNN_TARGET_OPENCL_FP16 ? 0.021 : 0.0);
 }
@@ -207,6 +215,10 @@ TEST_P(Test_Torch_layers, net_conv_gemm_lrn)
 
 TEST_P(Test_Torch_layers, net_inception_block)
 {
+#if defined(INF_ENGINE_RELEASE) && INF_ENGINE_RELEASE == 2018030000
+    if (backend == DNN_BACKEND_INFERENCE_ENGINE && target == DNN_TARGET_MYRIAD)
+        throw SkipTestException("");
+#endif
     runTorchNet("net_inception_block", "", false, true);
 }
diff --git a/modules/imgproc/src/subdivision2d.cpp b/modules/imgproc/src/subdivision2d.cpp
index 596806c3ea..6014774722 100644
--- a/modules/imgproc/src/subdivision2d.cpp
+++ b/modules/imgproc/src/subdivision2d.cpp
@@ -758,24 +758,30 @@ void Subdiv2D::getTriangleList(std::vector<Vec6f>& triangleList) const
     triangleList.clear();
     int i, total = (int)(qedges.size()*4);
     std::vector<bool> edgemask(total, false);
-    Rect2f rect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y);
+    const bool filterPoints = true;
+    Rect2f rect(topLeft.x, topLeft.y, bottomRight.x - topLeft.x, bottomRight.y - topLeft.y);
 
     for( i = 4; i < total; i += 2 )
     {
         if( edgemask[i] )
            continue;
         Point2f a, b, c;
-        int edge = i;
-        edgeOrg(edge, &a);
-        edgemask[edge] = true;
-        edge = getEdge(edge, NEXT_AROUND_LEFT);
-        edgeOrg(edge, &b);
-        edgemask[edge] = true;
-        edge = getEdge(edge, NEXT_AROUND_LEFT);
-        edgeOrg(edge, &c);
-        edgemask[edge] = true;
-        if( rect.contains(a) && rect.contains(b) && rect.contains(c) )
-            triangleList.push_back(Vec6f(a.x, a.y, b.x, b.y, c.x, c.y));
+        int edge_a = i;
+        edgeOrg(edge_a, &a);
+        if (filterPoints && !rect.contains(a))
+            continue;
+        int edge_b = getEdge(edge_a, NEXT_AROUND_LEFT);
+        edgeOrg(edge_b, &b);
+        if (filterPoints && !rect.contains(b))
+            continue;
+        int edge_c = getEdge(edge_b, NEXT_AROUND_LEFT);
+        edgeOrg(edge_c, &c);
+        if (filterPoints && !rect.contains(c))
+            continue;
+        edgemask[edge_a] = true;
+        edgemask[edge_b] = true;
+        edgemask[edge_c] = true;
+        triangleList.push_back(Vec6f(a.x, a.y, b.x, b.y, c.x, c.y));
     }
 }
diff --git a/modules/ts/include/opencv2/ts/ts_gtest.h b/modules/ts/include/opencv2/ts/ts_gtest.h
index 9771c51d7f..d41a846ad4 100644
--- a/modules/ts/include/opencv2/ts/ts_gtest.h
+++ b/modules/ts/include/opencv2/ts/ts_gtest.h
@@ -11397,7 +11397,7 @@ struct TuplePolicy {
   template <size_t I>
   static typename AddReference<const typename ::std::tr1::tuple_element<
-      static_cast<int>(I), Tuple>::type>::type
+      I, Tuple>::type>::type
   get(const Tuple& tuple) { return ::std::tr1::get<I>(tuple); }
diff --git a/samples/directx/d3d10_interop.cpp b/samples/directx/d3d10_interop.cpp
index 48b18bd7b4..85d2607081 100644
--- a/samples/directx/d3d10_interop.cpp
+++ b/samples/directx/d3d10_interop.cpp
@@ -1,11 +1,10 @@
 /*
-// Sample demonstrating interoperability of OpenCV UMat with Direct X surface
-// At first, the data obtained from video file or camera and
-// placed onto Direct X surface,
-// following mapping of this Direct X surface to OpenCV UMat and call cv::Blur
-// function. The result is mapped back to Direct X surface and rendered through
-// Direct X API.
+// A sample program demonstrating interoperability of OpenCV cv::UMat with a Direct X surface.
+// First, data obtained from a video file or camera is placed onto a Direct X surface;
+// this surface is then mapped to an OpenCV cv::UMat and cv::blur() is applied to it.
+// The result is mapped back to the Direct X surface and rendered through the Direct X API.
 */
+
 #define WIN32_LEAN_AND_MEAN
 #include <windows.h>
 #include <d3d10.h>
@@ -20,10 +19,6 @@
 
 #pragma comment (lib, "d3d10.lib")
 
-
-using namespace std;
-using namespace cv;
-
 class D3D10WinApp : public D3DSample
 {
 public:
@@ -67,19 +62,19 @@
                                            &m_pD3D10Dev);
         if (FAILED(r))
         {
-            return -1;
+            return EXIT_FAILURE;
         }
 
         r = m_pD3D10SwapChain->GetBuffer(0, __uuidof(ID3D10Texture2D), (LPVOID*)&m_pBackBuffer);
         if (FAILED(r))
         {
-            return -1;
+            return EXIT_FAILURE;
         }
 
         r = m_pD3D10Dev->CreateRenderTargetView(m_pBackBuffer, NULL, &m_pRenderTarget);
         if (FAILED(r))
         {
-            return -1;
+            return EXIT_FAILURE;
         }
 
         m_pD3D10Dev->OMSetRenderTargets(1, &m_pRenderTarget, NULL);
@@ -110,7 +105,7 @@
         if (FAILED(r))
         {
             std::cerr << "Can't create texture with input image" << std::endl;
-            return -1;
+            return EXIT_FAILURE;
         }
 
         // initialize OpenCL context of OpenCV lib from DirectX
@@ -123,7 +118,7 @@
                                cv::ocl::Context::getDefault().device(0).name() :
                                "No OpenCL device";
 
-        return 0;
+        return EXIT_SUCCESS;
     } // create()
@@ -133,9 +128,9 @@
         HRESULT r;
 
         if (!m_cap.read(m_frame_bgr))
-            return -1;
+            return EXIT_FAILURE;
 
-        cv::cvtColor(m_frame_bgr, m_frame_rgba, COLOR_BGR2RGBA);
+        cv::cvtColor(m_frame_bgr, m_frame_rgba, cv::COLOR_BGR2RGBA);
 
         UINT subResource = ::D3D10CalcSubresource(0, 0, 1);
@@ -154,7 +149,7 @@
 
         *ppSurface = m_pSurface;
 
-        return 0;
+        return EXIT_SUCCESS;
     } // get_surface()
@@ -164,7 +159,7 @@
         try
         {
             if (m_shutdown)
-                return 0;
+                return EXIT_SUCCESS;
 
             // capture user input once
             MODE mode = (m_mode == MODE_GPU_NV12) ? MODE_GPU_RGBA : m_mode;
@@ -175,9 +170,10 @@
             r = get_surface(&pSurface);
             if (FAILED(r))
             {
-                return -1;
+                return EXIT_FAILURE;
             }
 
+            m_timer.reset();
             m_timer.start();
 
             switch (mode)
@@ -199,18 +195,20 @@
                 if (m_demo_processing)
                 {
                     // blur D3D10 surface with OpenCV on CPU
-                    cv::blur(m, m, cv::Size(15, 15), cv::Point(-7, -7));
+                    cv::blur(m, m, cv::Size(15, 15));
                 }
 
+                m_timer.stop();
+
                 cv::String strMode = cv::format("mode: %s", m_modeStr[MODE_CPU].c_str());
                 cv::String strProcessing = m_demo_processing ? "blur frame" : "copy frame";
"blur frame" : "copy frame"; - cv::String strTime = cv::format("time: %4.1f msec", m_timer.time(Timer::UNITS::MSEC)); + cv::String strTime = cv::format("time: %4.3f msec", m_timer.getTimeMilli()); cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str()); - cv::putText(m, strMode, cv::Point(0, 16), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(m, strProcessing, cv::Point(0, 32), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(m, strTime, cv::Point(0, 48), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(m, strDevName, cv::Point(0, 64), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(m, strMode, cv::Point(0, 20), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(m, strProcessing, cv::Point(0, 40), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(m, strTime, cv::Point(0, 60), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(m, strDevName, cv::Point(0, 80), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); pSurface->Unmap(subResource); @@ -227,18 +225,20 @@ public: if (m_demo_processing) { // blur D3D10 surface with OpenCV on GPU with OpenCL - cv::blur(u, u, cv::Size(15, 15), cv::Point(-7, -7)); + cv::blur(u, u, cv::Size(15, 15)); } + m_timer.stop(); + cv::String strMode = cv::format("mode: %s", m_modeStr[MODE_GPU_RGBA].c_str()); cv::String strProcessing = m_demo_processing ? "blur frame" : "copy frame"; - cv::String strTime = cv::format("time: %4.1f msec", m_timer.time(Timer::UNITS::MSEC)); + cv::String strTime = cv::format("time: %4.3f msec", m_timer.getTimeMilli()); cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str()); - cv::putText(u, strMode, cv::Point(0, 16), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(u, strProcessing, cv::Point(0, 32), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(u, strTime, cv::Point(0, 48), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(u, strDevName, cv::Point(0, 64), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(u, strMode, cv::Point(0, 20), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(u, strProcessing, cv::Point(0, 40), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(u, strTime, cv::Point(0, 60), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(u, strDevName, cv::Point(0, 80), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); cv::directx::convertToD3D10Texture2D(u, pSurface); @@ -247,8 +247,6 @@ public: } // switch - m_timer.stop(); - // traditional DX render pipeline: // BitBlt surface to backBuffer and flip backBuffer to frontBuffer m_pD3D10Dev->CopyResource(m_pBackBuffer, pSurface); @@ -258,7 +256,7 @@ public: r = m_pD3D10SwapChain->Present(0, 0); if (FAILED(r)) { - return -1; + return EXIT_FAILURE; } } // try @@ -268,7 +266,7 @@ public: return 10; } - return 0; + return EXIT_SUCCESS; } // render() @@ -280,7 +278,7 @@ public: SAFE_RELEASE(m_pRenderTarget); SAFE_RELEASE(m_pD3D10Dev); D3DSample::cleanup(); - return 0; + return EXIT_SUCCESS; } // cleanup() private: diff --git a/samples/directx/d3d11_interop.cpp b/samples/directx/d3d11_interop.cpp index 4fc40e90f1..42691c0a63 100644 --- a/samples/directx/d3d11_interop.cpp +++ b/samples/directx/d3d11_interop.cpp @@ -1,11 +1,10 @@ /* -// Sample demonstrating interoperability of OpenCV UMat with Direct X surface -// At first, the data obtained from video file or camera and -// placed onto Direct X surface, -// following mapping of this Direct X surface to OpenCV UMat and call cv::Blur -// function. 
The result is mapped back to Direct X surface and rendered through -// Direct X API. +// A sample program demonstrating interoperability of OpenCV cv::UMat with Direct X surface +// At first, the data obtained from video file or camera and placed onto Direct X surface, +// following mapping of this Direct X surface to OpenCV cv::UMat and call cv::Blur function. +// The result is mapped back to Direct X surface and rendered through Direct X API. */ + #define WIN32_LEAN_AND_MEAN #include #include @@ -20,10 +19,6 @@ #pragma comment (lib, "d3d11.lib") - -using namespace std; -using namespace cv; - class D3D11WinApp : public D3DSample { public: @@ -188,7 +183,7 @@ public: cv::ocl::Context::getDefault().device(0).name() : "No OpenCL device"; - return 0; + return EXIT_SUCCESS; } // create() @@ -198,11 +193,11 @@ public: HRESULT r; if (!m_cap.read(m_frame_bgr)) - return -1; + return EXIT_FAILURE; if (use_nv12) { - cv::cvtColor(m_frame_bgr, m_frame_i420, COLOR_BGR2YUV_I420); + cv::cvtColor(m_frame_bgr, m_frame_i420, cv::COLOR_BGR2YUV_I420); convert_I420_to_NV12(m_frame_i420, m_frame_nv12, m_width, m_height); @@ -210,7 +205,7 @@ public: } else { - cv::cvtColor(m_frame_bgr, m_frame_rgba, COLOR_BGR2RGBA); + cv::cvtColor(m_frame_bgr, m_frame_rgba, cv::COLOR_BGR2RGBA); // process video frame on CPU UINT subResource = ::D3D11CalcSubresource(0, 0, 1); @@ -230,7 +225,7 @@ public: *ppSurface = use_nv12 ? m_pSurfaceNV12 : m_pSurfaceRGBA; - return 0; + return EXIT_SUCCESS; } // get_surface() @@ -240,7 +235,7 @@ public: try { if (m_shutdown) - return 0; + return EXIT_SUCCESS; // capture user input once MODE mode = (m_mode == MODE_GPU_NV12 && !m_nv12_available) ? MODE_GPU_RGBA : m_mode; @@ -254,6 +249,7 @@ public: throw std::runtime_error("get_surface() failed!"); } + m_timer.reset(); m_timer.start(); switch (mode) @@ -275,18 +271,20 @@ public: if (m_demo_processing) { // blur data from D3D11 surface with OpenCV on CPU - cv::blur(m, m, cv::Size(15, 15), cv::Point(-7, -7)); + cv::blur(m, m, cv::Size(15, 15)); } + m_timer.stop(); + cv::String strMode = cv::format("mode: %s", m_modeStr[MODE_CPU].c_str()); cv::String strProcessing = m_demo_processing ? "blur frame" : "copy frame"; - cv::String strTime = cv::format("time: %4.1f msec", m_timer.time(Timer::UNITS::MSEC)); + cv::String strTime = cv::format("time: %4.3f msec", m_timer.getTimeMilli()); cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str()); - cv::putText(m, strMode, cv::Point(0, 16), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(m, strProcessing, cv::Point(0, 32), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(m, strTime, cv::Point(0, 48), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(m, strDevName, cv::Point(0, 64), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(m, strMode, cv::Point(0, 20), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(m, strProcessing, cv::Point(0, 40), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(m, strTime, cv::Point(0, 60), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(m, strDevName, cv::Point(0, 80), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); m_pD3D11Ctx->Unmap(pSurface, subResource); @@ -304,18 +302,20 @@ public: if (m_demo_processing) { // blur data from D3D11 surface with OpenCV on GPU with OpenCL - cv::blur(u, u, cv::Size(15, 15), cv::Point(-7, -7)); + cv::blur(u, u, cv::Size(15, 15)); } + m_timer.stop(); + cv::String strMode = cv::format("mode: %s", m_modeStr[mode].c_str()); cv::String strProcessing = m_demo_processing ? 
"blur frame" : "copy frame"; - cv::String strTime = cv::format("time: %4.1f msec", m_timer.time(Timer::UNITS::MSEC)); + cv::String strTime = cv::format("time: %4.3f msec", m_timer.getTimeMilli()); cv::String strDevName = cv::format("OpenCL device: %s", m_oclDevName.c_str()); - cv::putText(u, strMode, cv::Point(0, 16), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(u, strProcessing, cv::Point(0, 32), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(u, strTime, cv::Point(0, 48), 1, 0.8, cv::Scalar(0, 0, 0)); - cv::putText(u, strDevName, cv::Point(0, 64), 1, 0.8, cv::Scalar(0, 0, 0)); + cv::putText(u, strMode, cv::Point(0, 20), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(u, strProcessing, cv::Point(0, 40), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(u, strTime, cv::Point(0, 60), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); + cv::putText(u, strDevName, cv::Point(0, 80), cv::FONT_HERSHEY_SIMPLEX, 0.8, cv::Scalar(0, 0, 200), 2); cv::directx::convertToD3D11Texture2D(u, pSurface); @@ -336,7 +336,7 @@ public: } cv::Mat frame_nv12(m_height + (m_height / 2), m_width, CV_8UC1, mappedTex.pData, mappedTex.RowPitch); - cv::cvtColor(frame_nv12, m_frame_rgba, COLOR_YUV2RGBA_NV12); + cv::cvtColor(frame_nv12, m_frame_rgba, cv::COLOR_YUV2RGBA_NV12); m_pD3D11Ctx->Unmap(m_pSurfaceNV12_cpu_copy, subResource); } @@ -365,8 +365,6 @@ public: } // switch - m_timer.stop(); - // traditional DX render pipeline: // BitBlt surface to backBuffer and flip backBuffer to frontBuffer m_pD3D11Ctx->CopyResource(m_pBackBuffer, pSurface); @@ -394,7 +392,7 @@ public: return 11; } - return 0; + return EXIT_SUCCESS; } // render() @@ -409,7 +407,7 @@ public: SAFE_RELEASE(m_pD3D11Dev); SAFE_RELEASE(m_pD3D11Ctx); D3DSample::cleanup(); - return 0; + return EXIT_SUCCESS; } // cleanup() protected: diff --git a/samples/directx/d3d9_interop.cpp b/samples/directx/d3d9_interop.cpp index 881f6ac9e7..31a1914cf1 100644 --- a/samples/directx/d3d9_interop.cpp +++ b/samples/directx/d3d9_interop.cpp @@ -1,11 +1,10 @@ /* -// Sample demonstrating interoperability of OpenCV UMat with Direct X surface -// At first, the data obtained from video file or camera and -// placed onto Direct X surface, -// following mapping of this Direct X surface to OpenCV UMat and call cv::Blur -// function. The result is mapped back to Direct X surface and rendered through -// Direct X API. +// A sample program demonstrating interoperability of OpenCV cv::UMat with Direct X surface +// At first, the data obtained from video file or camera and placed onto Direct X surface, +// following mapping of this Direct X surface to OpenCV cv::UMat and call cv::Blur function. +// The result is mapped back to Direct X surface and rendered through Direct X API. 
*/ + #define WIN32_LEAN_AND_MEAN #include #include @@ -21,9 +20,6 @@ #pragma comment (lib, "d3d9.lib") -using namespace std; -using namespace cv; - class D3D9WinApp : public D3DSample { public: @@ -43,7 +39,7 @@ public: m_pD3D9 = ::Direct3DCreate9(D3D_SDK_VERSION); if (NULL == m_pD3D9) { - return -1; + return EXIT_FAILURE; } DWORD flags = D3DCREATE_HARDWARE_VERTEXPROCESSING | @@ -70,20 +66,20 @@ public: r = m_pD3D9->CreateDevice(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, m_hWnd, flags, &d3dpp, &m_pD3D9Dev); if (FAILED(r)) { - return -1; + return EXIT_FAILURE; } r = m_pD3D9Dev->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &m_pBackBuffer); if (FAILED(r)) { - return -1; + return EXIT_FAILURE; } r = m_pD3D9Dev->CreateOffscreenPlainSurface(m_width, m_height, D3DFMT_A8R8G8B8, D3DPOOL_DEFAULT, &m_pSurface, NULL); if (FAILED(r)) { std::cerr << "Can't create surface for result" << std::endl; - return -1; + return EXIT_FAILURE; } // initialize OpenCL context of OpenCV lib from DirectX @@ -96,7 +92,7 @@ public: cv::ocl::Context::getDefault().device(0).name() : "No OpenCL device"; - return 0; + return EXIT_SUCCESS; } // create() @@ -106,9 +102,9 @@ public: HRESULT r; if (!m_cap.read(m_frame_bgr)) - return -1; + return EXIT_FAILURE; - cv::cvtColor(m_frame_bgr, m_frame_rgba, COLOR_BGR2BGRA); + cv::cvtColor(m_frame_bgr, m_frame_rgba, cv::COLOR_BGR2BGRA); D3DLOCKED_RECT memDesc = { 0, NULL }; RECT rc = { 0, 0, m_width, m_height }; @@ -131,7 +127,7 @@ public: *ppSurface = m_pSurface; - return 0; + return EXIT_SUCCESS; } // get_surface() @@ -141,7 +137,7 @@ public: try { if (m_shutdown) - return 0; + return EXIT_SUCCESS; // capture user input once MODE mode = (m_mode == MODE_GPU_NV12) ? MODE_GPU_RGBA : m_mode; @@ -152,9 +148,10 @@ public: r = get_surface(&pSurface); if (FAILED(r)) { - return -1; + return EXIT_FAILURE; } + m_timer.reset(); m_timer.start(); switch (mode) @@ -168,7 +165,7 @@ public: r = pSurface->LockRect(&memDesc, &rc, 0); if (FAILED(r)) { - return -1; + return EXIT_FAILURE; } cv::Mat m(m_height, m_width, CV_8UC4, memDesc.pBits, memDesc.Pitch); @@ -176,13 +173,13 @@ public: if (m_demo_processing) { // blur D3D9 surface with OpenCV on CPU - cv::blur(m, m, cv::Size(15, 15), cv::Point(-7, -7)); + cv::blur(m, m, cv::Size(15, 15)); } r = pSurface->UnlockRect(); if (FAILED(r)) { - return -1; + return EXIT_FAILURE; } break; @@ -198,7 +195,7 @@ public: if (m_demo_processing) { // blur D3D9 surface with OpenCV on GPU with OpenCL - cv::blur(u, u, cv::Size(15, 15), cv::Point(-7, -7)); + cv::blur(u, u, cv::Size(15, 15)); } cv::directx::convertToDirect3DSurface9(u, pSurface); @@ -210,21 +207,21 @@ public: m_timer.stop(); - print_info(pSurface, mode, m_timer.time(Timer::UNITS::MSEC), m_oclDevName); + print_info(pSurface, mode, m_timer.getTimeMilli(), m_oclDevName); // traditional DX render pipeline: // BitBlt surface to backBuffer and flip backBuffer to frontBuffer r = m_pD3D9Dev->StretchRect(pSurface, NULL, m_pBackBuffer, NULL, D3DTEXF_NONE); if (FAILED(r)) { - return -1; + return EXIT_FAILURE; } // present the back buffer contents to the display r = m_pD3D9Dev->Present(NULL, NULL, NULL, NULL); if (FAILED(r)) { - return -1; + return EXIT_FAILURE; } } // try @@ -234,11 +231,11 @@ public: return 10; } - return 0; + return EXIT_SUCCESS; } // render() - void print_info(LPDIRECT3DSURFACE9 pSurface, int mode, float time, cv::String oclDevName) + void print_info(LPDIRECT3DSURFACE9 pSurface, int mode, double time, cv::String oclDevName) { HDC hDC; @@ -295,7 +292,7 @@ public: SAFE_RELEASE(m_pD3D9Dev); 
         SAFE_RELEASE(m_pD3D9);
         D3DSample::cleanup();
-        return 0;
+        return EXIT_SUCCESS;
     } // cleanup()
 
 private:
diff --git a/samples/directx/d3d9ex_interop.cpp b/samples/directx/d3d9ex_interop.cpp
index 3ddf4c27a9..ef03bd625a 100644
--- a/samples/directx/d3d9ex_interop.cpp
+++ b/samples/directx/d3d9ex_interop.cpp
@@ -1,11 +1,10 @@
 /*
-// Sample demonstrating interoperability of OpenCV UMat with Direct X surface
-// At first, the data obtained from video file or camera and
-// placed onto Direct X surface,
-// following mapping of this Direct X surface to OpenCV UMat and call cv::Blur
-// function. The result is mapped back to Direct X surface and rendered through
-// Direct X API.
+// A sample program demonstrating interoperability of OpenCV cv::UMat with a Direct X surface.
+// First, data obtained from a video file or camera is placed onto a Direct X surface;
+// this surface is then mapped to an OpenCV cv::UMat and cv::blur() is applied to it.
+// The result is mapped back to the Direct X surface and rendered through the Direct X API.
 */
+
 #define WIN32_LEAN_AND_MEAN
 #include <windows.h>
 #include <d3d9.h>
@@ -21,9 +20,6 @@
 
 #pragma comment (lib, "d3d9.lib")
 
-using namespace std;
-using namespace cv;
-
 class D3D9ExWinApp : public D3DSample
 {
 public:
@@ -43,7 +39,7 @@
         r = ::Direct3DCreate9Ex(D3D_SDK_VERSION, &m_pD3D9Ex);
         if (FAILED(r))
         {
-            return -1;
+            return EXIT_FAILURE;
         }
 
         DWORD flags = D3DCREATE_HARDWARE_VERTEXPROCESSING |
@@ -70,20 +66,20 @@
         r = m_pD3D9Ex->CreateDeviceEx(D3DADAPTER_DEFAULT, D3DDEVTYPE_HAL, m_hWnd, flags, &d3dpp, NULL, &m_pD3D9DevEx);
         if (FAILED(r))
         {
-            return -1;
+            return EXIT_FAILURE;
         }
 
         r = m_pD3D9DevEx->GetBackBuffer(0, 0, D3DBACKBUFFER_TYPE_MONO, &m_pBackBuffer);
         if (FAILED(r))
         {
-            return -1;
+            return EXIT_FAILURE;
         }
 
         r = m_pD3D9DevEx->CreateOffscreenPlainSurface(m_width, m_height, D3DFMT_A8R8G8B8, D3DPOOL_DEFAULT, &m_pSurface, NULL);
         if (FAILED(r))
         {
             std::cerr << "Can't create surface for result" << std::endl;
-            return -1;
+            return EXIT_FAILURE;
         }
 
         // initialize OpenCL context of OpenCV lib from DirectX
@@ -96,7 +92,7 @@
                                cv::ocl::Context::getDefault().device(0).name() :
                                "No OpenCL device";
 
-        return 0;
+        return EXIT_SUCCESS;
     } // create()
@@ -106,9 +102,9 @@
         HRESULT r;
 
         if (!m_cap.read(m_frame_bgr))
-            return -1;
+            return EXIT_FAILURE;
 
-        cv::cvtColor(m_frame_bgr, m_frame_rgba, COLOR_BGR2BGRA);
+        cv::cvtColor(m_frame_bgr, m_frame_rgba, cv::COLOR_BGR2BGRA);
 
         D3DLOCKED_RECT memDesc = { 0, NULL };
         RECT rc = { 0, 0, m_width, m_height };
@@ -131,7 +127,7 @@
 
         *ppSurface = m_pSurface;
 
-        return 0;
+        return EXIT_SUCCESS;
     } // get_surface()
@@ -141,7 +137,7 @@
         try
         {
             if (m_shutdown)
-                return 0;
+                return EXIT_SUCCESS;
 
             // capture user input once
             MODE mode = m_mode == MODE_GPU_NV12 ? MODE_GPU_RGBA : m_mode;
@@ -152,9 +148,10 @@
             r = get_surface(&pSurface);
             if (FAILED(r))
             {
-                return -1;
+                return EXIT_FAILURE;
             }
 
+            m_timer.reset();
             m_timer.start();
 
             switch (mode)
@@ -168,7 +165,7 @@
                 r = pSurface->LockRect(&memDesc, &rc, 0);
                 if (FAILED(r))
                 {
-                    return -1;
+                    return EXIT_FAILURE;
                 }
 
                 cv::Mat m(m_height, m_width, CV_8UC4, memDesc.pBits, memDesc.Pitch);
@@ -176,13 +173,13 @@
                 if (m_demo_processing)
                 {
                     // blur D3D9 surface with OpenCV on CPU
-                    cv::blur(m, m, cv::Size(15, 15), cv::Point(-7, -7));
+                    cv::blur(m, m, cv::Size(15, 15));
                 }
 
                 r = pSurface->UnlockRect();
                 if (FAILED(r))
                 {
-                    return -1;
+                    return EXIT_FAILURE;
                 }
 
                 break;
@@ -198,7 +195,7 @@
                 if (m_demo_processing)
                 {
                     // blur D3D9 surface with OpenCV on GPU with OpenCL
-                    cv::blur(u, u, cv::Size(15, 15), cv::Point(-7, -7));
+                    cv::blur(u, u, cv::Size(15, 15));
                 }
 
                 cv::directx::convertToDirect3DSurface9(u, pSurface);
@@ -210,21 +207,21 @@
 
             m_timer.stop();
 
-            print_info(pSurface, m_mode, m_timer.time(Timer::UNITS::MSEC), m_oclDevName);
+            print_info(pSurface, m_mode, m_timer.getTimeMilli(), m_oclDevName);
 
             // traditional DX render pipeline:
             // BitBlt surface to backBuffer and flip backBuffer to frontBuffer
             r = m_pD3D9DevEx->StretchRect(pSurface, NULL, m_pBackBuffer, NULL, D3DTEXF_NONE);
             if (FAILED(r))
             {
-                return -1;
+                return EXIT_FAILURE;
             }
 
             // present the back buffer contents to the display
             r = m_pD3D9DevEx->Present(NULL, NULL, NULL, NULL);
             if (FAILED(r))
             {
-                return -1;
+                return EXIT_FAILURE;
             }
         } // try
@@ -235,11 +232,11 @@
             return 10;
         }
 
-        return 0;
+        return EXIT_SUCCESS;
     } // render()
 
-    void print_info(LPDIRECT3DSURFACE9 pSurface, int mode, float time, cv::String oclDevName)
+    void print_info(LPDIRECT3DSURFACE9 pSurface, int mode, double time, cv::String oclDevName)
     {
         HDC hDC;
@@ -296,7 +293,7 @@
         SAFE_RELEASE(m_pD3D9DevEx);
         SAFE_RELEASE(m_pD3D9Ex);
         D3DSample::cleanup();
-        return 0;
+        return EXIT_SUCCESS;
     } // cleanup()
 
 private:
diff --git a/samples/directx/d3dsample.hpp b/samples/directx/d3dsample.hpp
index 6031bb6138..b082ff9c92 100644
--- a/samples/directx/d3dsample.hpp
+++ b/samples/directx/d3dsample.hpp
@@ -17,50 +17,6 @@
 
 #define SAFE_RELEASE(p) if (p) { p->Release(); p = NULL; }
 
-class Timer
-{
-public:
-    enum UNITS
-    {
-        USEC = 0,
-        MSEC,
-        SEC
-    };
-
-    Timer() : m_t0(0), m_diff(0)
-    {
-        m_tick_frequency = (float)cv::getTickFrequency();
-
-        m_unit_mul[USEC] = 1000000;
-        m_unit_mul[MSEC] = 1000;
-        m_unit_mul[SEC]  = 1;
-    }
-
-    void start()
-    {
-        m_t0 = cv::getTickCount();
-    }
-
-    void stop()
-    {
-        m_diff = cv::getTickCount() - m_t0;
-    }
-
-    float time(UNITS u = UNITS::MSEC)
-    {
-        float sec = m_diff / m_tick_frequency;
-
-        return sec * m_unit_mul[u];
-    }
-
-public:
-    float m_tick_frequency;
-    int64 m_t0;
-    int64 m_diff;
-    int   m_unit_mul[3];
-};
-
-
 class D3DSample : public WinApp
 {
 public:
@@ -102,22 +58,22 @@
             if (wParam == '1')
             {
                 m_mode = MODE_CPU;
-                return 0;
+                return EXIT_SUCCESS;
             }
             if (wParam == '2')
             {
                 m_mode = MODE_GPU_RGBA;
-                return 0;
+                return EXIT_SUCCESS;
             }
             if (wParam == '3')
             {
                 m_mode = MODE_GPU_NV12;
-                return 0;
+                return EXIT_SUCCESS;
             }
             else if (wParam == VK_SPACE)
             {
                 m_demo_processing = !m_demo_processing;
-                return 0;
+                return EXIT_SUCCESS;
             }
             else if (wParam == VK_ESCAPE)
             {
@@ -130,7 +86,7 @@
 
         case WM_DESTROY:
             ::PostQuitMessage(0);
-            return 0;
+            return EXIT_SUCCESS;
         }
 
         return ::DefWindowProc(hWnd, message, wParam, lParam);
@@ -147,28 +103,14 @@
     cv::VideoCapture m_cap;
     cv::Mat          m_frame_bgr;
    cv::Mat          m_frame_rgba;
-    Timer            m_timer;
+    cv::TickMeter    m_timer;
 };
 
 
-static void help()
-{
-    printf(
-        "\nSample demonstrating interoperability of DirectX and OpenCL with OpenCV.\n"
-        "Hot keys: \n"
-        " SPACE - turn processing on/off\n"
-        " 1 - process DX surface through OpenCV on CPU\n"
-        " 2 - process DX RGBA surface through OpenCV on GPU (via OpenCL)\n"
-        " 3 - process DX NV12 surface through OpenCV on GPU (via OpenCL)\n"
-        " ESC - exit\n\n");
-}
-
-
 static const char* keys =
 {
-    "{c camera | true | use camera or not}"
+    "{c camera | 0    | camera id }"
     "{f file   |      | movie file name }"
-    "{h help   |      | print help info }"
 };
 
@@ -177,25 +119,30 @@
 int d3d_app(int argc, char** argv, std::string& title)
 {
     cv::CommandLineParser parser(argc, argv, keys);
     std::string file = parser.get<std::string>("file");
-    bool useCamera = parser.has("camera");
-    bool showHelp = parser.has("help");
+    int camera_id = parser.get<int>("camera");
 
-    if (showHelp)
-        help();
+    parser.about(
+        "\nA sample program demonstrating interoperability of DirectX and OpenCL with OpenCV.\n\n"
+        "Hot keys: \n"
+        " SPACE - turn processing on/off\n"
+        " 1 - process DX surface through OpenCV on CPU\n"
+        " 2 - process DX RGBA surface through OpenCV on GPU (via OpenCL)\n"
+        " 3 - process DX NV12 surface through OpenCV on GPU (via OpenCL)\n"
+        " ESC - exit\n\n");
 
     parser.printMessage();
 
     cv::VideoCapture cap;
 
-    if (useCamera)
-        cap.open(0);
+    if (file.empty())
+        cap.open(camera_id);
     else
         cap.open(file.c_str());
 
     if (!cap.isOpened())
     {
         printf("can not open camera or video file\n");
-        return -1;
+        return EXIT_FAILURE;
     }
 
     int width = (int)cap.get(cv::CAP_PROP_FRAME_WIDTH);
diff --git a/samples/opengl/opengl_interop.cpp b/samples/opengl/opengl_interop.cpp
index 87f1f95939..d69f9e2476 100644
--- a/samples/opengl/opengl_interop.cpp
+++ b/samples/opengl/opengl_interop.cpp
@@ -32,16 +32,6 @@
 # pragma comment(lib, "glu32.lib")
 #endif
 
-using namespace cv;
-
-/*
-// Press key to
-// 1 processing on CPU
-// 2 processing on GPU
-// 9 toggle texture/buffer
-// space toggle processing on/off, preserve mode
-// esc quit
-*/
 
 class GLWinApp : public WinApp
 {
@@ -85,37 +75,37 @@
             if (wParam == '1')
             {
                 set_mode(MODE_CPU);
-                return 0;
+                return EXIT_SUCCESS;
             }
             if (wParam == '2')
             {
                 set_mode(MODE_GPU);
-                return 0;
+                return EXIT_SUCCESS;
             }
             else if (wParam == '9')
             {
                 toggle_buffer();
-                return 0;
+                return EXIT_SUCCESS;
             }
             else if (wParam == VK_SPACE)
             {
                 m_demo_processing = !m_demo_processing;
-                return 0;
+                return EXIT_SUCCESS;
             }
             else if (wParam == VK_ESCAPE)
             {
                 cleanup();
-                return 0;
+                return EXIT_SUCCESS;
             }
             break;
 
         case WM_CLOSE:
             cleanup();
-            return 0;
+            return EXIT_SUCCESS;
 
         case WM_DESTROY:
             ::PostQuitMessage(0);
-            return 0;
+            return EXIT_SUCCESS;
         }
 
         return ::DefWindowProc(hWnd, message, wParam, lParam);
@@ -135,7 +125,7 @@
             }
             else
             {
-                return 0;
+                return EXIT_SUCCESS;
             }
             break;
         case Expose:
@@ -163,7 +153,7 @@
             }
             break;
         default:
-            return 0;
+            return EXIT_SUCCESS;
         }
         return 1;
     }
@@ -177,7 +167,7 @@
         if (setup_pixel_format() != 0)
         {
             std::cerr << "Can't setup pixel format" << std::endl;
-            return -1;
+            return EXIT_FAILURE;
         }
 
         m_hRC = wglCreateContext(m_hDC);
@@ -201,25 +191,25 @@
                        cv::ocl::Context::getDefault().device(0).name() :
                        (char*) "No OpenCL device";
 
-        return 0;
+        return EXIT_SUCCESS;
     } // init()
 
     int get_frame(cv::ogl::Texture2D& texture, cv::ogl::Buffer& buffer, bool do_buffer)
     {
         if (!m_cap.read(m_frame_bgr))
-            return -1;
+            return EXIT_FAILURE;
 
-        cv::cvtColor(m_frame_bgr, m_frame_rgba, COLOR_RGB2RGBA);
+        cv::cvtColor(m_frame_bgr, m_frame_rgba, cv::COLOR_RGB2RGBA);
 
         if (do_buffer)
             buffer.copyFrom(m_frame_rgba, cv::ogl::Buffer::PIXEL_UNPACK_BUFFER, true);
         else
             texture.copyFrom(m_frame_rgba, true);
 
-        return 0;
+        return EXIT_SUCCESS;
     }
 
-    void print_info(MODE mode, float time, cv::String& oclDevName)
+    void print_info(MODE mode, double time, cv::String& oclDevName)
     {
 #if defined(_WIN32)
         HDC hDC = m_hDC;
@@ -270,7 +260,7 @@
         try
         {
             if (m_shutdown)
-                return 0;
+                return EXIT_SUCCESS;
 
             int r;
             cv::ogl::Texture2D texture;
@@ -285,7 +275,7 @@
             r = get_frame(texture, buffer, do_buffer);
             if (r != 0)
             {
-                return -1;
+                return EXIT_FAILURE;
             }
 
             switch (mode)
@@ -331,7 +321,7 @@
             glXSwapBuffers(m_display, m_window);
 #endif
 
-            print_info(mode, m_timer.time(Timer::MSEC), m_oclDevName);
+            print_info(mode, m_timer.getTimeMilli(), m_oclDevName);
         }
@@ -341,7 +331,7 @@
             return 10;
         }
 
-        return 0;
+        return EXIT_SUCCESS;
     }
 
 protected:
@@ -350,6 +340,7 @@
     {
         cv::Mat m(m_height, m_width, CV_8UC4);
 
+        m_timer.reset();
         m_timer.start();
 
         if (do_buffer)
@@ -375,6 +366,7 @@
     {
         cv::UMat u;
 
+        m_timer.reset();
         m_timer.start();
 
         if (do_buffer)
@@ -430,12 +422,12 @@
         int pfmt = ChoosePixelFormat(m_hDC, &pfd);
         if (pfmt == 0)
-            return -1;
+            return EXIT_FAILURE;
 
         if (SetPixelFormat(m_hDC, pfmt, &pfd) == 0)
             return -2;
 
-        return 0;
+        return EXIT_SUCCESS;
     }
 #endif
@@ -473,23 +465,10 @@
     cv::String m_oclDevName;
 };
 
-static void help()
-{
-    printf(
-        "\nSample demonstrating interoperability of OpenGL and OpenCL with OpenCV.\n"
-        "Hot keys: \n"
-        " SPACE - turn processing on/off\n"
-        " 1 - process GL data through OpenCV on CPU\n"
-        " 2 - process GL data through OpenCV on GPU (via OpenCL)\n"
-        " 9 - toggle use of GL texture/GL buffer\n"
-        " ESC - exit\n\n");
-}
-
 static const char* keys =
 {
-    "{c camera | true  | use camera or not}"
+    "{c camera | 0     | camera id }"
    "{f file   |       | movie file name }"
-    "{h help   | false | print help info }"
 };
 
 using namespace cv;
@@ -498,29 +477,31 @@ using namespace std;
 
 int main(int argc, char** argv)
 {
     cv::CommandLineParser parser(argc, argv, keys);
-    bool useCamera = parser.get<bool>("camera");
+    int camera_id = parser.get<int>("camera");
     string file = parser.get<string>("file");
-    bool showHelp = parser.get<bool>("help");
 
-    if (showHelp)
-    {
-        help();
-        return 0;
-    }
+    parser.about(
+        "\nA sample program demonstrating interoperability of OpenGL and OpenCL with OpenCV.\n\n"
+        "Hot keys: \n"
+        " SPACE - turn processing on/off\n"
+        " 1 - process GL data through OpenCV on CPU\n"
+        " 2 - process GL data through OpenCV on GPU (via OpenCL)\n"
+        " 9 - toggle use of GL texture/GL buffer\n"
+        " ESC - exit\n\n");
 
     parser.printMessage();
 
     cv::VideoCapture cap;
 
-    if (useCamera)
-        cap.open(0);
+    if (file.empty())
+        cap.open(camera_id);
     else
         cap.open(file.c_str());
 
     if (!cap.isOpened())
     {
         printf("can not open camera or video file\n");
-        return -1;
+        return EXIT_FAILURE;
     }
 
     int width = (int)cap.get(CAP_PROP_FRAME_WIDTH);
diff --git a/samples/opengl/winapp.hpp b/samples/opengl/winapp.hpp
index 75df353343..c4f492dff0 100644
--- a/samples/opengl/winapp.hpp
+++ b/samples/opengl/winapp.hpp
@@ -22,54 +22,6 @@
 
 #define SAFE_RELEASE(p) if (p) { p->Release(); p = NULL; }
 
-class Timer
-{
-public:
-    enum UNITS
-    {
-        USEC = 0,
-        MSEC,
-        SEC
-    };
-
-    Timer() : m_t0(0), m_diff(0)
-    {
-        m_tick_frequency = (float)cv::getTickFrequency();
-
-        m_unit_mul[USEC] = 1000000;
-        m_unit_mul[MSEC] = 1000;
-        m_unit_mul[SEC]  = 1;
-    }
-
-    void clear()
-    {
-        m_t0 = m_diff = 0;
-    }
-
-    void start()
-    {
-        m_t0 = cv::getTickCount();
-    }
-
-    void stop()
-    {
-        m_diff = cv::getTickCount() - m_t0;
-    }
-
-    float time(UNITS u = MSEC)
-    {
-        float sec = m_diff / m_tick_frequency;
-
-        return sec * m_unit_mul[u];
-    }
-
-public:
-    float m_tick_frequency;
-    int64 m_t0;
-    int64 m_diff;
-    int   m_unit_mul[3];
-};
-
 class WinApp
 {
 public:
@@ -253,18 +205,18 @@ protected:
     virtual void idle() = 0;
 
 #if defined(_WIN32)
-    HINSTANCE   m_hInstance;
-    HWND        m_hWnd;
+    HINSTANCE     m_hInstance;
+    HWND          m_hWnd;
 #elif defined(__linux__)
-    Display*     m_display;
-    XVisualInfo* m_visual_info;
-    Window       m_window;
-    long         m_event_mask;
-    Atom         m_WM_DELETE_WINDOW;
-    bool         m_end_loop;
+    Display*      m_display;
+    XVisualInfo*  m_visual_info;
+    Window        m_window;
+    long          m_event_mask;
+    Atom          m_WM_DELETE_WINDOW;
+    bool          m_end_loop;
 #endif
-    int         m_width;
-    int         m_height;
-    std::string m_window_name;
-    Timer       m_timer;
+    int           m_width;
+    int           m_height;
+    std::string   m_window_name;
+    cv::TickMeter m_timer;
 };
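
Note on the timing change: the patch drops the hand-rolled Timer helper from the samples and uses cv::TickMeter (reset/start/stop, getTimeMilli) instead. The following is a minimal standalone sketch of that usage pattern, not part of the patch; the dummy frame and the 15x15 blur are illustrative stand-ins for the mapped Direct X / OpenGL surface processed in the samples.

    #include <opencv2/core.hpp>
    #include <opencv2/core/utility.hpp>   // cv::TickMeter
    #include <opencv2/imgproc.hpp>
    #include <iostream>

    int main()
    {
        // Stand-in for a frame mapped from a DirectX/OpenGL surface.
        cv::Mat frame(480, 640, CV_8UC4, cv::Scalar::all(0));

        cv::TickMeter timer;
        timer.reset();                              // per-frame pattern used by the samples
        timer.start();
        cv::blur(frame, frame, cv::Size(15, 15));   // the demo processing step
        timer.stop();

        // getTimeMilli() returns a double, matching the new "%4.3f msec" format string.
        std::cout << cv::format("time: %4.3f msec", timer.getTimeMilli()) << std::endl;
        return 0;
    }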