Merge branch '4.x' into coordinate

This commit is contained in:
Ginkgo 2024-10-17 14:08:38 +08:00
commit 02ed3ba853
86 changed files with 3606 additions and 3009 deletions

View File

@ -1962,4 +1962,20 @@ inline int TEGRA_LKOpticalFlowLevel(const uchar *prev_data, size_t prev_data_ste
#define cv_hal_LKOpticalFlowLevel TEGRA_LKOpticalFlowLevel
#endif // __ARM_ARCH=7
#if 0 // OpenCV provides faster parallel implementation
inline int TEGRA_ScharrDeriv(const uchar* src_data, size_t src_step,
short* dst_data, size_t dst_step,
int width, int height, int cn)
{
if (!CAROTENE_NS::isSupportedConfiguration())
return CV_HAL_ERROR_NOT_IMPLEMENTED;
CAROTENE_NS::ScharrDeriv(CAROTENE_NS::Size2D(width, height), cn, src_data, src_step, dst_data, dst_step);
return CV_HAL_ERROR_OK;
}
#undef cv_hal_ScharrDeriv
#define cv_hal_ScharrDeriv TEGRA_ScharrDeriv
#endif
#endif
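The #undef/#define pair above is the standard HAL hook pattern: OpenCV's dispatcher calls the macro name, and a platform header repoints it at its own implementation. A contrived, self-contained sketch of the same mechanism (all names hypothetical, not OpenCV API):

#include <cstdio>
#define CV_HAL_ERROR_OK 0

// Baseline implementation the dispatcher falls back to.
inline int default_op(int x) { (void)x; return CV_HAL_ERROR_OK; }
#define cv_hal_op default_op

// A platform HAL header overrides the hook:
inline int fast_op(int x) { std::printf("fast path: %d\n", x); return CV_HAL_ERROR_OK; }
#undef cv_hal_op
#define cv_hal_op fast_op

int main() { return cv_hal_op(42); } // resolves to fast_op(42)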

View File

@ -19,4 +19,8 @@
#include "version/hal_rvv_071.hpp"
#endif
#endif
#if defined(__riscv_v) && __riscv_v == 1000000
#include "hal_rvv_1p0/merge.hpp" // core
#endif
#endif

3rdparty/hal_rvv/hal_rvv_1p0/merge.hpp (vendored, new file)
View File

@ -0,0 +1,363 @@
#ifndef OPENCV_HAL_RVV_MERGE_HPP_INCLUDED
#define OPENCV_HAL_RVV_MERGE_HPP_INCLUDED
#include <riscv_vector.h>
namespace cv { namespace cv_hal_rvv {
#undef cv_hal_merge8u
#define cv_hal_merge8u cv::cv_hal_rvv::merge8u
#undef cv_hal_merge16u
#define cv_hal_merge16u cv::cv_hal_rvv::merge16u
#undef cv_hal_merge32s
#define cv_hal_merge32s cv::cv_hal_rvv::merge32s
#undef cv_hal_merge64s
#define cv_hal_merge64s cv::cv_hal_rvv::merge64s
#if defined __GNUC__
__attribute__((optimize("no-tree-vectorize")))
#endif
static int merge8u(const uchar** src, uchar* dst, int len, int cn ) {
int k = cn % 4 ? cn % 4 : 4;
int i = 0, j;
int vl = __riscv_vsetvlmax_e8m1();
if( k == 1 )
{
const uchar* src0 = src[0];
for( ; i <= len - vl; i += vl)
{
auto a = __riscv_vle8_v_u8m1(src0 + i, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*cn, a, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++)
dst[i*cn] = src0[i];
}
else if( k == 2 )
{
const uchar *src0 = src[0], *src1 = src[1];
for( ; i <= len - vl; i += vl)
{
auto a = __riscv_vle8_v_u8m1(src0 + i, vl);
auto b = __riscv_vle8_v_u8m1(src1 + i, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*cn, a, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 1, sizeof(uchar)*cn, b, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++ )
{
dst[i*cn] = src0[i];
dst[i*cn+1] = src1[i];
}
}
else if( k == 3 )
{
const uchar *src0 = src[0], *src1 = src[1], *src2 = src[2];
for( ; i <= len - vl; i += vl)
{
auto a = __riscv_vle8_v_u8m1(src0 + i, vl);
auto b = __riscv_vle8_v_u8m1(src1 + i, vl);
auto c = __riscv_vle8_v_u8m1(src2 + i, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*cn, a, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 1, sizeof(uchar)*cn, b, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 2, sizeof(uchar)*cn, c, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++ )
{
dst[i*cn] = src0[i];
dst[i*cn+1] = src1[i];
dst[i*cn+2] = src2[i];
}
}
else
{
const uchar *src0 = src[0], *src1 = src[1], *src2 = src[2], *src3 = src[3];
for( ; i <= len - vl; i += vl)
{
auto a = __riscv_vle8_v_u8m1(src0 + i, vl);
auto b = __riscv_vle8_v_u8m1(src1 + i, vl);
auto c = __riscv_vle8_v_u8m1(src2 + i, vl);
auto d = __riscv_vle8_v_u8m1(src3 + i, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*cn, a, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 1, sizeof(uchar)*cn, b, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 2, sizeof(uchar)*cn, c, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 3, sizeof(uchar)*cn, d, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++ )
{
dst[i*cn] = src0[i];
dst[i*cn+1] = src1[i];
dst[i*cn+2] = src2[i];
dst[i*cn+3] = src3[i];
}
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; k < cn; k += 4 )
{
const uchar *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
for( i = 0, j = k; i < len; i++, j += cn )
{
dst[j] = src0[i]; dst[j+1] = src1[i];
dst[j+2] = src2[i]; dst[j+3] = src3[i];
}
}
return CV_HAL_ERROR_OK;
}
#if defined __GNUC__
__attribute__((optimize("no-tree-vectorize")))
#endif
static int merge16u(const ushort** src, ushort* dst, int len, int cn ) {
int k = cn % 4 ? cn % 4 : 4;
int i = 0, j;
int vl = __riscv_vsetvlmax_e16m1();
if( k == 1 )
{
const ushort* src0 = src[0];
for( ; i <= len - vl; i += vl)
{
auto a = __riscv_vle16_v_u16m1(src0 + i, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*cn, a, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++)
dst[i*cn] = src0[i];
}
else if( k == 2 )
{
const ushort *src0 = src[0], *src1 = src[1];
for( ; i <= len - vl; i += vl)
{
auto a = __riscv_vle16_v_u16m1(src0 + i, vl);
auto b = __riscv_vle16_v_u16m1(src1 + i, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*cn, a, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 1, sizeof(ushort)*cn, b, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++ )
{
dst[i*cn] = src0[i];
dst[i*cn+1] = src1[i];
}
}
else if( k == 3 )
{
const ushort *src0 = src[0], *src1 = src[1], *src2 = src[2];
for( ; i <= len - vl; i += vl)
{
auto a = __riscv_vle16_v_u16m1(src0 + i, vl);
auto b = __riscv_vle16_v_u16m1(src1 + i, vl);
auto c = __riscv_vle16_v_u16m1(src2 + i, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*cn, a, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 1, sizeof(ushort)*cn, b, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 2, sizeof(ushort)*cn, c, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++ )
{
dst[i*cn] = src0[i];
dst[i*cn+1] = src1[i];
dst[i*cn+2] = src2[i];
}
}
else
{
const ushort *src0 = src[0], *src1 = src[1], *src2 = src[2], *src3 = src[3];
for( ; i <= len - vl; i += vl)
{
auto a = __riscv_vle16_v_u16m1(src0 + i, vl);
auto b = __riscv_vle16_v_u16m1(src1 + i, vl);
auto c = __riscv_vle16_v_u16m1(src2 + i, vl);
auto d = __riscv_vle16_v_u16m1(src3 + i, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*cn, a, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 1, sizeof(ushort)*cn, b, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 2, sizeof(ushort)*cn, c, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 3, sizeof(ushort)*cn, d, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++ )
{
dst[i*cn] = src0[i];
dst[i*cn+1] = src1[i];
dst[i*cn+2] = src2[i];
dst[i*cn+3] = src3[i];
}
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; k < cn; k += 4 )
{
const uint16_t *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
for( i = 0, j = k; i < len; i++, j += cn )
{
dst[j] = src0[i]; dst[j+1] = src1[i];
dst[j+2] = src2[i]; dst[j+3] = src3[i];
}
}
return CV_HAL_ERROR_OK;
}
#if defined __GNUC__
__attribute__((optimize("no-tree-vectorize")))
#endif
static int merge32s(const int** src, int* dst, int len, int cn ) {
int k = cn % 4 ? cn % 4 : 4;
int i, j;
if( k == 1 )
{
const int* src0 = src[0];
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( i = j = 0; i < len; i++, j += cn )
dst[j] = src0[i];
}
else if( k == 2 )
{
const int *src0 = src[0], *src1 = src[1];
i = j = 0;
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++, j += cn )
{
dst[j] = src0[i];
dst[j+1] = src1[i];
}
}
else if( k == 3 )
{
const int *src0 = src[0], *src1 = src[1], *src2 = src[2];
i = j = 0;
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++, j += cn )
{
dst[j] = src0[i];
dst[j+1] = src1[i];
dst[j+2] = src2[i];
}
}
else
{
const int *src0 = src[0], *src1 = src[1], *src2 = src[2], *src3 = src[3];
i = j = 0;
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++, j += cn )
{
dst[j] = src0[i]; dst[j+1] = src1[i];
dst[j+2] = src2[i]; dst[j+3] = src3[i];
}
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; k < cn; k += 4 )
{
const int *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
for( i = 0, j = k; i < len; i++, j += cn )
{
dst[j] = src0[i]; dst[j+1] = src1[i];
dst[j+2] = src2[i]; dst[j+3] = src3[i];
}
}
return CV_HAL_ERROR_OK;
}
#if defined __GNUC__
__attribute__((optimize("no-tree-vectorize")))
#endif
static int merge64s(const int64** src, int64* dst, int len, int cn ) {
int k = cn % 4 ? cn % 4 : 4;
int i, j;
if( k == 1 )
{
const int64* src0 = src[0];
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( i = j = 0; i < len; i++, j += cn )
dst[j] = src0[i];
}
else if( k == 2 )
{
const int64 *src0 = src[0], *src1 = src[1];
i = j = 0;
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++, j += cn )
{
dst[j] = src0[i];
dst[j+1] = src1[i];
}
}
else if( k == 3 )
{
const int64 *src0 = src[0], *src1 = src[1], *src2 = src[2];
i = j = 0;
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++, j += cn )
{
dst[j] = src0[i];
dst[j+1] = src1[i];
dst[j+2] = src2[i];
}
}
else
{
const int64 *src0 = src[0], *src1 = src[1], *src2 = src[2], *src3 = src[3];
i = j = 0;
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++, j += cn )
{
dst[j] = src0[i]; dst[j+1] = src1[i];
dst[j+2] = src2[i]; dst[j+3] = src3[i];
}
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; k < cn; k += 4 )
{
const int64 *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
for( i = 0, j = k; i < len; i++, j += cn )
{
dst[j] = src0[i]; dst[j+1] = src1[i];
dst[j+2] = src2[i]; dst[j+3] = src3[i];
}
}
return CV_HAL_ERROR_OK;
}
}}
#endif
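The strided stores above are what give each __riscv_vsse* call its meaning: vl lanes are written cn elements apart, so channel c of pixel i lands at dst[i*cn + c]. A minimal scalar model of that contract (standalone sketch; merge_ref is a hypothetical name, not part of the patch):

#include <cassert>

// Reference semantics of cv_hal_merge8u: interleave cn planes into one buffer.
static void merge_ref(const unsigned char** src, unsigned char* dst, int len, int cn)
{
    for (int i = 0; i < len; i++)      // pixel index
        for (int c = 0; c < cn; c++)   // channel index
            dst[i*cn + c] = src[c][i];
}

int main()
{
    const unsigned char r[4] = {1, 2, 3, 4}, g[4] = {5, 6, 7, 8}, b[4] = {9, 10, 11, 12};
    const unsigned char* planes[3] = {r, g, b};
    unsigned char out[12];
    merge_ref(planes, out, 4, 3);
    assert(out[0] == 1 && out[1] == 5 && out[2] == 9); // pixel 0 becomes (r, g, b)
    return 0;
}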

View File

@ -1,8 +1,8 @@
project(kleidicv_hal)
set(KLEIDICV_SOURCE_PATH "" CACHE PATH "Directory containing KleidiCV sources")
ocv_update(KLEIDICV_SRC_COMMIT "0.1.0")
ocv_update(KLEIDICV_SRC_HASH "9388f28cf2fbe3338197b2b57d491468")
ocv_update(KLEIDICV_SRC_COMMIT "0.2.0")
ocv_update(KLEIDICV_SRC_HASH "dabe522e8f55ac342d07a787391dab80")
if(KLEIDICV_SOURCE_PATH)
set(THE_ROOT "${KLEIDICV_SOURCE_PATH}")

View File

@ -52,6 +52,10 @@ if(POLICY CMP0056)
cmake_policy(SET CMP0056 NEW) # try_compile(): link flags
endif()
if(POLICY CMP0057)
cmake_policy(SET CMP0057 NEW) # CMake 3.3: if(IN_LIST) support
endif()
if(POLICY CMP0066)
cmake_policy(SET CMP0066 NEW) # CMake 3.7: try_compile(): use per-config flags, like CMAKE_CXX_FLAGS_RELEASE
endif()
@ -216,7 +220,7 @@ OCV_OPTION(WITH_1394 "Include IEEE1394 support" OFF
OCV_OPTION(WITH_AVFOUNDATION "Use AVFoundation for Video I/O (iOS/visionOS/Mac)" ON
VISIBLE_IF APPLE
VERIFY HAVE_AVFOUNDATION)
OCV_OPTION(WITH_AVIF "Enable AVIF support" OFF
OCV_OPTION(WITH_AVIF "Enable AVIF support" ON
VERIFY HAVE_AVIF)
OCV_OPTION(WITH_CAP_IOS "Enable iOS video capture" ON
VISIBLE_IF IOS

View File

@ -353,23 +353,23 @@ function(ocv_target_include_directories target)
#ocv_debug_message("ocv_target_include_directories(${target} ${ARGN})")
_ocv_fix_target(target)
set(__params "")
if(CV_GCC AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6.0" AND
";${ARGN};" MATCHES "/usr/include;")
return() # workaround for GCC 6.x bug
endif()
set(__params "")
set(__system_params "")
set(__var_name __params)
foreach(dir ${ARGN})
if("${dir}" STREQUAL "SYSTEM")
set(__var_name __system_params)
else()
get_filename_component(__abs_dir "${dir}" ABSOLUTE)
ocv_is_opencv_directory(__is_opencv_dir "${dir}")
if(__is_opencv_dir)
list(APPEND ${__var_name} "${__abs_dir}")
if(CV_GCC AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS "6.0" AND
dir MATCHES "/usr/include$")
# workaround for GCC 6.x bug
else()
list(APPEND ${__var_name} "${dir}")
get_filename_component(__abs_dir "${dir}" ABSOLUTE)
ocv_is_opencv_directory(__is_opencv_dir "${dir}")
if(__is_opencv_dir)
list(APPEND ${__var_name} "${__abs_dir}")
else()
list(APPEND ${__var_name} "${dir}")
endif()
endif()
endif()
endforeach()

View File

@ -4,9 +4,9 @@ Arithmetic Operations on Images {#tutorial_js_image_arithmetics}
Goal
----
- Learn several arithmetic operations on images like addition, subtraction, bitwise operations
- Learn several arithmetic operations on images like addition, subtraction, bitwise operations,
etc.
- You will learn these functions : **cv.add()**, **cv.subtract()** etc.
- You will learn these functions: **cv.add()**, **cv.subtract()**, etc.
Image Addition
--------------

View File

@ -1,4 +1,4 @@
File Input and Output using XML and YAML files {#tutorial_file_input_output_with_xml_yml}
File Input and Output using XML / YAML / JSON files {#tutorial_file_input_output_with_xml_yml}
==============================================
@tableofcontents
@ -14,12 +14,12 @@ File Input and Output using XML and YAML files {#tutorial_file_input_output_with
Goal
----
You'll find answers for the following questions:
You'll find answers to the following questions:
- How to print and read text entries to a file and OpenCV using YAML or XML files?
- How to do the same for OpenCV data structures?
- How to do this for your data structures?
- Usage of OpenCV data structures such as @ref cv::FileStorage , @ref cv::FileNode or @ref
- How do you print and read text entries to a file in OpenCV using YAML, XML, or JSON files?
- How can you perform the same operations for OpenCV data structures?
- How can this be done for your custom data structures?
- How do you use OpenCV data structures, such as @ref cv::FileStorage , @ref cv::FileNode or @ref
cv::FileNodeIterator .
Source code
@ -49,14 +49,14 @@ Here's a sample code of how to achieve all the stuff enumerated at the goal list
Explanation
-----------
Here we talk only about XML and YAML file inputs. Your output (and its respective input) file may
Here we talk only about XML, YAML and JSON file inputs. Your output (and its respective input) file may
have only one of these extensions and the structure that follows from it. There are two kinds of data
structures you may serialize: *mappings* (like the STL map and the Python dictionary) and *element sequences* (like the STL
vector). The difference between these is that in a map every element has a unique name by which
you may access it; for sequences you have to iterate through them to query a specific item.
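The mapping/sequence distinction maps directly onto the @ref cv::FileStorage streaming syntax; a minimal sketch (file name arbitrary; the extension alone selects XML, YAML, or JSON):

#include <opencv2/core.hpp>

int main()
{
    cv::FileStorage fs("demo.yml", cv::FileStorage::WRITE);
    fs << "iterations" << 100;                         // mapping: unique name -> value
    fs << "sizes" << "[" << 320 << 640 << 1280 << "]"; // sequence: ordered, unnamed
    fs.release();

    cv::FileStorage in("demo.yml", cv::FileStorage::READ);
    int iterations = (int)in["iterations"];            // mapping entries accessed by name
    cv::FileNode sizes = in["sizes"];                  // sequences are iterated
    (void)iterations; (void)sizes;
    in.release();
    return 0;
}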
-# **XML/YAML File Open and Close.** Before you write any content to such file you need to open it
and at the end to close it. The XML/YAML data structure in OpenCV is @ref cv::FileStorage . To
-# **XML/YAML/JSON File Open and Close.** Before you write any content to such a file you need to open it
and, at the end, close it. The XML/YAML/JSON data structure in OpenCV is @ref cv::FileStorage . To
specify the file on your hard drive to which this structure binds, you can use either its
constructor or its *open()* function:
@add_toggle_cpp

View File

@ -1,3 +1,10 @@
// Implementation of SQPnP as described in the paper:
//
// "A Consistently Fast and Globally Optimal Solution to the Perspective-n-Point Problem" by G. Terzakis and M. Lourakis
// a) Paper: https://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123460460.pdf
// b) Supplementary: https://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123460460-supp.pdf
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
@ -39,6 +46,10 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "precomp.hpp"
#include "sqpnp.hpp"
#ifdef HAVE_EIGEN
#include <Eigen/Dense>
#endif
#include <opencv2/calib3d.hpp>
namespace cv {
@ -54,8 +65,8 @@ const double PoseSolver::POINT_VARIANCE_THRESHOLD = 1e-5;
const double PoseSolver::SQRT3 = std::sqrt(3);
const int PoseSolver::SQP_MAX_ITERATION = 15;
//No checking done here for overflow, since this is not public all call instances
//are assumed to be valid
// No checking done here for overflow, since this is not public all call instances
// are assumed to be valid
template <typename tp, int snrows, int sncols,
int dnrows, int dncols>
void set(int row, int col, cv::Matx<tp, dnrows, dncols>& dest,
@ -80,7 +91,7 @@ PoseSolver::PoseSolver()
void PoseSolver::solve(InputArray objectPoints, InputArray imagePoints, OutputArrayOfArrays rvecs,
OutputArrayOfArrays tvecs)
{
//Input checking
// Input checking
int objType = objectPoints.getMat().type();
CV_CheckType(objType, objType == CV_32FC3 || objType == CV_64FC3,
"Type of objectPoints must be CV_32FC3 or CV_64FC3");
@ -160,12 +171,12 @@ void PoseSolver::computeOmega(InputArray objectPoints, InputArray imagePoints)
sum_img += img_pt;
sum_obj += obj_pt;
const double& x = img_pt.x, & y = img_pt.y;
const double& X = obj_pt.x, & Y = obj_pt.y, & Z = obj_pt.z;
const double x = img_pt.x, y = img_pt.y;
const double X = obj_pt.x, Y = obj_pt.y, Z = obj_pt.z;
double sq_norm = x * x + y * y;
sq_norm_sum += sq_norm;
double X2 = X * X,
const double X2 = X * X,
XY = X * Y,
XZ = X * Z,
Y2 = Y * Y,
@ -180,47 +191,47 @@ void PoseSolver::computeOmega(InputArray objectPoints, InputArray imagePoints)
omega_(2, 2) += Z2;
//Populating this manually saves operations by only calculating upper triangle
omega_(0, 6) += -x * X2; omega_(0, 7) += -x * XY; omega_(0, 8) += -x * XZ;
omega_(1, 7) += -x * Y2; omega_(1, 8) += -x * YZ;
omega_(2, 8) += -x * Z2;
// Populating this manually saves operations by only calculating upper triangle
omega_(0, 6) -= x * X2; omega_(0, 7) -= x * XY; omega_(0, 8) -= x * XZ;
omega_(1, 7) -= x * Y2; omega_(1, 8) -= x * YZ;
omega_(2, 8) -= x * Z2;
omega_(3, 6) += -y * X2; omega_(3, 7) += -y * XY; omega_(3, 8) += -y * XZ;
omega_(4, 7) += -y * Y2; omega_(4, 8) += -y * YZ;
omega_(5, 8) += -y * Z2;
omega_(3, 6) -= y * X2; omega_(3, 7) -= y * XY; omega_(3, 8) -= y * XZ;
omega_(4, 7) -= y * Y2; omega_(4, 8) -= y * YZ;
omega_(5, 8) -= y * Z2;
omega_(6, 6) += sq_norm * X2; omega_(6, 7) += sq_norm * XY; omega_(6, 8) += sq_norm * XZ;
omega_(7, 7) += sq_norm * Y2; omega_(7, 8) += sq_norm * YZ;
omega_(8, 8) += sq_norm * Z2;
//Compute qa_sum. Certain pairs of elements are equal, so filling them outside the loop saves some operations
// Compute qa_sum. Certain pairs of elements are equal, so filling them outside the loop saves some operations
qa_sum(0, 0) += X; qa_sum(0, 1) += Y; qa_sum(0, 2) += Z;
qa_sum(0, 6) += -x * X; qa_sum(0, 7) += -x * Y; qa_sum(0, 8) += -x * Z;
qa_sum(1, 6) += -y * X; qa_sum(1, 7) += -y * Y; qa_sum(1, 8) += -y * Z;
qa_sum(0, 6) -= x * X; qa_sum(0, 7) -= x * Y; qa_sum(0, 8) -= x * Z;
qa_sum(1, 6) -= y * X; qa_sum(1, 7) -= y * Y; qa_sum(1, 8) -= y * Z;
qa_sum(2, 6) += sq_norm * X; qa_sum(2, 7) += sq_norm * Y; qa_sum(2, 8) += sq_norm * Z;
}
//Complete qa_sum
// Complete qa_sum
qa_sum(1, 3) = qa_sum(0, 0); qa_sum(1, 4) = qa_sum(0, 1); qa_sum(1, 5) = qa_sum(0, 2);
qa_sum(2, 0) = qa_sum(0, 6); qa_sum(2, 1) = qa_sum(0, 7); qa_sum(2, 2) = qa_sum(0, 8);
qa_sum(2, 3) = qa_sum(1, 6); qa_sum(2, 4) = qa_sum(1, 7); qa_sum(2, 5) = qa_sum(1, 8);
//lower triangles of omega_'s off-diagonal blocks (0:2, 6:8), (3:5, 6:8) and (6:8, 6:8)
// lower triangles of omega_'s off-diagonal blocks (0:2, 6:8), (3:5, 6:8) and (6:8, 6:8)
omega_(1, 6) = omega_(0, 7); omega_(2, 6) = omega_(0, 8); omega_(2, 7) = omega_(1, 8);
omega_(4, 6) = omega_(3, 7); omega_(5, 6) = omega_(3, 8); omega_(5, 7) = omega_(4, 8);
omega_(7, 6) = omega_(6, 7); omega_(8, 6) = omega_(6, 8); omega_(8, 7) = omega_(7, 8);
//upper triangle of omega_'s block (3:5, 3:5)
// upper triangle of omega_'s block (3:5, 3:5)
omega_(3, 3) = omega_(0, 0); omega_(3, 4) = omega_(0, 1); omega_(3, 5) = omega_(0, 2);
omega_(4, 4) = omega_(1, 1); omega_(4, 5) = omega_(1, 2);
omega_(5, 5) = omega_(2, 2);
//Mirror omega_'s upper triangle to lower triangle
//Note that elements (7, 6), (8, 6) & (8, 7) have already been assigned above
// Mirror omega_'s upper triangle to lower triangle
// Note that elements (7, 6), (8, 6) & (8, 7) have already been assigned above
omega_(1, 0) = omega_(0, 1);
omega_(2, 0) = omega_(0, 2); omega_(2, 1) = omega_(1, 2);
omega_(3, 0) = omega_(0, 3); omega_(3, 1) = omega_(1, 3); omega_(3, 2) = omega_(2, 3);
@ -242,12 +253,26 @@ void PoseSolver::computeOmega(InputArray objectPoints, InputArray imagePoints)
CV_Assert(point_coordinate_variance >= POINT_VARIANCE_THRESHOLD);
Matx<double, 3, 3> q_inv;
analyticalInverse3x3Symm(q, q_inv);
if (!invertSPD3x3(q, q_inv)) analyticalInverse3x3Symm(q, q_inv);
p_ = -q_inv * qa_sum;
omega_ += qa_sum.t() * p_;
#ifdef HAVE_EIGEN
// Rank revealing QR nullspace computation with full pivoting.
// This is slightly less accurate compared to SVD but x2-x3 faster
Eigen::Matrix<double, 9, 9> omega_eig, tmp_eig;
cv::cv2eigen(omega_, omega_eig);
Eigen::FullPivHouseholderQR<Eigen::Matrix<double, 9, 9> > rrqr(omega_eig);
tmp_eig = rrqr.matrixQ();
cv::eigen2cv(tmp_eig, u_);
tmp_eig = rrqr.matrixQR().template triangularView<Eigen::Upper>(); // R
Eigen::Matrix<double, 9, 1> S_eig = tmp_eig.diagonal().array().abs();
cv::eigen2cv(S_eig, s_);
#else
// Use OpenCV's SVD
cv::SVD omega_svd(omega_, cv::SVD::FULL_UV);
s_ = omega_svd.w;
u_ = cv::Mat(omega_svd.vt.t());
@ -257,6 +282,8 @@ void PoseSolver::computeOmega(InputArray objectPoints, InputArray imagePoints)
u_ = u_.t(); // eigenvectors were returned as rows
#endif
#endif // HAVE_EIGEN
CV_Assert(s_(0) >= 1e-7);
while (s_(7 - num_null_vectors_) < RANK_TOLERANCE) num_null_vectors_++;
@ -278,7 +305,7 @@ void PoseSolver::solveInternal(InputArray objectPoints)
SQPSolution solutions[2];
//If e is orthogonal, we can skip SQP
// If e is orthogonal, we can skip SQP
if (orthogonality_sq_err < ORTHOGONALITY_SQUARED_ERROR_THRESHOLD)
{
solutions[0].r_hat = det3x3(e) * e;
@ -395,6 +422,77 @@ void PoseSolver::solveSQPSystem(const cv::Matx<double, 9, 1>& r, cv::Matx<double
delta += N * y;
}
// Inverse of SPD 3x3 A via a lower triangular sqrt-free Cholesky
// factorization A=L*D*L' (L has ones on its diagonal, D is diagonal).
//
// Only the lower triangular part of A is accessed.
//
// The function returns true if successful
//
// see http://euler.nmt.edu/~brian/ldlt.html
//
bool PoseSolver::invertSPD3x3(const cv::Matx<double, 3, 3>& A, cv::Matx<double, 3, 3>& A1)
{
double L[3*3], D[3], v[2], x[3];
v[0]=D[0]=A(0, 0);
if(v[0]<=1E-10) return false;
v[1]=1.0/v[0];
L[3]=A(1, 0)*v[1];
L[6]=A(2, 0)*v[1];
//L[0]=1.0;
//L[1]=L[2]=0.0;
v[0]=L[3]*D[0];
v[1]=D[1]=A(1, 1)-L[3]*v[0];
if(v[1]<=1E-10) return false;
L[7]=(A(2, 1)-L[6]*v[0])/v[1];
//L[4]=1.0;
//L[5]=0.0;
v[0]=L[6]*D[0];
v[1]=L[7]*D[1];
D[2]=A(2, 2)-L[6]*v[0]-L[7]*v[1];
if(D[2]<=1E-10) return false;
//L[8]=1.0;
D[0]=1.0/D[0];
D[1]=1.0/D[1];
D[2]=1.0/D[2];
/* Forward solve Lx = e0 */
//x[0]=1.0;
x[1]=-L[3];
x[2]=-L[6]+L[7]*L[3];
/* Backward solve D*L'x = y */
A1(0, 2)=x[2]=x[2]*D[2];
A1(0, 1)=x[1]=x[1]*D[1]-L[7]*x[2];
A1(0, 0) = D[0]-L[3]*x[1]-L[6]*x[2];
/* Forward solve Lx = e1 */
//x[0]=0.0;
//x[1]=1.0;
x[2]=-L[7];
/* Backward solve D*L'x = y */
A1(1, 2)=x[2]=x[2]*D[2];
A1(1, 1)=x[1]= D[1]-L[7]*x[2];
A1(1, 0) = -L[3]*x[1]-L[6]*x[2];
/* Forward solve Lx = e2 */
//x[0]=0.0;
//x[1]=0.0;
//x[2]=1.0;
/* Backward solve D*L'x = y */
A1(2, 2)=x[2]=D[2];
A1(2, 1)=x[1]= -L[7]*x[2];
A1(2, 0) = -L[3]*x[1]-L[6]*x[2];
return true;
}
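A quick way to sanity-check an SPD 3x3 inverse like the one computed above is to compare against OpenCV's own Cholesky-based inverse; a standalone sketch (test matrix chosen arbitrarily, not part of the patch):

#include <opencv2/core.hpp>
#include <cassert>
#include <cmath>

int main()
{
    // Symmetric and diagonally dominant, hence SPD.
    cv::Matx33d A(4, 1, 0,
                  1, 3, 1,
                  0, 1, 2);
    cv::Matx33d Ainv = A.inv(cv::DECOMP_CHOLESKY); // also assumes SPD input
    cv::Matx33d I = A * Ainv;                      // should be close to identity
    assert(std::fabs(I(0, 0) - 1.0) < 1e-12 && std::fabs(I(0, 1)) < 1e-12);
    return 0;
}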
bool PoseSolver::analyticalInverse3x3Symm(const cv::Matx<double, 3, 3>& Q,
cv::Matx<double, 3, 3>& Qinv,
const double& threshold)
@ -413,7 +511,7 @@ bool PoseSolver::analyticalInverse3x3Symm(const cv::Matx<double, 3, 3>& Q,
t12 = c * c;
double det = -t4 * f + a * t2 + t7 * f - 2.0 * t9 * e + t12 * d;
if (fabs(det) < threshold) return false;
if (fabs(det) < threshold) { cv::invert(Q, Qinv, cv::DECOMP_SVD); return false; } // fall back to pseudoinverse
// 3. Inverse
double t15, t20, t24, t30;
@ -504,7 +602,7 @@ void PoseSolver::computeRowAndNullspace(const cv::Matx<double, 9, 1>& r,
H(6, 4) = r(3) - dot_j5q3 * H(6, 2); H(7, 4) = r(4) - dot_j5q3 * H(7, 2); H(8, 4) = r(5) - dot_j5q3 * H(8, 2);
Matx<double, 9, 1> q4 = H.col(4);
q4 /= cv::norm(q4);
q4 *= (1.0 / cv::norm(q4));
set<double, 9, 1, 9, 6>(0, 4, H, q4);
K(4, 0) = 0;
@ -533,7 +631,7 @@ void PoseSolver::computeRowAndNullspace(const cv::Matx<double, 9, 1>& r,
H(8, 5) = r(2) - dot_j6q3 * H(8, 2) - dot_j6q5 * H(8, 4);
Matx<double, 9, 1> q5 = H.col(5);
q5 /= cv::norm(q5);
q5 *= (1.0 / cv::norm(q5));
set<double, 9, 1, 9, 6>(0, 5, H, q5);
K(5, 0) = r(6) * H(0, 0) + r(7) * H(1, 0) + r(8) * H(2, 0);
@ -575,10 +673,11 @@ void PoseSolver::computeRowAndNullspace(const cv::Matx<double, 9, 1>& r,
Matx<double, 9, 1> v1 = Pn.col(index1);
v1 /= max_norm1;
set<double, 9, 1, 9, 3>(0, 0, N, v1);
col_norms[index1] = -1.0; // mark to avoid use in subsequent loops
for (int i = 0; i < 9; i++)
{
if (i == index1) continue;
//if (i == index1) continue;
if (col_norms[i] >= norm_threshold)
{
double cos_v1_x_col = fabs(Pn.col(i).dot(v1) / col_norms[i]);
@ -594,16 +693,18 @@ void PoseSolver::computeRowAndNullspace(const cv::Matx<double, 9, 1>& r,
Matx<double, 9, 1> v2 = Pn.col(index2);
Matx<double, 9, 1> n0 = N.col(0);
v2 -= v2.dot(n0) * n0;
v2 /= cv::norm(v2);
v2 *= (1.0 / cv::norm(v2));
set<double, 9, 1, 9, 3>(0, 1, N, v2);
col_norms[index2] = -1.0; // mark to avoid use in subsequent loops
for (int i = 0; i < 9; i++)
{
if (i == index2 || i == index1) continue;
//if (i == index2 || i == index1) continue;
if (col_norms[i] >= norm_threshold)
{
double cos_v1_x_col = fabs(Pn.col(i).dot(v1) / col_norms[i]);
double cos_v2_x_col = fabs(Pn.col(i).dot(v2) / col_norms[i]);
double inv_norm = 1.0 / col_norms[i];
double cos_v1_x_col = fabs(Pn.col(i).dot(v1) * inv_norm);
double cos_v2_x_col = fabs(Pn.col(i).dot(v2) * inv_norm);
if (cos_v1_x_col + cos_v2_x_col <= min_dot1323)
{
@ -616,7 +717,7 @@ void PoseSolver::computeRowAndNullspace(const cv::Matx<double, 9, 1>& r,
Matx<double, 9, 1> v3 = Pn.col(index3);
Matx<double, 9, 1> n1 = N.col(1);
v3 -= (v3.dot(n1)) * n1 - (v3.dot(n0)) * n0;
v3 /= cv::norm(v3);
v3 *= (1.0 / cv::norm(v3));
set<double, 9, 1, 9, 3>(0, 2, N, v3);
}
@ -637,17 +738,17 @@ void PoseSolver::nearestRotationMatrixSVD(const cv::Matx<double, 9, 1>& e,
// Faster nearest rotation computation based on FOAM. See M. Lourakis: "An Efficient Solution to Absolute Orientation", ICPR 2016
// and M. Lourakis, G. Terzakis: "Efficient Absolute Orientation Revisited", IROS 2018.
/* Solve the nearest orthogonal approximation problem
* i.e., given e, find R minimizing ||R-e||_F
*
* The computation borrows from Markley's FOAM algorithm
* "Attitude Determination Using Vector Observations: A Fast Optimal Matrix Algorithm", J. Astronaut. Sci. 1993.
*
* See also M. Lourakis: "An Efficient Solution to Absolute Orientation", ICPR 2016
*
* Copyright (C) 2019 Manolis Lourakis (lourakis **at** ics forth gr)
* Institute of Computer Science, Foundation for Research & Technology - Hellas
* Heraklion, Crete, Greece.
*/
* i.e., given e, find R minimizing ||R-e||_F
*
* The computation borrows from Markley's FOAM algorithm
* "Attitude Determination Using Vector Observations: A Fast Optimal Matrix Algorithm", J. Astronaut. Sci. 1993.
*
* See also M. Lourakis: "An Efficient Solution to Absolute Orientation", ICPR 2016
*
* Copyright (C) 2019 Manolis Lourakis (lourakis **at** ics forth gr)
* Institute of Computer Science, Foundation for Research & Technology - Hellas
* Heraklion, Crete, Greece.
*/
void PoseSolver::nearestRotationMatrixFOAM(const cv::Matx<double, 9, 1>& e,
cv::Matx<double, 9, 1>& r)
{
@ -655,7 +756,7 @@ void PoseSolver::nearestRotationMatrixFOAM(const cv::Matx<double, 9, 1>& e,
double l, lprev, det_e, e_sq, adj_e_sq, adj_e[9];
// det(e)
det_e = e(0) * e(4) * e(8) - e(0) * e(5) * e(7) - e(1) * e(3) * e(8) + e(2) * e(3) * e(7) + e(1) * e(6) * e(5) - e(2) * e(6) * e(4);
det_e = ( e(0) * e(4) * e(8) - e(0) * e(5) * e(7) - e(1) * e(3) * e(8) ) + ( e(2) * e(3) * e(7) + e(1) * e(6) * e(5) - e(2) * e(6) * e(4) );
if (fabs(det_e) < 1E-04) { // singular, handle it with SVD
PoseSolver::nearestRotationMatrixSVD(e, r);
return;
@ -667,8 +768,8 @@ void PoseSolver::nearestRotationMatrixFOAM(const cv::Matx<double, 9, 1>& e,
adj_e[6] = e(3) * e(7) - e(4) * e(6); adj_e[7] = e(1) * e(6) - e(0) * e(7); adj_e[8] = e(0) * e(4) - e(1) * e(3);
// ||e||^2, ||adj(e)||^2
e_sq = e(0) * e(0) + e(1) * e(1) + e(2) * e(2) + e(3) * e(3) + e(4) * e(4) + e(5) * e(5) + e(6) * e(6) + e(7) * e(7) + e(8) * e(8);
adj_e_sq = adj_e[0] * adj_e[0] + adj_e[1] * adj_e[1] + adj_e[2] * adj_e[2] + adj_e[3] * adj_e[3] + adj_e[4] * adj_e[4] + adj_e[5] * adj_e[5] + adj_e[6] * adj_e[6] + adj_e[7] * adj_e[7] + adj_e[8] * adj_e[8];
e_sq = ( e(0) * e(0) + e(1) * e(1) + e(2) * e(2) ) + ( e(3) * e(3) + e(4) * e(4) + e(5) * e(5) ) + ( e(6) * e(6) + e(7) * e(7) + e(8) * e(8) );
adj_e_sq = ( adj_e[0] * adj_e[0] + adj_e[1] * adj_e[1] + adj_e[2] * adj_e[2] ) + ( adj_e[3] * adj_e[3] + adj_e[4] * adj_e[4] + adj_e[5] * adj_e[5] ) + ( adj_e[6] * adj_e[6] + adj_e[7] * adj_e[7] + adj_e[8] * adj_e[8] );
// compute l_max with Newton-Raphson from FOAM's characteristic polynomial, i.e. eq.(23) - (26)
l = 0.5*(e_sq + 3.0); // 1/2*(trace(mat(e)*mat(e)') + trace(eye(3)))
@ -735,8 +836,8 @@ void PoseSolver::nearestRotationMatrixFOAM(const cv::Matx<double, 9, 1>& e,
double PoseSolver::det3x3(const cv::Matx<double, 9, 1>& e)
{
return e(0) * e(4) * e(8) + e(1) * e(5) * e(6) + e(2) * e(3) * e(7)
- e(6) * e(4) * e(2) - e(7) * e(5) * e(0) - e(8) * e(3) * e(1);
return ( e(0) * e(4) * e(8) + e(1) * e(5) * e(6) + e(2) * e(3) * e(7) )
- ( e(6) * e(4) * e(2) + e(7) * e(5) * e(0) + e(8) * e(3) * e(1) );
}
inline bool PoseSolver::positiveDepth(const SQPSolution& solution) const
@ -817,8 +918,8 @@ double PoseSolver::orthogonalityError(const cv::Matx<double, 9, 1>& e)
double dot_e1e3 = e(0) * e(6) + e(1) * e(7) + e(2) * e(8);
double dot_e2e3 = e(3) * e(6) + e(4) * e(7) + e(5) * e(8);
return (sq_norm_e1 - 1) * (sq_norm_e1 - 1) + (sq_norm_e2 - 1) * (sq_norm_e2 - 1) + (sq_norm_e3 - 1) * (sq_norm_e3 - 1) +
2 * (dot_e1e2 * dot_e1e2 + dot_e1e3 * dot_e1e3 + dot_e2e3 * dot_e2e3);
return ( (sq_norm_e1 - 1) * (sq_norm_e1 - 1) + (sq_norm_e2 - 1) * (sq_norm_e2 - 1) ) + ( (sq_norm_e3 - 1) * (sq_norm_e3 - 1) +
2 * (dot_e1e2 * dot_e1e2 + dot_e1e3 * dot_e1e3 + dot_e2e3 * dot_e2e3) );
}
}
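For reference, nearestRotationMatrixSVD and nearestRotationMatrixFOAM solve the same orthogonal Procrustes projection; the standard SVD statement is the following (FOAM reaches the same R without forming an explicit SVD):

\min_{R \in SO(3)} \|R - E\|_F^2, \qquad E = U \Sigma V^\top \;\Rightarrow\; R = U \,\mathrm{diag}\!\left(1,\, 1,\, \det(U V^\top)\right) V^\top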

View File

@ -1,3 +1,10 @@
// Implementation of SQPnP as described in the paper:
//
// "A Consistently Fast and Globally Optimal Solution to the Perspective-n-Point Problem" by G. Terzakis and M. Lourakis
// a) Paper: https://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123460460.pdf
// b) Supplementary: https://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123460460-supp.pdf
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
@ -158,6 +165,13 @@ private:
*/
void solveSQPSystem(const cv::Matx<double, 9, 1>& r, cv::Matx<double, 9, 1>& delta);
/*
* @brief Inverse of SPD 3x3 A via lower triangular sqrt-free Cholesky: A = L*D*L'
* @param A The input matrix
* @param A1 The inverse
*/
static bool invertSPD3x3(const cv::Matx<double, 3, 3>& A, cv::Matx<double, 3, 3>& A1);
/*
* @brief Analytically computes the inverse of a symmetric 3x3 matrix using the
* lower triangle.

View File

@ -14,6 +14,9 @@ libraries. The following modules are available:
- @ref imgproc (**imgproc**) - an image processing module that includes linear and non-linear image filtering,
geometrical image transformations (resize, affine and perspective warping, generic table-based
remapping), color space conversion, histograms, and so on.
- @ref imgcodecs (**imgcodecs**) - includes functions for reading and writing image files in various formats.
- @ref videoio (**videoio**) - an easy-to-use interface to video capturing and video codecs.
- @ref highgui (**highgui**) - an easy-to-use interface to simple UI capabilities.
- @ref video (**video**) - a video analysis module that includes motion estimation, background subtraction,
and object tracking algorithms.
- @ref calib3d (**calib3d**) - basic multiple-view geometry algorithms, single and stereo camera calibration,
@ -21,8 +24,11 @@ libraries. The following modules are available:
- @ref features2d (**features2d**) - salient feature detectors, descriptors, and descriptor matchers.
- @ref objdetect (**objdetect**) - detection of objects and instances of the predefined classes (for example,
faces, eyes, mugs, people, cars, and so on).
- @ref highgui (**highgui**) - an easy-to-use interface to simple UI capabilities.
- @ref videoio (**videoio**) - an easy-to-use interface to video capturing and video codecs.
- @ref dnn (**dnn**) - Deep Neural Network module.
- @ref ml (**ml**) - The Machine Learning module includes a set of classes and functions for statistical classification,
regression, and clustering of data.
- @ref photo (**photo**) - advanced photo processing techniques like denoising and inpainting.
- @ref stitching (**stitching**) - functions for image stitching and panorama creation.
- ... some other helper modules, such as FLANN and Google test wrappers, Python bindings, and
others.

View File

@ -60,11 +60,16 @@
/**
@defgroup core Core functionality
The Core module is the backbone of OpenCV, offering fundamental data structures, matrix operations,
and utility functions that other modules depend on. It's essential for handling image data,
performing mathematical computations, and managing memory efficiently within the OpenCV ecosystem.
@{
@defgroup core_basic Basic structures
@defgroup core_array Operations on arrays
@defgroup core_async Asynchronous API
@defgroup core_xml XML/YAML Persistence
@defgroup core_xml XML/YAML/JSON Persistence
@defgroup core_cluster Clustering
@defgroup core_utils Utility and system functions and macros
@{
@ -76,7 +81,6 @@
@defgroup core_utils_samples Utility functions for OpenCV samples
@}
@defgroup core_opengl OpenGL interoperability
@defgroup core_ipp Intel IPP Asynchronous C/C++ Converters
@defgroup core_optim Optimization Algorithms
@defgroup core_directx DirectX interoperability
@defgroup core_eigen Eigen support
@ -96,6 +100,7 @@
@{
@defgroup core_parallel_backend Parallel backends API
@}
@defgroup core_quaternion Quaternion
@}
*/
@ -163,7 +168,7 @@ enum SortFlags { SORT_EVERY_ROW = 0, //!< each matrix row is sorted independe
//! @} core_utils
//! @addtogroup core
//! @addtogroup core_array
//! @{
//! Covariation flags
@ -202,27 +207,6 @@ enum CovarFlags {
COVAR_COLS = 16
};
//! @addtogroup core_cluster
//! @{
//! k-Means flags
enum KmeansFlags {
/** Select random initial centers in each attempt.*/
KMEANS_RANDOM_CENTERS = 0,
/** Use kmeans++ center initialization by Arthur and Vassilvitskii [Arthur2007].*/
KMEANS_PP_CENTERS = 2,
/** During the first (and possibly the only) attempt, use the
user-supplied labels instead of computing them from the initial centers. For the second and
further attempts, use the random or semi-random centers. Use one of KMEANS_\*_CENTERS flag
to specify the exact method.*/
KMEANS_USE_INITIAL_LABELS = 1
};
//! @} core_cluster
//! @addtogroup core_array
//! @{
enum ReduceTypes { REDUCE_SUM = 0, //!< the output is the sum of all rows/columns of the matrix.
REDUCE_AVG = 1, //!< the output is the mean vector of all rows/columns of the matrix.
REDUCE_MAX = 2, //!< the output is the maximum (column/row-wise) of all rows/columns of the matrix.
@ -230,19 +214,12 @@ enum ReduceTypes { REDUCE_SUM = 0, //!< the output is the sum of all rows/column
REDUCE_SUM2 = 4 //!< the output is the sum of all squared rows/columns of the matrix.
};
//! @} core_array
/** @brief Swaps two matrices
*/
CV_EXPORTS void swap(Mat& a, Mat& b);
/** @overload */
CV_EXPORTS void swap( UMat& a, UMat& b );
//! @} core
//! @addtogroup core_array
//! @{
/** @brief Computes the source location of an extrapolated pixel.
The function computes and returns the coordinate of a donor pixel corresponding to the specified
@ -557,6 +534,10 @@ The format of half precision floating point is defined in IEEE 754-2008.
*/
CV_EXPORTS_W void convertFp16(InputArray src, OutputArray dst);
/** @example samples/cpp/tutorial_code/core/how_to_scan_images/how_to_scan_images.cpp
Check @ref tutorial_how_to_scan_images "the corresponding tutorial" for more details
*/
/** @brief Performs a look-up table transform of an array.
The function LUT fills the output array with values from the look-up table. Indices of the entries
@ -3085,8 +3066,21 @@ private:
//! @addtogroup core_cluster
//! @{
//! k-means flags
enum KmeansFlags {
/** Select random initial centers in each attempt.*/
KMEANS_RANDOM_CENTERS = 0,
/** Use kmeans++ center initialization by Arthur and Vassilvitskii [Arthur2007].*/
KMEANS_PP_CENTERS = 2,
/** During the first (and possibly the only) attempt, use the
user-supplied labels instead of computing them from the initial centers. For the second and
further attempts, use the random or semi-random centers. Use one of KMEANS_\*_CENTERS flag
to specify the exact method.*/
KMEANS_USE_INITIAL_LABELS = 1
};
/** @example samples/cpp/kmeans.cpp
An example on K-means clustering
An example on k-means clustering
*/
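As a minimal illustration of these flags, a sketch that clusters six 2-D points into two groups with kmeans++ seeding (data values arbitrary):

#include <opencv2/core.hpp>

int main()
{
    // One float sample per row, as cv::kmeans expects.
    cv::Mat data = (cv::Mat_<float>(6, 2) << 0, 0,   1, 0,   0, 1,
                                             10, 10, 11, 10, 10, 11);
    cv::Mat labels, centers;
    cv::kmeans(data, 2, labels,
               cv::TermCriteria(cv::TermCriteria::EPS + cv::TermCriteria::COUNT, 10, 1.0),
               3 /*attempts*/, cv::KMEANS_PP_CENTERS, centers);
    return 0;
}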
/** @brief Finds centers of clusters and groups input samples around the clusters.
@ -3096,7 +3090,7 @@ and groups the input samples around the clusters. As an output, \f$\texttt{bestL
0-based cluster index for the sample stored in the \f$i^{th}\f$ row of the samples matrix.
@note
- (Python) An example on K-means clustering can be found at
- (Python) An example on k-means clustering can be found at
opencv_source_code/samples/python/kmeans.py
@param data Data for clustering. An array of N-Dimensional points with float coordinates is needed.
Examples of this array can be:

View File

@ -51,7 +51,7 @@
namespace cv
{
//! @addtogroup core
//! @addtogroup core_eigen
//! @{
/** @brief Affine transform

View File

@ -288,28 +288,28 @@ enum BorderTypes {
By default the function prints information about the error to stderr,
then it either stops if setBreakOnError() had been called before or raises the exception.
It is possible to alter error processing by using redirectError().
@param _code - error code (Error::Code)
@param _err - error description
@param _func - function name. Available only when the compiler supports getting it
@param _file - source file name where the error has occurred
@param _line - line number in the source file where the error has occurred
@param code - error code (Error::Code)
@param err - error description
@param func - function name. Available only when the compiler supports getting it
@param file - source file name where the error has occurred
@param line - line number in the source file where the error has occurred
@see CV_Error, CV_Error_, CV_Assert, CV_DbgAssert
*/
CV_EXPORTS CV_NORETURN void error(int _code, const String& _err, const char* _func, const char* _file, int _line);
CV_EXPORTS CV_NORETURN void error(int code, const String& err, const char* func, const char* file, int line);
/*! @brief Signals an error and terminates the application.
By default the function prints information about the error to stderr, then it terminates the application
with std::terminate. The function is designed for invariant checks in functions and methods with
the noexcept attribute.
@param _code - error code (Error::Code)
@param _err - error description
@param _func - function name. Available only when the compiler supports getting it
@param _file - source file name where the error has occurred
@param _line - line number in the source file where the error has occurred
@param code - error code (Error::Code)
@param err - error description
@param func - function name. Available only when the compiler supports getting it
@param file - source file name where the error has occurred
@param line - line number in the source file where the error has occurred
@see CV_AssertTerminate
*/
CV_EXPORTS CV_NORETURN void terminate(int _code, const String& _err, const char* _func, const char* _file, int _line) CV_NOEXCEPT;
CV_EXPORTS CV_NORETURN void terminate(int code, const String& err, const char* func, const char* file, int line) CV_NOEXCEPT;
#ifdef CV_STATIC_ANALYSIS
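A minimal sketch of the default behavior documented above: the error is packaged into a cv::Exception carrying the code, description, function, file, and line (message text arbitrary):

#include <opencv2/core.hpp>
#include <iostream>

int main()
{
    try
    {
        CV_Error(cv::Error::StsOutOfRange, "demo error"); // expands to cv::error(...)
    }
    catch (const cv::Exception& e)
    {
        std::cout << e.what() << std::endl; // formatted from code/err/func/file/line
    }
    return 0;
}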

View File

@ -15,7 +15,7 @@
namespace cv
{
//! @addtogroup core
//! @addtogroup core_opencl
//! @{
class BufferPoolController

View File

@ -928,6 +928,17 @@ typedef hfloat float16_t;
}
#endif
/** @brief Constructs the 'fourcc' code, used in video codecs and many other places.
Simply call it with 4 chars like `CV_FOURCC('I', 'Y', 'U', 'V')`
*/
CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)
{
return (c1 & 255) + ((c2 & 255) << 8) + ((c3 & 255) << 16) + ((c4 & 255) << 24);
}
//! Macro to construct the fourcc code of the codec. Same as CV_FOURCC()
#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24))
//! @}
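A small sketch of the packing: the inline function and the macro produce the same little-endian code, and cv::VideoWriter::fourcc is the equivalent C++ API (assuming videoio is available):

#include <opencv2/videoio.hpp>
#include <cassert>

int main()
{
    int a = CV_FOURCC('M', 'J', 'P', 'G');
    int b = cv::VideoWriter::fourcc('M', 'J', 'P', 'G');
    assert(a == b); // ('M') + ('J' << 8) + ('P' << 16) + ('G' << 24)
    return 0;
}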
#ifndef __cplusplus

View File

@ -30,7 +30,7 @@
#include <opencv2/core/affine.hpp>
namespace cv{
//! @addtogroup core
//! @addtogroup core_quaternion
//! @{
template <typename _Tp> class DualQuat;

View File

@ -191,6 +191,19 @@ CV_INTRIN_DEF_TYPE_TRAITS_NO_Q_TYPE(double, int64, uint64, double, void, double)
#endif // CV_CPU_OPTIMIZATION_HAL_NAMESPACE
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_BEGIN
template <typename _VecTp> inline _VecTp v_setzero_();
template <typename _VecTp> inline _VecTp v_setall_(uchar);
template <typename _VecTp> inline _VecTp v_setall_(schar);
template <typename _VecTp> inline _VecTp v_setall_(ushort);
template <typename _VecTp> inline _VecTp v_setall_(short);
template <typename _VecTp> inline _VecTp v_setall_(unsigned);
template <typename _VecTp> inline _VecTp v_setall_(int);
template <typename _VecTp> inline _VecTp v_setall_(uint64);
template <typename _VecTp> inline _VecTp v_setall_(int64);
template <typename _VecTp> inline _VecTp v_setall_(float);
template <typename _VecTp> inline _VecTp v_setall_(double);
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
using namespace CV_CPU_OPTIMIZATION_HAL_NAMESPACE;
#endif
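These forward declarations give the suffixed init functions a type-parameterized spelling, which lets template code stay generic over the vector type; a sketch assuming the fixed-size 128-bit types are available:

#include <opencv2/core/hal/intrin.hpp>

int main()
{
#if CV_SIMD128
    cv::v_float32x4 z = cv::v_setzero_<cv::v_float32x4>();   // same value as v_setzero_f32()
    cv::v_float32x4 a = cv::v_setall_<cv::v_float32x4>(2.f); // same value as v_setall_f32(2.f)
    cv::v_float32x4 r = cv::v_add(z, a);
    (void)r;
#endif
    return 0;
}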
@ -958,7 +971,6 @@ namespace CV__SIMD_NAMESPACE {
#define CV_SIMD 0
#endif
#include "intrin_math.hpp"
#include "simd_utils.impl.hpp"
#ifndef CV_DOXYGEN

View File

@ -447,6 +447,10 @@ OPENCV_HAL_IMPL_AVX_LOADSTORE_FLT(v_float64x4, double, pd, __m128d)
{ return _Tpvec(_mm256_setzero_si256()); } \
inline _Tpvec v256_setall_##suffix(_Tp v) \
{ return _Tpvec(_mm256_set1_##ssuffix((ctype_s)v)); } \
template <> inline _Tpvec v_setzero_() \
{ return v256_setzero_##suffix(); } \
template <> inline _Tpvec v_setall_(_Tp v) \
{ return v256_setall_##suffix(v); } \
OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint8x32, suffix, OPENCV_HAL_NOP) \
OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_int8x32, suffix, OPENCV_HAL_NOP) \
OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint16x16, suffix, OPENCV_HAL_NOP) \
@ -472,6 +476,10 @@ OPENCV_HAL_IMPL_AVX_INIT(v_int64x4, int64, s64, epi64x, int64)
{ return _Tpvec(_mm256_setzero_##zsuffix()); } \
inline _Tpvec v256_setall_##suffix(_Tp v) \
{ return _Tpvec(_mm256_set1_##zsuffix(v)); } \
template <> inline _Tpvec v_setzero_() \
{ return v256_setzero_##suffix(); } \
template <> inline _Tpvec v_setall_(_Tp v) \
{ return v256_setall_##suffix(v); } \
OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint8x32, suffix, cast) \
OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_int8x32, suffix, cast) \
OPENCV_HAL_IMPL_AVX_CAST(_Tpvec, v_uint16x16, suffix, cast) \
@ -3158,6 +3166,20 @@ inline void v_pack_store(hfloat* ptr, const v_float32x8& a)
inline void v256_cleanup() { _mm256_zeroall(); }
#include "intrin_math.hpp"
inline v_float32x8 v_exp(const v_float32x8& x) { return v_exp_default_32f<v_float32x8, v_int32x8>(x); }
inline v_float32x8 v_log(const v_float32x8& x) { return v_log_default_32f<v_float32x8, v_int32x8>(x); }
inline void v_sincos(const v_float32x8& x, v_float32x8& s, v_float32x8& c) { v_sincos_default_32f<v_float32x8, v_int32x8>(x, s, c); }
inline v_float32x8 v_sin(const v_float32x8& x) { return v_sin_default_32f<v_float32x8, v_int32x8>(x); }
inline v_float32x8 v_cos(const v_float32x8& x) { return v_cos_default_32f<v_float32x8, v_int32x8>(x); }
inline v_float32x8 v_erf(const v_float32x8& x) { return v_erf_default_32f<v_float32x8, v_int32x8>(x); }
inline v_float64x4 v_exp(const v_float64x4& x) { return v_exp_default_64f<v_float64x4, v_int64x4>(x); }
inline v_float64x4 v_log(const v_float64x4& x) { return v_log_default_64f<v_float64x4, v_int64x4>(x); }
inline void v_sincos(const v_float64x4& x, v_float64x4& s, v_float64x4& c) { v_sincos_default_64f<v_float64x4, v_int64x4>(x, s, c); }
inline v_float64x4 v_sin(const v_float64x4& x) { return v_sin_default_64f<v_float64x4, v_int64x4>(x); }
inline v_float64x4 v_cos(const v_float64x4& x) { return v_cos_default_64f<v_float64x4, v_int64x4>(x); }
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond

View File

@ -458,6 +458,10 @@ OPENCV_HAL_IMPL_AVX512_LOADSTORE_FLT(v_float64x8, double, pd, __m256d)
{ return _Tpvec(_mm512_setzero_si512()); } \
inline _Tpvec v512_setall_##suffix(_Tp v) \
{ return _Tpvec(_mm512_set1_##ssuffix((ctype_s)v)); } \
template <> inline _Tpvec v_setzero_() \
{ return v512_setzero_##suffix(); } \
template <> inline _Tpvec v_setall_(_Tp v) \
{ return v512_setall_##suffix(v); } \
OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint8x64, suffix, OPENCV_HAL_NOP) \
OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_int8x64, suffix, OPENCV_HAL_NOP) \
OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint16x32, suffix, OPENCV_HAL_NOP) \
@ -483,6 +487,10 @@ OPENCV_HAL_IMPL_AVX512_INIT(v_int64x8, int64, s64, epi64, int64)
{ return _Tpvec(_mm512_setzero_##zsuffix()); } \
inline _Tpvec v512_setall_##suffix(_Tp v) \
{ return _Tpvec(_mm512_set1_##zsuffix(v)); } \
template <> inline _Tpvec v_setzero_() \
{ return v512_setzero_##suffix(); } \
template <> inline _Tpvec v_setall_(_Tp v) \
{ return v512_setall_##suffix(v); } \
OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint8x64, suffix, cast) \
OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_int8x64, suffix, cast) \
OPENCV_HAL_IMPL_AVX512_CAST(_Tpvec, v_uint16x32, suffix, cast) \
@ -3070,6 +3078,20 @@ inline int v_scan_forward(const v_float64x8& a) { return trailingZeros32(v_signm
inline void v512_cleanup() { _mm256_zeroall(); }
#include "intrin_math.hpp"
inline v_float32x16 v_exp(const v_float32x16& x) { return v_exp_default_32f<v_float32x16, v_int32x16>(x); }
inline v_float32x16 v_log(const v_float32x16& x) { return v_log_default_32f<v_float32x16, v_int32x16>(x); }
inline void v_sincos(const v_float32x16& x, v_float32x16& s, v_float32x16& c) { v_sincos_default_32f<v_float32x16, v_int32x16>(x, s, c); }
inline v_float32x16 v_sin(const v_float32x16& x) { return v_sin_default_32f<v_float32x16, v_int32x16>(x); }
inline v_float32x16 v_cos(const v_float32x16& x) { return v_cos_default_32f<v_float32x16, v_int32x16>(x); }
inline v_float32x16 v_erf(const v_float32x16& x) { return v_erf_default_32f<v_float32x16, v_int32x16>(x); }
inline v_float64x8 v_exp(const v_float64x8& x) { return v_exp_default_64f<v_float64x8, v_int64x8>(x); }
inline v_float64x8 v_log(const v_float64x8& x) { return v_log_default_64f<v_float64x8, v_int64x8>(x); }
inline void v_sincos(const v_float64x8& x, v_float64x8& s, v_float64x8& c) { v_sincos_default_64f<v_float64x8, v_int64x8>(x, s, c); }
inline v_float64x8 v_sin(const v_float64x8& x) { return v_sin_default_64f<v_float64x8, v_int64x8>(x); }
inline v_float64x8 v_cos(const v_float64x8& x) { return v_cos_default_64f<v_float64x8, v_int64x8>(x); }
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond

View File

@ -263,8 +263,8 @@ Most of these operations return only one value.
### Other math
- Some frequent operations: @ref v_sqrt, @ref v_invsqrt, @ref v_magnitude, @ref v_sqr_magnitude, @ref v_exp,
@ref v_erf
- Some frequent operations: @ref v_sqrt, @ref v_invsqrt, @ref v_magnitude, @ref v_sqr_magnitude, @ref v_exp, @ref v_log,
@ref v_erf, @ref v_sin, @ref v_cos
- Absolute values: @ref v_abs, @ref v_absdiff, @ref v_absdiffs
### Conversions
@ -366,6 +366,7 @@ Floating point:
|broadcast_element | x | |
|exp | x | x |
|log | x | x |
|sin, cos | x | x |
@{ */
@ -745,10 +746,41 @@ OPENCV_HAL_IMPL_MATH_FUNC(v_log, std::log, _Tp)
*/
OPENCV_HAL_IMPL_MATH_FUNC(v_erf, std::erf, _Tp)
//! @cond IGNORED
/**
* @brief Compute sine \f$ sin(x) \f$ and cosine \f$ cos(x) \f$ of elements at the same time
*
* Only for floating point types. Core implementation steps:
* 1. Input Normalization: Scale the periodicity from 2π to 4 and reduce the angle to the range \f$ [0, \frac{\pi}{4}] \f$ using periodicity and trigonometric identities.
* 2. Polynomial Approximation for \f$ sin(x) \f$ and \f$ cos(x) \f$:
* - For float16 and float32, use a Taylor series with 4 terms for sine and 5 terms for cosine.
* - For float64, use a Taylor series with 7 terms for sine and 8 terms for cosine.
* 3. Select Results: select and convert the final sine and cosine values for the original input angle.
*
* @note The precision of the calculation depends on the implementation and the data type of the input vector.
*/
template<typename _Tp, int n>
inline void v_sincos(const v_reg<_Tp, n>& x, v_reg<_Tp, n>& s, v_reg<_Tp, n>& c)
{
for( int i = 0; i < n; i++ )
{
s.s[i] = std::sin(x.s[i]);
c.s[i] = std::cos(x.s[i]);
}
}
/**
* @brief Sine \f$ sin(x) \f$ of elements
*
* Only for floating point types. Core implementation the same as @ref v_sincos.
*/
OPENCV_HAL_IMPL_MATH_FUNC(v_sin, std::sin, _Tp)
/**
* @brief Cosine \f$ cos(x) \f$ of elements
*
* Only for floating point types. Core implementation the same as @ref v_sincos.
*/
OPENCV_HAL_IMPL_MATH_FUNC(v_cos, std::cos, _Tp)
//! @endcond
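A usage sketch for the element-wise trig intrinsics documented above, assuming the vx_/VTraits universal-intrinsics API (tolerance chosen loosely for the polynomial approximation):

#include <opencv2/core/hal/intrin.hpp>
#include <cassert>
#include <cmath>

int main()
{
#if (CV_SIMD || CV_SIMD_SCALABLE)
    float buf[cv::VTraits<cv::v_float32>::max_nlanes];
    cv::v_float32 x = cv::vx_setall_f32(0.5f);
    cv::v_float32 s, c;
    cv::v_sincos(x, s, c);   // sine and cosine in one pass
    cv::v_store(buf, s);
    assert(std::fabs(buf[0] - std::sin(0.5f)) < 1e-4f);
#endif
    return 0;
}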
/** @brief Absolute value of elements
@ -2801,7 +2833,8 @@ inline void v_transpose4x4( v_reg<_Tp, n>& a0, const v_reg<_Tp, n>& a1,
//! @brief Helper macro
//! @ingroup core_hal_intrin_impl
#define OPENCV_HAL_IMPL_C_INIT_ZERO(_Tpvec, prefix, suffix) \
inline _Tpvec prefix##_setzero_##suffix() { return _Tpvec::zero(); }
inline _Tpvec prefix##_setzero_##suffix() { return _Tpvec::zero(); } \
template <> inline _Tpvec v_setzero_() { return _Tpvec::zero(); }
//! @name Init with zero
//! @{
@ -2847,7 +2880,8 @@ OPENCV_HAL_IMPL_C_INIT_ZERO(v_int64x8, v512, s64)
//! @brief Helper macro
//! @ingroup core_hal_intrin_impl
#define OPENCV_HAL_IMPL_C_INIT_VAL(_Tpvec, _Tp, prefix, suffix) \
inline _Tpvec prefix##_setall_##suffix(_Tp val) { return _Tpvec::all(val); }
inline _Tpvec prefix##_setall_##suffix(_Tp val) { return _Tpvec::all(val); } \
template <> inline _Tpvec v_setall_(_Tp val) { return _Tpvec::all(val); }
//! @name Init with value
//! @{

View File

@ -557,6 +557,10 @@ inline __m256i _lasx_256_castpd_si256(const __m256d& v)
{ return _Tpvec(__lasx_xvreplgr2vr_d(0)); } \
inline _Tpvec v256_setall_##suffix(_Tp v) \
{ return _Tpvec(__lasx_xvreplgr2vr_##ssuffix((ctype_s)v)); } \
template <> inline _Tpvec v_setzero_() \
{ return v256_setzero_##suffix(); } \
template <> inline _Tpvec v_setall_(_Tp v) \
{ return v256_setall_##suffix(v); } \
OPENCV_HAL_IMPL_LASX_CAST(_Tpvec, v_uint8x32, suffix, OPENCV_HAL_NOP) \
OPENCV_HAL_IMPL_LASX_CAST(_Tpvec, v_int8x32, suffix, OPENCV_HAL_NOP) \
OPENCV_HAL_IMPL_LASX_CAST(_Tpvec, v_uint16x16, suffix, OPENCV_HAL_NOP) \
@ -588,7 +592,11 @@ inline __m256d _lasx_256_castsi256_pd(const __m256i &v)
inline _Tpvec v256_setzero_##suffix() \
{ return _Tpvec(__lasx_xvreplgr2vr_d(0)); } \
inline _Tpvec v256_setall_##suffix(_Tp v) \
{ return _Tpvec(_v256_setall_##zsuffix(v)); } \
{ return _Tpvec(_v256_setall_##zsuffix(v)); } \
template <> inline _Tpvec v_setzero_() \
{ return v256_setzero_##suffix(); } \
template <> inline _Tpvec v_setall_(_Tp v) \
{ return v256_setall_##suffix(v); } \
OPENCV_HAL_IMPL_LASX_CAST(_Tpvec, v_uint8x32, suffix, cast) \
OPENCV_HAL_IMPL_LASX_CAST(_Tpvec, v_int8x32, suffix, cast) \
OPENCV_HAL_IMPL_LASX_CAST(_Tpvec, v_uint16x16, suffix, cast) \
@ -3005,6 +3013,20 @@ inline void v_pack_store(hfloat* ptr, const v_float32x8& a)
inline void v256_cleanup() {}
#include "intrin_math.hpp"
inline v_float32x8 v_exp(const v_float32x8& x) { return v_exp_default_32f<v_float32x8, v_int32x8>(x); }
inline v_float32x8 v_log(const v_float32x8& x) { return v_log_default_32f<v_float32x8, v_int32x8>(x); }
inline void v_sincos(const v_float32x8& x, v_float32x8& s, v_float32x8& c) { v_sincos_default_32f<v_float32x8, v_int32x8>(x, s, c); }
inline v_float32x8 v_sin(const v_float32x8& x) { return v_sin_default_32f<v_float32x8, v_int32x8>(x); }
inline v_float32x8 v_cos(const v_float32x8& x) { return v_cos_default_32f<v_float32x8, v_int32x8>(x); }
inline v_float32x8 v_erf(const v_float32x8& x) { return v_erf_default_32f<v_float32x8, v_int32x8>(x); }
inline v_float64x4 v_exp(const v_float64x4& x) { return v_exp_default_64f<v_float64x4, v_int64x4>(x); }
inline v_float64x4 v_log(const v_float64x4& x) { return v_log_default_64f<v_float64x4, v_int64x4>(x); }
inline void v_sincos(const v_float64x4& x, v_float64x4& s, v_float64x4& c) { v_sincos_default_64f<v_float64x4, v_int64x4>(x, s, c); }
inline v_float64x4 v_sin(const v_float64x4& x) { return v_sin_default_64f<v_float64x4, v_int64x4>(x); }
inline v_float64x4 v_cos(const v_float64x4& x) { return v_cos_default_64f<v_float64x4, v_int64x4>(x); }
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond

View File

@ -417,6 +417,10 @@ inline __m128i _lsx_128_castpd_si128(const __m128d& v)
{ return _Tpvec(__lsx_vldi(0)); } \
inline _Tpvec v_setall_##suffix(_Tp v) \
{ return _Tpvec(__lsx_vreplgr2vr_##ssuffix((ctype_s)v)); } \
template <> inline _Tpvec v_setzero_() \
{ return v_setzero_##suffix(); } \
template <> inline _Tpvec v_setall_(_Tp v) \
{ return v_setall_##suffix(v); } \
OPENCV_HAL_IMPL_LSX_CAST(_Tpvec, v_uint8x16, suffix, OPENCV_HAL_NOP) \
OPENCV_HAL_IMPL_LSX_CAST(_Tpvec, v_int8x16, suffix, OPENCV_HAL_NOP) \
OPENCV_HAL_IMPL_LSX_CAST(_Tpvec, v_uint16x8, suffix, OPENCV_HAL_NOP) \
@ -448,6 +452,10 @@ inline __m128d _lsx_128_castsi128_pd(const __m128i &v)
{ return _Tpvec(__lsx_vldi(0)); } \
inline _Tpvec v_setall_##suffix(_Tp v) \
{ return _Tpvec(_v128_setall_##zsuffix(v)); } \
template <> inline _Tpvec v_setzero_() \
{ return v_setzero_##suffix(); } \
template <> inline _Tpvec v_setall_(_Tp v) \
{ return v_setall_##suffix(v); } \
OPENCV_HAL_IMPL_LSX_CAST(_Tpvec, v_uint8x16, suffix, cast) \
OPENCV_HAL_IMPL_LSX_CAST(_Tpvec, v_int8x16, suffix, cast) \
OPENCV_HAL_IMPL_LSX_CAST(_Tpvec, v_uint16x8, suffix, cast) \
@ -2515,6 +2523,20 @@ inline void v_pack_store(hfloat* ptr, const v_float32x4& a)
inline void v_cleanup() {}
#include "intrin_math.hpp"
inline v_float32x4 v_exp(const v_float32x4& x) { return v_exp_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_log(const v_float32x4& x) { return v_log_default_32f<v_float32x4, v_int32x4>(x); }
inline void v_sincos(const v_float32x4& x, v_float32x4& s, v_float32x4& c) { v_sincos_default_32f<v_float32x4, v_int32x4>(x, s, c); }
inline v_float32x4 v_sin(const v_float32x4& x) { return v_sin_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_cos(const v_float32x4& x) { return v_cos_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_erf(const v_float32x4& x) { return v_erf_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float64x2 v_exp(const v_float64x2& x) { return v_exp_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_log(const v_float64x2& x) { return v_log_default_64f<v_float64x2, v_int64x2>(x); }
inline void v_sincos(const v_float64x2& x, v_float64x2& s, v_float64x2& c) { v_sincos_default_64f<v_float64x2, v_int64x2>(x, s, c); }
inline v_float64x2 v_sin(const v_float64x2& x) { return v_sin_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_cos(const v_float64x2& x) { return v_cos_default_64f<v_float64x2, v_int64x2>(x); }
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond

File diff suppressed because it is too large

View File

@ -235,6 +235,8 @@ struct v_float64x2
#define OPENCV_HAL_IMPL_MSA_INIT(_Tpv, _Tp, suffix) \
inline v_##_Tpv v_setzero_##suffix() { return v_##_Tpv(msa_dupq_n_##suffix((_Tp)0)); } \
inline v_##_Tpv v_setall_##suffix(_Tp v) { return v_##_Tpv(msa_dupq_n_##suffix(v)); } \
template <> inline v_##_Tpv v_setzero_() { return v_setzero_##suffix(); } \
template <> inline v_##_Tpv v_setall_(_Tp v) { return v_setall_##suffix(v); } \
inline v_uint8x16 v_reinterpret_as_u8(const v_##_Tpv& v) { return v_uint8x16(MSA_TPV_REINTERPRET(v16u8, v.val)); } \
inline v_int8x16 v_reinterpret_as_s8(const v_##_Tpv& v) { return v_int8x16(MSA_TPV_REINTERPRET(v16i8, v.val)); } \
inline v_uint16x8 v_reinterpret_as_u16(const v_##_Tpv& v) { return v_uint16x8(MSA_TPV_REINTERPRET(v8u16, v.val)); } \
@ -1861,6 +1863,20 @@ inline void v_pack_store(hfloat* ptr, const v_float32x4& v)
inline void v_cleanup() {}
#include "intrin_math.hpp"
inline v_float32x4 v_exp(const v_float32x4& x) { return v_exp_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_log(const v_float32x4& x) { return v_log_default_32f<v_float32x4, v_int32x4>(x); }
inline void v_sincos(const v_float32x4& x, v_float32x4& s, v_float32x4& c) { v_sincos_default_32f<v_float32x4, v_int32x4>(x, s, c); }
inline v_float32x4 v_sin(const v_float32x4& x) { return v_sin_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_cos(const v_float32x4& x) { return v_cos_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_erf(const v_float32x4& x) { return v_erf_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float64x2 v_exp(const v_float64x2& x) { return v_exp_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_log(const v_float64x2& x) { return v_log_default_64f<v_float64x2, v_int64x2>(x); }
inline void v_sincos(const v_float64x2& x, v_float64x2& s, v_float64x2& c) { v_sincos_default_64f<v_float64x2, v_int64x2>(x, s, c); }
inline v_float64x2 v_sin(const v_float64x2& x) { return v_sin_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_cos(const v_float64x2& x) { return v_cos_default_64f<v_float64x2, v_int64x2>(x); }
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond

View File

@ -381,6 +381,8 @@ private:
#define OPENCV_HAL_IMPL_NEON_INIT(_Tpv, _Tp, suffix) \
inline v_##_Tpv v_setzero_##suffix() { return v_##_Tpv(vdupq_n_##suffix((_Tp)0)); } \
inline v_##_Tpv v_setall_##suffix(_Tp v) { return v_##_Tpv(vdupq_n_##suffix(v)); } \
template <> inline v_##_Tpv v_setzero_() { return v_setzero_##suffix(); } \
template <> inline v_##_Tpv v_setall_(_Tp v) { return v_setall_##suffix(v); } \
inline _Tpv##_t vreinterpretq_##suffix##_##suffix(_Tpv##_t v) { return v; } \
inline v_uint8x16 v_reinterpret_as_u8(const v_##_Tpv& v) { return v_uint8x16(vreinterpretq_u8_##suffix(v.val)); } \
inline v_int8x16 v_reinterpret_as_s8(const v_##_Tpv& v) { return v_int8x16(vreinterpretq_s8_##suffix(v.val)); } \
@ -2646,6 +2648,28 @@ inline void v_pack_store(hfloat* ptr, const v_float32x4& v)
inline void v_cleanup() {}
#include "intrin_math.hpp"
#if defined(CV_SIMD_FP16) && CV_SIMD_FP16
inline v_float16x8 v_exp(const v_float16x8& x) { return v_exp_default_16f<v_float16x8, v_int16x8>(x); }
inline v_float16x8 v_log(const v_float16x8& x) { return v_log_default_16f<v_float16x8, v_int16x8>(x); }
inline void v_sincos(const v_float16x8& x, v_float16x8& s, v_float16x8& c) { v_sincos_default_16f<v_float16x8, v_int16x8>(x, s, c); }
inline v_float16x8 v_sin(const v_float16x8& x) { return v_sin_default_16f<v_float16x8, v_int16x8>(x); }
inline v_float16x8 v_cos(const v_float16x8& x) { return v_cos_default_16f<v_float16x8, v_int16x8>(x); }
#endif
inline v_float32x4 v_exp(const v_float32x4& x) { return v_exp_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_log(const v_float32x4& x) { return v_log_default_32f<v_float32x4, v_int32x4>(x); }
inline void v_sincos(const v_float32x4& x, v_float32x4& s, v_float32x4& c) { v_sincos_default_32f<v_float32x4, v_int32x4>(x, s, c); }
inline v_float32x4 v_sin(const v_float32x4& x) { return v_sin_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_cos(const v_float32x4& x) { return v_cos_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_erf(const v_float32x4& x) { return v_erf_default_32f<v_float32x4, v_int32x4>(x); }
#if CV_SIMD128_64F
inline v_float64x2 v_exp(const v_float64x2& x) { return v_exp_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_log(const v_float64x2& x) { return v_log_default_64f<v_float64x2, v_int64x2>(x); }
inline void v_sincos(const v_float64x2& x, v_float64x2& s, v_float64x2& c) { v_sincos_default_64f<v_float64x2, v_int64x2>(x, s, c); }
inline v_float64x2 v_sin(const v_float64x2& x) { return v_sin_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_cos(const v_float64x2& x) { return v_cos_default_64f<v_float64x2, v_int64x2>(x); }
#endif
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond
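Each backend now forwards the vector math functions (`v_exp`, `v_log`, `v_sincos`, `v_sin`, `v_cos`, `v_erf`) to the shared reference implementations in `intrin_math.hpp`. A hedged sketch of how a caller would consume the new `v_sincos`, assuming the universal-intrinsics loads and stores (buffer names are illustrative):
@code{.cpp}
#include <cmath>

// Fill sn/cs with sin(x)/cos(x) for a float buffer: vector body plus scalar tail.
void sincos_buffer(const float* src, float* sn, float* cs, int n)
{
    int i = 0;
    const int vl = VTraits<v_float32>::vlanes();
    for (; i + vl <= n; i += vl)
    {
        v_float32 x = vx_load(src + i), s, c;
        v_sincos(x, s, c);        // one call yields both results
        v_store(sn + i, s);
        v_store(cs + i, c);
    }
    for (; i < n; ++i)            // scalar tail
    {
        sn[i] = std::sin(src[i]);
        cs[i] = std::cos(src[i]);
    }
}
@endcode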

View File

@ -355,10 +355,12 @@ inline v_float64x2 v_reinterpret_as_f64(const v_float64x2& v) { return v_float64
#define OPENCV_HAL_IMPL_RISCVV_INIT_SET(__Tp, _Tp, suffix, len, num) \
inline v_##_Tp##x##num v_setzero_##suffix() { return v_##_Tp##x##num(vmv_v_x_##len##m1(0, num)); } \
inline v_##_Tp##x##num v_setall_##suffix(__Tp v) { return v_##_Tp##x##num(vmv_v_x_##len##m1(v, num)); }
inline v_##_Tp##x##num v_setall_##suffix(__Tp v) { return v_##_Tp##x##num(vmv_v_x_##len##m1(v, num)); } \
template <> inline v_##_Tp##x##num v_setzero_() { return v_setzero_##suffix(); } \
template <> inline v_##_Tp##x##num v_setall_(__Tp v) { return v_setall_##suffix(v); }
OPENCV_HAL_IMPL_RISCVV_INIT_SET(uchar, uint8, u8, u8, 16)
OPENCV_HAL_IMPL_RISCVV_INIT_SET(char, int8, s8, i8, 16)
OPENCV_HAL_IMPL_RISCVV_INIT_SET(schar, int8, s8, i8, 16)
OPENCV_HAL_IMPL_RISCVV_INIT_SET(ushort, uint16, u16, u16, 8)
OPENCV_HAL_IMPL_RISCVV_INIT_SET(short, int16, s16, i16, 8)
OPENCV_HAL_IMPL_RISCVV_INIT_SET(unsigned int, uint32, u32, u32, 4)
@ -371,6 +373,11 @@ inline v_float32x4 v_setall_f32(float v) { return v_float32x4(vfmv_v_f_f32m1(v,
inline v_float64x2 v_setzero_f64() { return v_float64x2(vfmv_v_f_f64m1(0, 2)); }
inline v_float64x2 v_setall_f64(double v) { return v_float64x2(vfmv_v_f_f64m1(v, 2)); }
template <> inline v_float32x4 v_setzero_() { return v_setzero_f32(); }
template <> inline v_float32x4 v_setall_(float v) { return v_setall_f32(v); }
template <> inline v_float64x2 v_setzero_() { return v_setzero_f64(); }
template <> inline v_float64x2 v_setall_(double v) { return v_setall_f64(v); }
#define OPENCV_HAL_IMPL_RISCVV_BIN_OP(bin_op, _Tpvec, intrin) \
inline _Tpvec bin_op(const _Tpvec& a, const _Tpvec& b) \
@ -2859,6 +2866,20 @@ inline void v_pack_store(hfloat* ptr, const v_float32x4& v)
inline void v_cleanup() {}
#include "intrin_math.hpp"
inline v_float32x4 v_exp(const v_float32x4& x) { return v_exp_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_log(const v_float32x4& x) { return v_log_default_32f<v_float32x4, v_int32x4>(x); }
inline void v_sincos(const v_float32x4& x, v_float32x4& s, v_float32x4& c) { v_sincos_default_32f<v_float32x4, v_int32x4>(x, s, c); }
inline v_float32x4 v_sin(const v_float32x4& x) { return v_sin_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_cos(const v_float32x4& x) { return v_cos_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_erf(const v_float32x4& x) { return v_erf_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float64x2 v_exp(const v_float64x2& x) { return v_exp_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_log(const v_float64x2& x) { return v_log_default_64f<v_float64x2, v_int64x2>(x); }
inline void v_sincos(const v_float64x2& x, v_float64x2& s, v_float64x2& c) { v_sincos_default_64f<v_float64x2, v_int64x2>(x, s, c); }
inline v_float64x2 v_sin(const v_float64x2& x) { return v_sin_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_cos(const v_float64x2& x) { return v_cos_default_64f<v_float64x2, v_int64x2>(x); }
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond

View File

@ -160,6 +160,14 @@ inline v_##_Tpvec v_setzero_##suffix1() \
inline v_##_Tpvec v_setall_##suffix1(_Tp v) \
{ \
return __riscv_vmv_v_x_##suffix2##m1(v, vl); \
} \
template <> inline v_##_Tpvec v_setzero_() \
{ \
return v_setzero_##suffix1(); \
} \
template <> inline v_##_Tpvec v_setall_(_Tp v) \
{ \
return v_setall_##suffix1(v); \
}
OPENCV_HAL_IMPL_RVV_INIT_INTEGER(uint8, uchar, u8, u8, VTraits<v_int8>::vlanes())
@ -179,6 +187,14 @@ inline v_##_Tpv v_setzero_##suffix() \
inline v_##_Tpv v_setall_##suffix(_Tp v) \
{ \
return __riscv_vfmv_v_f_##suffix##m1(v, vl); \
} \
template <> inline v_##_Tpv v_setzero_() \
{ \
return v_setzero_##suffix(); \
} \
template <> inline v_##_Tpv v_setall_(_Tp v) \
{ \
return v_setall_##suffix(v); \
}
OPENCV_HAL_IMPL_RVV_INIT_FP(float32, float, f32, VTraits<v_float32>::vlanes())
@ -2164,6 +2180,20 @@ inline v_float32 v_matmuladd(const v_float32& v, const v_float32& m0,
inline void v_cleanup() {}
#include "intrin_math.hpp"
inline v_float32 v_exp(const v_float32& x) { return v_exp_default_32f<v_float32, v_int32>(x); }
inline v_float32 v_log(const v_float32& x) { return v_log_default_32f<v_float32, v_int32>(x); }
inline void v_sincos(const v_float32& x, v_float32& s, v_float32& c) { v_sincos_default_32f<v_float32, v_int32>(x, s, c); }
inline v_float32 v_sin(const v_float32& x) { return v_sin_default_32f<v_float32, v_int32>(x); }
inline v_float32 v_cos(const v_float32& x) { return v_cos_default_32f<v_float32, v_int32>(x); }
inline v_float32 v_erf(const v_float32& x) { return v_erf_default_32f<v_float32, v_int32>(x); }
inline v_float64 v_exp(const v_float64& x) { return v_exp_default_64f<v_float64, v_int64>(x); }
inline v_float64 v_log(const v_float64& x) { return v_log_default_64f<v_float64, v_int64>(x); }
inline void v_sincos(const v_float64& x, v_float64& s, v_float64& c) { v_sincos_default_64f<v_float64, v_int64>(x, s, c); }
inline v_float64 v_sin(const v_float64& x) { return v_sin_default_64f<v_float64, v_int64>(x); }
inline v_float64 v_cos(const v_float64& x) { return v_cos_default_64f<v_float64, v_int64>(x); }
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond

View File

@ -347,6 +347,8 @@ namespace hal_sse_internal
#define OPENCV_HAL_IMPL_SSE_INITVEC(_Tpvec, _Tp, suffix, zsuffix, ssuffix, _Tps, cast) \
inline _Tpvec v_setzero_##suffix() { return _Tpvec(_mm_setzero_##zsuffix()); } \
inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(_mm_set1_##ssuffix((_Tps)v)); } \
template <> inline _Tpvec v_setzero_() { return v_setzero_##suffix(); } \
template <> inline _Tpvec v_setall_(_Tp v) { return v_setall_##suffix(v); } \
template<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0& a) \
{ return _Tpvec(cast(a.val)); }
@ -364,6 +366,11 @@ inline v_int64x2 v_setzero_s64() { return v_int64x2(_mm_setzero_si128()); }
inline v_uint64x2 v_setall_u64(uint64 val) { return v_uint64x2(val, val); }
inline v_int64x2 v_setall_s64(int64 val) { return v_int64x2(val, val); }
template <> inline v_uint64x2 v_setzero_() { return v_setzero_u64(); }
template <> inline v_int64x2 v_setzero_() { return v_setzero_s64(); }
template <> inline v_uint64x2 v_setall_(uint64 val) { return v_setall_u64(val); }
template <> inline v_int64x2 v_setall_(int64 val) { return v_setall_s64(val); }
template<typename _Tpvec> inline
v_uint64x2 v_reinterpret_as_u64(const _Tpvec& a) { return v_uint64x2(a.val); }
template<typename _Tpvec> inline
@ -3452,6 +3459,21 @@ inline void v_pack_store(hfloat* ptr, const v_float32x4& v)
inline void v_cleanup() {}
#include "intrin_math.hpp"
inline v_float32x4 v_exp(const v_float32x4& x) { return v_exp_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_log(const v_float32x4& x) { return v_log_default_32f<v_float32x4, v_int32x4>(x); }
inline void v_sincos(const v_float32x4& x, v_float32x4& s, v_float32x4& c) { v_sincos_default_32f<v_float32x4, v_int32x4>(x, s, c); }
inline v_float32x4 v_sin(const v_float32x4& x) { return v_sin_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_cos(const v_float32x4& x) { return v_cos_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_erf(const v_float32x4& x) { return v_erf_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float64x2 v_exp(const v_float64x2& x) { return v_exp_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_log(const v_float64x2& x) { return v_log_default_64f<v_float64x2, v_int64x2>(x); }
inline void v_sincos(const v_float64x2& x, v_float64x2& s, v_float64x2& c) { v_sincos_default_64f<v_float64x2, v_int64x2>(x, s, c); }
inline v_float64x2 v_sin(const v_float64x2& x) { return v_sin_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_cos(const v_float64x2& x) { return v_cos_default_64f<v_float64x2, v_int64x2>(x); }
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond

View File

@ -261,6 +261,8 @@ OPENCV_HAL_IMPL_VSX_EXTRACT_N(v_float64x2, double)
#define OPENCV_HAL_IMPL_VSX_INITVEC(_Tpvec, _Tp, suffix, cast) \
inline _Tpvec v_setzero_##suffix() { return _Tpvec(vec_splats((_Tp)0)); } \
inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(vec_splats((_Tp)v));} \
template <> inline _Tpvec v_setzero_() { return v_setzero_##suffix(); } \
template <> inline _Tpvec v_setall_(_Tp v) { return v_setall_##suffix(v); } \
template<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0 &a) \
{ return _Tpvec((cast)a.val); }
@ -1594,6 +1596,19 @@ template<int i, typename Tvec>
inline Tvec v_broadcast_element(const Tvec& v)
{ return Tvec(vec_splat(v.val, i)); }
#include "intrin_math.hpp"
inline v_float32x4 v_exp(const v_float32x4& x) { return v_exp_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_log(const v_float32x4& x) { return v_log_default_32f<v_float32x4, v_int32x4>(x); }
inline void v_sincos(const v_float32x4& x, v_float32x4& s, v_float32x4& c) { v_sincos_default_32f<v_float32x4, v_int32x4>(x, s, c); }
inline v_float32x4 v_sin(const v_float32x4& x) { return v_sin_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_cos(const v_float32x4& x) { return v_cos_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_erf(const v_float32x4& x) { return v_erf_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float64x2 v_exp(const v_float64x2& x) { return v_exp_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_log(const v_float64x2& x) { return v_log_default_64f<v_float64x2, v_int64x2>(x); }
inline void v_sincos(const v_float64x2& x, v_float64x2& s, v_float64x2& c) { v_sincos_default_64f<v_float64x2, v_int64x2>(x, s, c); }
inline v_float64x2 v_sin(const v_float64x2& x) { return v_sin_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_cos(const v_float64x2& x) { return v_cos_default_64f<v_float64x2, v_int64x2>(x); }
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END

View File

@ -8,9 +8,18 @@
#include <limits>
#include <cstring>
#include <algorithm>
#include <emscripten/version.h>
#include "opencv2/core/saturate.hpp"
// Emscripten v2.0.13 (latest officially supported, as of 07/30/2024):
// __EMSCRIPTEN_major__, __EMSCRIPTEN_minor__ and __EMSCRIPTEN_tiny__ are defined via commandline in
// https://github.com/emscripten-core/emscripten/blob/1690a5802cd1241adc9714fb7fa2f633d38860dc/tools/shared.py#L506-L515
//
// See https://github.com/opencv/opencv/pull/25909
#ifndef __EMSCRIPTEN_major__
#include <emscripten/version.h>
#endif
#define CV_SIMD128 1
#define CV_SIMD128_64F 0 // All f64 implementations currently use the fallback, so disable it.
#define CV_SIMD128_FP16 0
@ -392,6 +401,8 @@ inline v128_t v128_cvti32x4_i64x2_high(const v128_t& a)
#define OPENCV_HAL_IMPL_WASM_INITVEC(_Tpvec, _Tp, suffix, zsuffix, _Tps) \
inline _Tpvec v_setzero_##suffix() { return _Tpvec(wasm_##zsuffix##_splat((_Tps)0)); } \
inline _Tpvec v_setall_##suffix(_Tp v) { return _Tpvec(wasm_##zsuffix##_splat((_Tps)v)); } \
template <> inline _Tpvec v_setzero_() { return v_setzero_##suffix(); } \
template <> inline _Tpvec v_setall_(_Tp v) { return v_setall_##suffix(v); } \
template<typename _Tpvec0> inline _Tpvec v_reinterpret_as_##suffix(const _Tpvec0& a) \
{ return _Tpvec(a.val); }
@ -2767,6 +2778,20 @@ inline void v_pack_store(hfloat* ptr, const v_float32x4& v)
inline void v_cleanup() {}
#include "intrin_math.hpp"
inline v_float32x4 v_exp(const v_float32x4& x) { return v_exp_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_log(const v_float32x4& x) { return v_log_default_32f<v_float32x4, v_int32x4>(x); }
inline void v_sincos(const v_float32x4& x, v_float32x4& s, v_float32x4& c) { v_sincos_default_32f<v_float32x4, v_int32x4>(x, s, c); }
inline v_float32x4 v_sin(const v_float32x4& x) { return v_sin_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_cos(const v_float32x4& x) { return v_cos_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float32x4 v_erf(const v_float32x4& x) { return v_erf_default_32f<v_float32x4, v_int32x4>(x); }
inline v_float64x2 v_exp(const v_float64x2& x) { return v_exp_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_log(const v_float64x2& x) { return v_log_default_64f<v_float64x2, v_int64x2>(x); }
inline void v_sincos(const v_float64x2& x, v_float64x2& s, v_float64x2& c) { v_sincos_default_64f<v_float64x2, v_int64x2>(x, s, c); }
inline v_float64x2 v_sin(const v_float64x2& x) { return v_sin_default_64f<v_float64x2, v_int64x2>(x); }
inline v_float64x2 v_cos(const v_float64x2& x) { return v_cos_default_64f<v_float64x2, v_int64x2>(x); }
CV_CPU_OPTIMIZATION_HAL_NAMESPACE_END
//! @endcond

View File

@ -446,6 +446,22 @@ typedef OutputArray OutputArrayOfArrays;
typedef const _InputOutputArray& InputOutputArray;
typedef InputOutputArray InputOutputArrayOfArrays;
/** @brief Returns an empty InputArray or OutputArray.
Many OpenCV functions accept optional arguments typed as `cv::InputArray` or `cv::OutputArray`.
When you do not want to pass any data for such a parameter, use `cv::noArray()` to indicate
that the argument is omitted.
@return An empty `cv::InputArray` or `cv::OutputArray` that can be used as a placeholder.
@see cv::InputArray, cv::OutputArray
*/
CV_EXPORTS InputOutputArray noArray();
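A short usage sketch, assuming the standard `cv::add` overload that takes an optional mask:
@code{.cpp}
cv::Mat a = cv::Mat::ones(3, 3, CV_8U), b = cv::Mat::ones(3, 3, CV_8U), sum;
cv::add(a, b, sum, cv::noArray());   // the optional mask argument is omitted
@endcode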
/////////////////////////////////// MatAllocator //////////////////////////////////////

View File

@ -508,7 +508,7 @@ The generic function partition implements an \f$O(N^2)\f$ algorithm for splittin
into one or more equivalency classes, as described in
<http://en.wikipedia.org/wiki/Disjoint-set_data_structure> . The function returns the number of
equivalency classes.
@param _vec Set of elements stored as a vector.
@param vec Set of elements stored as a vector.
@param labels Output vector of labels. It contains as many elements as vec. Each label labels[i] is
a 0-based cluster index of `vec[i]`.
@param predicate Equivalence predicate (pointer to a boolean function of two arguments or an
@ -518,11 +518,11 @@ may or may not be in the same class.
@ingroup core_cluster
*/
template<typename _Tp, class _EqPredicate> int
partition( const std::vector<_Tp>& _vec, std::vector<int>& labels,
partition( const std::vector<_Tp>& vec, std::vector<int>& labels,
_EqPredicate predicate=_EqPredicate())
{
int i, j, N = (int)_vec.size();
const _Tp* vec = &_vec[0];
int i, j, N = (int)vec.size();
const _Tp* _vec = &vec[0];
const int PARENT=0;
const int RANK=1;
@ -548,7 +548,7 @@ partition( const std::vector<_Tp>& _vec, std::vector<int>& labels,
for( j = 0; j < N; j++ )
{
if( i == j || !predicate(vec[i], vec[j]))
if( i == j || !predicate(_vec[i], _vec[j]))
continue;
int root2 = j;
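For reference, a hedged usage example of `cv::partition` as documented above (the predicate and data are invented for illustration):
@code{.cpp}
#include <cmath>
#include <vector>

// Treat 2D points closer than 10 pixels as members of one equivalence class.
struct ClosePoints {
    bool operator()(const cv::Point& a, const cv::Point& b) const {
        return std::hypot(double(a.x - b.x), double(a.y - b.y)) < 10.0;
    }
};

int count_clusters()
{
    std::vector<cv::Point> pts = { {0, 0}, {3, 4}, {100, 100} };
    std::vector<int> labels;
    return cv::partition(pts, labels, ClosePoints());  // 2: {0,0},{3,4} vs {100,100}
}
@endcode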

View File

@ -53,50 +53,6 @@
# error persistence.hpp header must be compiled as C++
#endif
//! @addtogroup core_c
//! @{
/** @brief "black box" representation of the file storage associated with a file on disk.
Several functions that are described below take CvFileStorage\* as inputs and allow the user to
save or to load hierarchical collections that consist of scalar values, standard CXCore objects
(such as matrices, sequences, graphs), and user-defined objects.
OpenCV can read and write data in XML (<http://www.w3c.org/XML>), YAML (<http://www.yaml.org>) or
JSON (<http://www.json.org/>) formats. Below is an example of 3x3 floating-point identity matrix A,
stored in XML and YAML files
using CXCore functions:
XML:
@code{.xml}
<?xml version="1.0">
<opencv_storage>
<A type_id="opencv-matrix">
<rows>3</rows>
<cols>3</cols>
<dt>f</dt>
<data>1. 0. 0. 0. 1. 0. 0. 0. 1.</data>
</A>
</opencv_storage>
@endcode
YAML:
@code{.yaml}
%YAML:1.0
A: !!opencv-matrix
rows: 3
cols: 3
dt: f
data: [ 1., 0., 0., 0., 1., 0., 0., 0., 1.]
@endcode
As it can be seen from the examples, XML uses nested tags to represent hierarchy, while YAML uses
indentation for that purpose (similar to the Python programming language).
The same functions can read and write data in both formats; the particular format is determined by
the extension of the opened file, ".xml" for XML files, ".yml" or ".yaml" for YAML and ".json" for
JSON.
*/
//! @} core_c
#include "opencv2/core/types.hpp"
#include "opencv2/core/mat.hpp"
@ -283,13 +239,14 @@ element is a structure of 2 integers, followed by a single-precision floating-po
equivalent notations of the above specification are `iif`, `2i1f` and so forth. Other examples: `u`
means that the array consists of bytes, and `2d` means the array consists of pairs of doubles.
@see @ref samples/cpp/filestorage.cpp
@see @ref samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp
*/
//! @{
/** @example samples/cpp/filestorage.cpp
/** @example samples/cpp/tutorial_code/core/file_input_output/file_input_output.cpp
A complete example using the FileStorage interface
Check @ref tutorial_file_input_output_with_xml_yml "the corresponding tutorial" for more details
*/
////////////////////////// XML & YAML I/O //////////////////////////
@ -322,10 +279,10 @@ public:
};
enum State
{
UNDEFINED = 0,
VALUE_EXPECTED = 1,
NAME_EXPECTED = 2,
INSIDE_MAP = 4
UNDEFINED = 0, //!< Initial or uninitialized state.
VALUE_EXPECTED = 1, //!< Expecting a value in the current position.
NAME_EXPECTED = 2, //!< Expecting a key/name in the current position.
INSIDE_MAP = 4 //!< Indicates being inside a map (a set of key-value pairs).
};
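The format is selected purely by file extension, as described above; a minimal hedged example of round-tripping a matrix through YAML:
@code{.cpp}
void roundtrip()
{
    cv::Mat A = cv::Mat::eye(3, 3, CV_32F), B;
    {   // ".yml" selects YAML; ".xml" or ".json" would work the same way
        cv::FileStorage fs("identity.yml", cv::FileStorage::WRITE);
        fs << "A" << A;
    }
    {
        cv::FileStorage fs("identity.yml", cv::FileStorage::READ);
        fs["A"] >> B;
    }
}
@endcode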
/** @brief The constructors.

View File

@ -31,7 +31,7 @@
#include <iostream>
namespace cv
{
//! @addtogroup core
//! @addtogroup core_quaternion
//! @{
//! Unit quaternion flag

View File

@ -176,7 +176,38 @@ extern "C" typedef int (*ErrorCallback)( int status, const char* func_name,
*/
CV_EXPORTS ErrorCallback redirectError( ErrorCallback errCallback, void* userdata=0, void** prevUserdata=0);
/** @brief Generates a unique temporary file name.
This function generates a full, unique path for a temporary file.
@param suffix (optional) The desired file extension or suffix for the temporary file (e.g., ".png", ".txt").
If no suffix is provided (suffix = 0), the file name will have no specific extension.
@return cv::String A full unique path for the temporary file.
@note
- The function does not create the file; it only generates the name.
- The file name is unique for the system session.
- Works cross-platform (Windows, Linux, macOS).
*/
CV_EXPORTS String tempfile( const char* suffix = 0);
/** @brief Searches for files matching the specified pattern in a directory.
This function searches for files that match a given pattern (e.g., `*.jpg`)
in the specified directory. The search can be limited to the directory itself
or be recursive, including subdirectories.
@param pattern The file search pattern, which can include wildcards like `*`
(for matching multiple characters) or `?` (for matching a single character).
@param result Output vector where the file paths matching the search
pattern will be stored.
@param recursive (optional) Boolean flag indicating whether to search
subdirectories recursively. If true, the search will include all subdirectories.
The default value is `false`.
*/
CV_EXPORTS void glob(String pattern, std::vector<String>& result, bool recursive = false);
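A hedged sketch combining the two utilities above (paths are illustrative, and imread/imwrite assume the imgcodecs module is available):
@code{.cpp}
void copy_first_jpg()
{
    std::vector<cv::String> files;
    cv::glob("data/*.jpg", files, /*recursive=*/true);
    cv::String tmp = cv::tempfile(".png");   // name only: the file is not created
    if (!files.empty())
        cv::imwrite(tmp, cv::imread(files[0]));
}
@endcode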
/** @brief OpenCV will try to set the number of threads for subsequent parallel regions.
@ -309,11 +340,12 @@ public:
//! stops counting ticks.
CV_WRAP void stop()
{
int64 time = cv::getTickCount();
const int64 time = cv::getTickCount();
if (startTime == 0)
return;
++counter;
sumTime += (time - startTime);
lastTime = time - startTime;
sumTime += lastTime;
startTime = 0;
}
@ -336,11 +368,35 @@ public:
}
//! returns passed time in seconds.
CV_WRAP double getTimeSec() const
CV_WRAP double getTimeSec() const
{
return (double)getTimeTicks() / getTickFrequency();
}
//! returns counted ticks of the last iteration.
CV_WRAP int64 getLastTimeTicks() const
{
return lastTime;
}
//! returns passed time of the last iteration in microseconds.
CV_WRAP double getLastTimeMicro() const
{
return getLastTimeMilli()*1e3;
}
//! returns passed time of the last iteration in milliseconds.
CV_WRAP double getLastTimeMilli() const
{
return getLastTimeSec()*1e3;
}
//! returns passed time of the last iteration in seconds.
CV_WRAP double getLastTimeSec() const
{
return (double)getLastTimeTicks() / getTickFrequency();
}
//! returns internal counter value.
CV_WRAP int64 getCounter() const
{
@ -373,15 +429,17 @@ public:
//! resets internal values.
CV_WRAP void reset()
{
startTime = 0;
sumTime = 0;
counter = 0;
sumTime = 0;
startTime = 0;
lastTime = 0;
}
private:
int64 counter;
int64 sumTime;
int64 startTime;
int64 lastTime;
};
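The new `lastTime` field makes per-iteration timing possible without resetting the meter; a short hedged usage sketch (`heavy_work()` is a placeholder workload, not OpenCV API):
@code{.cpp}
void profile()
{
    cv::TickMeter tm;
    for (int i = 0; i < 10; ++i)
    {
        tm.start();
        heavy_work();                     // hypothetical workload
        tm.stop();
        std::cout << "iter " << i << ": " << tm.getLastTimeMilli() << " ms\n";
    }
    std::cout << "total: " << tm.getTimeMilli() << " ms\n";
}
@endcode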
/** @brief output operator

View File

@ -409,7 +409,7 @@ void RNG::fill( InputOutputArray _mat, int disttype,
(((_param2.rows == 1 || _param2.cols == 1) &&
(_param2.rows + _param2.cols - 1 == cn || _param2.rows + _param2.cols - 1 == 1 ||
(_param1.size() == Size(1, 4) && _param1.type() == CV_64F && cn <= 4))) ||
(_param2.rows == cn && _param2.cols == cn && disttype == NORMAL)));
(_param2.rows == cn && _param2.cols == cn && disttype == RNG::NORMAL)));
Vec2i* ip = 0;
Vec2d* dp = 0;
@ -421,7 +421,7 @@ void RNG::fill( InputOutputArray _mat, int disttype,
int n1 = (int)_param1.total();
int n2 = (int)_param2.total();
if( disttype == UNIFORM )
if( disttype == RNG::UNIFORM )
{
_parambuf.allocate(cn*8 + n1 + n2);
double* parambuf = _parambuf.data();
@ -535,7 +535,7 @@ void RNG::fill( InputOutputArray _mat, int disttype,
}
CV_Assert( func != 0 );
}
else if( disttype == CV_RAND_NORMAL )
else if( disttype == RNG::NORMAL )
{
_parambuf.allocate(MAX(n1, cn) + MAX(n2, cn));
double* parambuf = _parambuf.data();
@ -586,7 +586,7 @@ void RNG::fill( InputOutputArray _mat, int disttype,
float* nbuf = 0;
float* tmpbuf = 0;
if( disttype == UNIFORM )
if( disttype == RNG::UNIFORM )
{
buf.allocate(blockSize*cn*4);
param = (uchar*)(double*)buf.data();
@ -637,7 +637,7 @@ void RNG::fill( InputOutputArray _mat, int disttype,
{
int len = std::min(total - j, blockSize);
if( disttype == CV_RAND_UNI )
if( disttype == RNG::UNIFORM )
func( ptr, len*cn, &state, param, tmpbuf, smallFlag );
else
{
@ -753,12 +753,31 @@ void cv::randShuffle( InputOutputArray _dst, double iterFactor, RNG* _rng )
#ifndef OPENCV_EXCLUDE_C_API
// Related to https://github.com/opencv/opencv/issues/26258
// Suppresses the cast-user-defined warning emitted by GCC 14 when casting CvRNG to cv::RNG&.
// (CvRNG is uint64, and cv::RNG's only member, state, is uint64.)
#if defined(__GNUC__) && __GNUC__ >= 14
#define CV_IGNORE_CAST_USER_DEFINED_WARNING
#endif
CV_IMPL void
cvRandArr( CvRNG* _rng, CvArr* arr, int disttype, CvScalar param1, CvScalar param2 )
{
cv::Mat mat = cv::cvarrToMat(arr);
#ifdef CV_IGNORE_CAST_USER_DEFINED_WARNING
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-user-defined"
#endif
// !!! this will only work for current 64-bit MWC RNG !!!
cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG();
#ifdef CV_IGNORE_CAST_USER_DEFINED_WARNING
#pragma GCC diagnostic pop
#endif
rng.fill(mat, disttype == CV_RAND_NORMAL ?
cv::RNG::NORMAL : cv::RNG::UNIFORM, cv::Scalar(param1), cv::Scalar(param2) );
}
@ -766,10 +785,25 @@ cvRandArr( CvRNG* _rng, CvArr* arr, int disttype, CvScalar param1, CvScalar para
CV_IMPL void cvRandShuffle( CvArr* arr, CvRNG* _rng, double iter_factor )
{
cv::Mat dst = cv::cvarrToMat(arr);
#ifdef CV_IGNORE_CAST_USER_DEFINED_WARNING
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-user-defined"
#endif
cv::RNG& rng = _rng ? (cv::RNG&)*_rng : cv::theRNG();
#ifdef CV_IGNORE_CAST_USER_DEFINED_WARNING
#pragma GCC diagnostic pop
#endif
cv::randShuffle( dst, iter_factor, &rng );
}
#ifdef CV_IGNORE_CAST_USER_DEFINED_WARNING
#undef CV_IGNORE_CAST_USER_DEFINED_WARNING
#endif
#endif // OPENCV_EXCLUDE_C_API
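The migration above replaces the legacy `CV_RAND_*` constants with the `cv::RNG` enum values throughout; the modern C++ equivalents look like this:
@code{.cpp}
cv::Mat m(4, 4, CV_32F);
cv::RNG& rng = cv::theRNG();
rng.fill(m, cv::RNG::UNIFORM, cv::Scalar(0), cv::Scalar(1));   // was CV_RAND_UNI
rng.fill(m, cv::RNG::NORMAL,  cv::Scalar(0), cv::Scalar(1));   // was CV_RAND_NORMAL
@endcode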

View File

@ -300,14 +300,20 @@ template<typename R> struct TheTest
#else
#error "Configuration error"
#endif
R setall_res3 = v_setall_<R>((LaneType)7);
R setall_resz = v_setzero_<R>();
#if CV_SIMD_WIDTH > 0
Data<R> setall_res1_; v_store(setall_res1_.d, setall_res1);
Data<R> setall_res2_; v_store(setall_res2_.d, setall_res2);
Data<R> setall_res3_; v_store(setall_res3_.d, setall_res3);
Data<R> setall_resz_; v_store(setall_resz_.d, setall_resz);
for (int i = 0; i < VTraits<R>::vlanes(); ++i)
{
SCOPED_TRACE(cv::format("i=%d", i));
EXPECT_EQ((LaneType)5, setall_res1_[i]);
EXPECT_EQ((LaneType)6, setall_res2_[i]);
EXPECT_EQ((LaneType)7, setall_res3_[i]);
EXPECT_EQ((LaneType)0, setall_resz_[i]);
}
#endif
@ -1906,6 +1912,99 @@ template<typename R> struct TheTest
return *this;
}
void __test_sincos(LaneType diff_thr, LaneType flt_min) {
int n = VTraits<R>::vlanes();
// Test each value for a period, from -PI to PI
const LaneType step = (LaneType) 0.01;
for (LaneType i = 0; i <= (LaneType)M_PI;) {
Data<R> dataPosPI, dataNegPI;
for (int j = 0; j < n; ++j) {
dataPosPI[j] = i;
dataNegPI[j] = -i;
i += step;
}
R posPI = dataPosPI, negPI = dataNegPI, sinPos, cosPos, sinNeg, cosNeg;
v_sincos(posPI, sinPos, cosPos);
v_sincos(negPI, sinNeg, cosNeg);
Data<R> resSinPos = sinPos, resCosPos = cosPos, resSinNeg = sinNeg, resCosNeg = cosNeg;
for (int j = 0; j < n; ++j) {
LaneType std_sin_pos = (LaneType) std::sin(dataPosPI[j]);
LaneType std_cos_pos = (LaneType) std::cos(dataPosPI[j]);
LaneType std_sin_neg = (LaneType) std::sin(dataNegPI[j]);
LaneType std_cos_neg = (LaneType) std::cos(dataNegPI[j]);
SCOPED_TRACE(cv::format("Period test value: %lf and %lf", (double) dataPosPI[j], (double) dataNegPI[j]));
EXPECT_LT(std::abs(resSinPos[j] - std_sin_pos), diff_thr * (std::abs(std_sin_pos) + flt_min * 100));
EXPECT_LT(std::abs(resCosPos[j] - std_cos_pos), diff_thr * (std::abs(std_cos_pos) + flt_min * 100));
EXPECT_LT(std::abs(resSinNeg[j] - std_sin_neg), diff_thr * (std::abs(std_sin_neg) + flt_min * 100));
EXPECT_LT(std::abs(resCosNeg[j] - std_cos_neg), diff_thr * (std::abs(std_cos_neg) + flt_min * 100));
}
}
// Test special values
std::vector<LaneType> specialValues = {(LaneType) 0, (LaneType) M_PI, (LaneType) (M_PI / 2), (LaneType) INFINITY, (LaneType) -INFINITY, (LaneType) NAN};
const int testRandNum = 10000;
const double specialValueProbability = 0.1; // 10% chance to insert a special value
cv::RNG_MT19937 rng;
for (int i = 0; i < testRandNum; i++) {
Data<R> dataRand;
for (int j = 0; j < n; ++j) {
if (rng.uniform(0.f, 1.f) <= specialValueProbability) {
// Insert a special value
int specialValueIndex = rng.uniform(0, (int) specialValues.size());
dataRand[j] = specialValues[specialValueIndex];
} else {
// Generate uniform random integers in [-1000, 1000)
dataRand[j] = (LaneType) rng.uniform(-1000, 1000);
}
}
// Compare with std::sin and std::cos
R x = dataRand, s, c;
v_sincos(x, s, c);
Data<R> resSin = s, resCos = c;
for (int j = 0; j < n; ++j) {
SCOPED_TRACE(cv::format("Random test value: %lf", (double) dataRand[j]));
LaneType std_sin = (LaneType) std::sin(dataRand[j]);
LaneType std_cos = (LaneType) std::cos(dataRand[j]);
// input NaN, +INF, -INF -> output NaN
if (std::isnan(dataRand[j]) || std::isinf(dataRand[j])) {
EXPECT_TRUE(std::isnan(resSin[j]));
EXPECT_TRUE(std::isnan(resCos[j]));
} else if(dataRand[j] == 0) {
// sin(0) -> 0, cos(0) -> 1
EXPECT_EQ(resSin[j], 0);
EXPECT_EQ(resCos[j], 1);
} else {
EXPECT_LT(std::abs(resSin[j] - std_sin), diff_thr * (std::abs(std_sin) + flt_min * 100));
EXPECT_LT(std::abs(resCos[j] - std_cos), diff_thr * (std::abs(std_cos) + flt_min * 100));
}
}
}
}
TheTest &test_sincos_fp16() {
#if CV_SIMD_FP16
hfloat flt16_min;
uint16_t flt16_min_hex = 0x0400;
std::memcpy(&flt16_min, &flt16_min_hex, sizeof(hfloat));
__test_sincos((hfloat) 1e-3, flt16_min);
#endif
return *this;
}
TheTest &test_sincos_fp32() {
__test_sincos(1e-6f, FLT_MIN);
return *this;
}
TheTest &test_sincos_fp64() {
#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F
__test_sincos(1e-11, DBL_MIN);
#endif
return *this;
}
};
#define DUMP_ENTRY(type) printf("SIMD%d: %s\n", 8*VTraits<v_uint8>::vlanes(), CV__TRACE_FUNCTION);
@ -2221,6 +2320,7 @@ void test_hal_intrin_float32()
.test_pack_triplets()
.test_exp_fp32()
.test_log_fp32()
.test_sincos_fp32()
.test_erf_fp32()
#if CV_SIMD_WIDTH == 32
.test_extract<4>().test_extract<5>().test_extract<6>().test_extract<7>()
@ -2255,6 +2355,7 @@ void test_hal_intrin_float64()
.test_extract_highest()
.test_exp_fp64()
.test_log_fp64()
.test_sincos_fp64()
//.test_broadcast_element<0>().test_broadcast_element<1>()
#if CV_SIMD_WIDTH == 32
.test_extract<2>().test_extract<3>()
@ -2277,6 +2378,7 @@ void test_hal_intrin_float16()
.test_float_cvt_fp16()
.test_exp_fp16()
.test_log_fp16()
.test_sincos_fp16()
#endif
;
#else

View File

@ -118,12 +118,12 @@ protected:
int cn = cvtest::randInt(rng) % 4 + 1;
Mat test_mat(cvtest::randInt(rng)%30+1, cvtest::randInt(rng)%30+1, CV_MAKETYPE(depth, cn));
rng0.fill(test_mat, CV_RAND_UNI, Scalar::all(ranges[depth][0]), Scalar::all(ranges[depth][1]));
rng0.fill(test_mat, RNG::UNIFORM, Scalar::all(ranges[depth][0]), Scalar::all(ranges[depth][1]));
if( depth >= CV_32F )
{
exp(test_mat, test_mat);
Mat test_mat_scale(test_mat.size(), test_mat.type());
rng0.fill(test_mat_scale, CV_RAND_UNI, Scalar::all(-1), Scalar::all(1));
rng0.fill(test_mat_scale, RNG::UNIFORM, Scalar::all(-1), Scalar::all(1));
cv::multiply(test_mat, test_mat_scale, test_mat);
}
@ -136,12 +136,12 @@ protected:
};
MatND test_mat_nd(3, sz, CV_MAKETYPE(depth, cn));
rng0.fill(test_mat_nd, CV_RAND_UNI, Scalar::all(ranges[depth][0]), Scalar::all(ranges[depth][1]));
rng0.fill(test_mat_nd, RNG::UNIFORM, Scalar::all(ranges[depth][0]), Scalar::all(ranges[depth][1]));
if( depth >= CV_32F )
{
exp(test_mat_nd, test_mat_nd);
MatND test_mat_scale(test_mat_nd.dims, test_mat_nd.size, test_mat_nd.type());
rng0.fill(test_mat_scale, CV_RAND_UNI, Scalar::all(-1), Scalar::all(1));
rng0.fill(test_mat_scale, RNG::UNIFORM, Scalar::all(-1), Scalar::all(1));
cv::multiply(test_mat_nd, test_mat_scale, test_mat_nd);
}

View File

@ -650,8 +650,8 @@ void Core_ArrayOpTest::run( int /* start_from */)
MatND A(3, sz3, CV_32F), B(3, sz3, CV_16SC4);
CvMatND matA = cvMatND(A), matB = cvMatND(B);
RNG rng;
rng.fill(A, CV_RAND_UNI, Scalar::all(-10), Scalar::all(10));
rng.fill(B, CV_RAND_UNI, Scalar::all(-10), Scalar::all(10));
rng.fill(A, RNG::UNIFORM, Scalar::all(-10), Scalar::all(10));
rng.fill(B, RNG::UNIFORM, Scalar::all(-10), Scalar::all(10));
int idx0[] = {3,4,5}, idx1[] = {0, 9, 7};
float val0 = 130;
@ -807,7 +807,7 @@ void Core_ArrayOpTest::run( int /* start_from */)
all_vals.resize(nz0);
all_vals2.resize(nz0);
Mat_<double> _all_vals(all_vals), _all_vals2(all_vals2);
rng.fill(_all_vals, CV_RAND_UNI, Scalar(-1000), Scalar(1000));
rng.fill(_all_vals, RNG::UNIFORM, Scalar(-1000), Scalar(1000));
if( depth == CV_32F )
{
Mat _all_vals_f;

View File

@ -48,7 +48,7 @@ bool Core_RandTest::check_pdf(const Mat& hist, double scale,
sum += H[i];
CV_Assert( fabs(1./sum - scale) < FLT_EPSILON );
if( dist_type == CV_RAND_UNI )
if( dist_type == RNG::UNIFORM )
{
float scale0 = (float)(1./hsz);
for( i = 0; i < hsz; i++ )
@ -79,7 +79,7 @@ bool Core_RandTest::check_pdf(const Mat& hist, double scale,
}
realval = chi2;
double chi2_pval = chi2_p95(hsz - 1 - (dist_type == CV_RAND_NORMAL ? 2 : 0));
double chi2_pval = chi2_p95(hsz - 1 - (dist_type == RNG::NORMAL ? 2 : 0));
refval = chi2_pval*0.01;
return realval <= refval;
}
@ -108,7 +108,7 @@ void Core_RandTest::run( int )
int depth = cvtest::randInt(rng) % (CV_64F+1);
int c, cn = (cvtest::randInt(rng) % 4) + 1;
int type = CV_MAKETYPE(depth, cn);
int dist_type = cvtest::randInt(rng) % (CV_RAND_NORMAL+1);
int dist_type = cvtest::randInt(rng) % (RNG::NORMAL+1);
int i, k, SZ = N/cn;
Scalar A, B;
@ -116,18 +116,18 @@ void Core_RandTest::run( int )
if (depth == CV_64F)
eps = 1.e-7;
bool do_sphere_test = dist_type == CV_RAND_UNI;
bool do_sphere_test = dist_type == RNG::UNIFORM;
Mat arr[2], hist[4];
int W[] = {0,0,0,0};
arr[0].create(1, SZ, type);
arr[1].create(1, SZ, type);
bool fast_algo = dist_type == CV_RAND_UNI && depth < CV_32F;
bool fast_algo = dist_type == RNG::UNIFORM && depth < CV_32F;
for( c = 0; c < cn; c++ )
{
int a, b, hsz;
if( dist_type == CV_RAND_UNI )
if( dist_type == RNG::UNIFORM )
{
a = (int)(cvtest::randInt(rng) % (_ranges[depth][1] -
_ranges[depth][0])) + _ranges[depth][0];
@ -188,8 +188,8 @@ void Core_RandTest::run( int )
const uchar* data = arr[0].ptr();
int* H = hist[c].ptr<int>();
int HSZ = hist[c].cols;
double minVal = dist_type == CV_RAND_UNI ? A[c] : A[c] - B[c]*4;
double maxVal = dist_type == CV_RAND_UNI ? B[c] : A[c] + B[c]*4;
double minVal = dist_type == RNG::UNIFORM ? A[c] : A[c] - B[c]*4;
double maxVal = dist_type == RNG::UNIFORM ? B[c] : A[c] + B[c]*4;
double scale = HSZ/(maxVal - minVal);
double delta = -minVal*scale;
@ -210,7 +210,7 @@ void Core_RandTest::run( int )
H[ival]++;
W[c]++;
}
else if( dist_type == CV_RAND_UNI )
else if( dist_type == RNG::UNIFORM )
{
if( (minVal <= val && val < maxVal) || (depth >= CV_32F && val == maxVal) )
{
@ -224,14 +224,14 @@ void Core_RandTest::run( int )
}
}
if( dist_type == CV_RAND_UNI && W[c] != SZ )
if( dist_type == RNG::UNIFORM && W[c] != SZ )
{
ts->printf( cvtest::TS::LOG, "Uniform RNG gave values out of the range [%g,%g) on channel %d/%d\n",
A[c], B[c], c, cn);
ts->set_failed_test_info( cvtest::TS::FAIL_INVALID_OUTPUT );
return;
}
if( dist_type == CV_RAND_NORMAL && W[c] < SZ*.90)
if( dist_type == RNG::NORMAL && W[c] < SZ*.90)
{
ts->printf( cvtest::TS::LOG, "Normal RNG gave too many values out of the range (%g+4*%g,%g+4*%g) on channel %d/%d\n",
A[c], B[c], A[c], B[c], c, cn);

View File

@ -43,7 +43,7 @@
#define OPENCV_DNN_DNN_SHAPE_UTILS_HPP
#include <opencv2/dnn/dnn.hpp>
#include <opencv2/core/types_c.h> // CV_MAX_DIM
#include <opencv2/core/cvdef.h> // CV_MAX_DIM
#include <iostream>
#include <ostream>
#include <sstream>

View File

@ -873,10 +873,13 @@ extractMSER_8uC3( const Mat& src,
const MSER_Impl::Params& params )
{
bboxvec.clear();
MSCRNode* map = (MSCRNode*)cvAlloc( src.cols*src.rows*sizeof(map[0]) );
AutoBuffer<MSCRNode> mapBuf(src.cols*src.rows);
MSCRNode* map = mapBuf.data();
int Ne = src.cols*src.rows*2-src.cols-src.rows;
MSCREdge* edge = (MSCREdge*)cvAlloc( Ne*sizeof(edge[0]) );
TempMSCR* mscr = (TempMSCR*)cvAlloc( src.cols*src.rows*sizeof(mscr[0]) );
AutoBuffer<MSCREdge> edgeBuf(Ne);
MSCREdge* edge = edgeBuf.data();
AutoBuffer<TempMSCR> mscrBuf(src.cols*src.rows);
TempMSCR* mscr = mscrBuf.data();
double emean = 0;
Mat dx( src.rows, src.cols-1, CV_64FC1 );
Mat dy( src.rows-1, src.cols, CV_64FC1 );
@ -987,9 +990,6 @@ extractMSER_8uC3( const Mat& src,
}
bboxvec.push_back(Rect(xmin, ymin, xmax - xmin + 1, ymax - ymin + 1));
}
cvFree( &mscr );
cvFree( &edge );
cvFree( &map );
}
void MSER_Impl::detectRegions( InputArray _src, vector<vector<Point> >& msers, vector<Rect>& bboxes )
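The change above swaps the manual `cvAlloc`/`cvFree` pairs for `cv::AutoBuffer`, which releases its storage automatically on every exit path, including exceptions. A minimal sketch of the pattern:
@code{.cpp}
void fill_scratch(int n)
{
    cv::AutoBuffer<double> buf(n);   // allocated here ...
    double* p = buf.data();
    for (int i = 0; i < n; ++i)
        p[i] = i * 0.5;
}                                    // ... and freed here, with no explicit cvFree()
@endcode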

View File

@ -85,15 +85,15 @@ CV_ALWAYS_INLINE void calcRowLinear32FC1Impl(float *dst[],
v_deinterleave(low1, high1, s00, s01);
// v_float32 res0 = s00*alpha0 + s01*alpha1;
v_float32x8 res0 = v_fma(s00 - s01, alpha0, s01);
v_float32x8 res0 = v_fma(v_sub(s00, s01), alpha0, s01);
v_gather_pairs(src1[line], &mapsx[x], low2, high2);
v_deinterleave(low2, high2, s10, s11);
// v_float32 res1 = s10*alpha0 + s11*alpha1;
v_float32x8 res1 = v_fma(s10 - s11, alpha0, s11);
v_float32x8 res1 = v_fma(v_sub(s10, s11), alpha0, s11);
// v_float32 d = res0*beta0 + res1*beta1;
v_float32x8 d = v_fma(res0 - res1, v_beta0, res1);
v_float32x8 d = v_fma(v_sub(res0, res1), v_beta0, res1);
v_store(&dst[line][x], d);
}
@ -126,7 +126,7 @@ CV_ALWAYS_INLINE void calcRowLinear32FC1Impl(float *dst[],
v_deinterleave(low, high, s00, s01);
// v_float32 d = s00*alpha0 + s01*alpha1;
v_float32x8 d = v_fma(s00 - s01, alpha0, s01);
v_float32x8 d = v_fma(v_sub(s00, s01), alpha0, s01);
v_store(&dst[line][x], d);
}
@ -157,7 +157,7 @@ CV_ALWAYS_INLINE void calcRowLinear32FC1Impl(float *dst[],
v_float32x8 s1 = v256_load(&src1[line][x]);
// v_float32 d = s0*beta0 + s1*beta1;
v_float32x8 d = v_fma(s0 - s1, v_beta0, s1);
v_float32x8 d = v_fma(v_sub(s0, s1), v_beta0, s1);
v_store(&dst[line][x], d);
}

View File

@ -142,8 +142,7 @@ static constexpr size_t kAvifSignatureSize = 500;
AvifDecoder::AvifDecoder() {
m_buf_supported = true;
channels_ = 0;
decoder_ = avifDecoderCreate();
decoder_->strictFlags = AVIF_STRICT_DISABLED;
decoder_ = nullptr;
}
AvifDecoder::~AvifDecoder() {
@ -181,6 +180,11 @@ bool AvifDecoder::checkSignature(const String &signature) const {
ImageDecoder AvifDecoder::newDecoder() const { return makePtr<AvifDecoder>(); }
bool AvifDecoder::readHeader() {
if (decoder_)
return true;
decoder_ = avifDecoderCreate();
decoder_->strictFlags = AVIF_STRICT_DISABLED;
if (!m_buf.empty()) {
CV_Assert(m_buf.type() == CV_8UC1);
CV_Assert(m_buf.rows == 1);

View File

@ -337,11 +337,20 @@ TEST_P(Imgcodecs_Avif_Animation_WriteDecodeSuite, encode_decode) {
std::vector<unsigned char> buf(size);
EXPECT_TRUE(file.read(reinterpret_cast<char*>(buf.data()), size));
file.close();
EXPECT_EQ(0, remove(output.c_str()));
std::vector<cv::Mat> anim;
ASSERT_TRUE(cv::imdecodemulti(buf, imread_mode_, anim));
ValidateRead(anim_original, anim);
if (imread_mode_ == IMREAD_UNCHANGED) {
ImageCollection collection(output, IMREAD_UNCHANGED);
anim.clear();
for (auto&& i : collection)
anim.push_back(i);
ValidateRead(anim_original, anim);
}
EXPECT_EQ(0, remove(output.c_str()));
}
INSTANTIATE_TEST_CASE_P(

View File

@ -48,7 +48,7 @@
/**
@defgroup imgproc Image Processing
This module includes image-processing functions.
This module offers a comprehensive suite of image processing functions, enabling tasks such as those listed below.
@{
@defgroup imgproc_filter Image Filtering

View File

@ -64,7 +64,7 @@ CollectPolyEdges( Mat& img, const Point2l* v, int npts,
int shift, Point offset=Point() );
static void
FillEdgeCollection( Mat& img, std::vector<PolyEdge>& edges, const void* color, int line_type);
FillEdgeCollection( Mat& img, std::vector<PolyEdge>& edges, const void* color );
static void
PolyLine( Mat& img, const Point2l* v, int npts, bool closed,
@ -1051,7 +1051,7 @@ EllipseEx( Mat& img, Point2l center, Size2l axes,
v.push_back(center);
std::vector<PolyEdge> edges;
CollectPolyEdges( img, &v[0], (int)v.size(), edges, color, line_type, XY_SHIFT );
FillEdgeCollection( img, edges, color, line_type );
FillEdgeCollection( img, edges, color );
}
}
@ -1299,15 +1299,11 @@ CollectPolyEdges( Mat& img, const Point2l* v, int count, std::vector<PolyEdge>&
if (t0.y != t1.y)
{
pt0c.y = t0.y; pt1c.y = t1.y;
pt0c.x = (int64)(t0.x) << XY_SHIFT;
pt1c.x = (int64)(t1.x) << XY_SHIFT;
}
}
else
{
pt0c.x += XY_ONE >> 1;
pt1c.x += XY_ONE >> 1;
}
pt0c.x = (int64)(t0.x) << XY_SHIFT;
pt1c.x = (int64)(t1.x) << XY_SHIFT;
}
else
{
@ -1349,7 +1345,7 @@ struct CmpEdges
/**************** helper macros and functions for sequence/contour processing ***********/
static void
FillEdgeCollection( Mat& img, std::vector<PolyEdge>& edges, const void* color, int line_type)
FillEdgeCollection( Mat& img, std::vector<PolyEdge>& edges, const void* color )
{
PolyEdge tmp;
int i, y, total = (int)edges.size();
@ -1358,12 +1354,7 @@ FillEdgeCollection( Mat& img, std::vector<PolyEdge>& edges, const void* color, i
int y_max = INT_MIN, y_min = INT_MAX;
int64 x_max = 0xFFFFFFFFFFFFFFFF, x_min = 0x7FFFFFFFFFFFFFFF;
int pix_size = (int)img.elemSize();
int delta;
if (line_type < cv::LINE_AA)
delta = 0;
else
delta = XY_ONE - 1;
int delta = XY_ONE - 1;
if( total < 2 )
return;
@ -2051,7 +2042,7 @@ void fillPoly( InputOutputArray _img, const Point** pts, const int* npts, int nc
}
}
FillEdgeCollection(img, edges, buf, line_type);
FillEdgeCollection(img, edges, buf);
}
void polylines( InputOutputArray _img, const Point* const* pts, const int* npts, int ncontours, bool isClosed,
@ -2690,7 +2681,7 @@ cvDrawContours( void* _img, CvSeq* contour,
}
if( thickness < 0 )
cv::FillEdgeCollection( img, edges, ext_buf, line_type);
cv::FillEdgeCollection( img, edges, ext_buf );
if( h_next && contour0 )
contour0->h_next = h_next;

View File

@ -2599,7 +2599,7 @@ private:
#endif
static bool ipp_warpAffine( InputArray _src, OutputArray _dst, int interpolation, int borderType, InputArray _M, int flags )
static bool ipp_warpAffine( InputArray _src, OutputArray _dst, int interpolation, int borderType, const Scalar & borderValue, InputArray _M, int flags )
{
#ifdef HAVE_IPP_IW
CV_INSTRUMENT_REGION_IPP();
@ -2618,7 +2618,7 @@ static bool ipp_warpAffine( InputArray _src, OutputArray _dst, int interpolation
Mat dst = _dst.getMat();
::ipp::IwiImage iwSrc = ippiGetImage(src);
::ipp::IwiImage iwDst = ippiGetImage(dst);
::ipp::IwiBorderType ippBorder(ippiGetBorderType(borderType));
::ipp::IwiBorderType ippBorder(ippiGetBorderType(borderType), ippiGetValue(borderValue));
IwTransDirection iwTransDirection;
if(!ippBorder)
return false;
@ -2661,7 +2661,7 @@ static bool ipp_warpAffine( InputArray _src, OutputArray _dst, int interpolation
return true;
#else
CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(interpolation);
CV_UNUSED(borderType); CV_UNUSED(_M); CV_UNUSED(flags);
CV_UNUSED(borderType); CV_UNUSED(borderValue); CV_UNUSED(_M); CV_UNUSED(flags);
return false;
#endif
}
@ -2828,7 +2828,7 @@ void cv::warpAffine( InputArray _src, OutputArray _dst,
CV_Assert( (M0.type() == CV_32F || M0.type() == CV_64F) && M0.rows == 2 && M0.cols == 3 );
M0.convertTo(matM, matM.type());
CV_IPP_RUN_FAST(ipp_warpAffine(src, dst, interpolation, borderType, matM, flags));
CV_IPP_RUN_FAST(ipp_warpAffine(src, dst, interpolation, borderType, borderValue, matM, flags));
if( !(flags & WARP_INVERSE_MAP) )
{

View File

@ -2696,7 +2696,7 @@ public:
#elif CV_SIMD_WIDTH == 64
v_zip(t0, t3, s0, s1); v_zip(t1, t4, s2, s3); v_zip(t2, t5, s4, s5);
v_zip(s0, s3, t0, t1); v_zip(s1, s4, t2, t3); v_zip(s2, s5, t4, t5);
bl = t0 + t3; gl = t1 + t4; rl = t2 + t5;
bl = v_add(t0, t3); gl = v_add(t1, t4); rl = v_add(t2, t5);
#endif
s0 = v_add(vx_load_expand(S0 + 6 * VTraits<v_uint16>::vlanes()), vx_load_expand(S1 + 6 * VTraits<v_uint16>::vlanes()));
s1 = v_add(vx_load_expand(S0 + 7 * VTraits<v_uint16>::vlanes()), vx_load_expand(S1 + 7 * VTraits<v_uint16>::vlanes()));
@ -2716,7 +2716,7 @@ public:
#elif CV_SIMD_WIDTH == 64
v_zip(t0, t3, s0, s1); v_zip(t1, t4, s2, s3); v_zip(t2, t5, s4, s5);
v_zip(s0, s3, t0, t1); v_zip(s1, s4, t2, t3); v_zip(s2, s5, t4, t5);
bh = t0 + t3; gh = t1 + t4; rh = t2 + t5;
bh = v_add(t0, t3); gh = v_add(t1, t4); rh = v_add(t2, t5);
#endif
v_store_interleave(D, v_rshr_pack<2>(bl, bh), v_rshr_pack<2>(gl, gh), v_rshr_pack<2>(rl, rh));
}
@ -2803,7 +2803,7 @@ public:
bl = v_add(t0, t3); gl = v_add(t1, t4); rl = v_add(t2, t5);
#else //CV_SIMD_WIDTH == 64
v_zip(t0, t3, s0, s1); v_zip(t1, t4, s2, s3); v_zip(t2, t5, s4, s5);
bl = s0 + s3; gl = s1 + s4; rl = s2 + s5;
bl = v_add(s0, s3); gl = v_add(s1, s4); rl = v_add(s2, s5);
#endif
s0 = v_add(vx_load_expand(S0 + 6 * VTraits<v_uint32>::vlanes()), vx_load_expand(S1 + 6 * VTraits<v_uint32>::vlanes()));
s1 = v_add(vx_load_expand(S0 + 7 * VTraits<v_uint32>::vlanes()), vx_load_expand(S1 + 7 * VTraits<v_uint32>::vlanes()));
@ -2819,7 +2819,7 @@ public:
bh = v_add(t0, t3); gh = v_add(t1, t4); rh = v_add(t2, t5);
#else //CV_SIMD_WIDTH == 64
v_zip(t0, t3, s0, s1); v_zip(t1, t4, s2, s3); v_zip(t2, t5, s4, s5);
bh = s0 + s3; gh = s1 + s4; rh = s2 + s5;
bh = v_add(s0, s3); gh = v_add(s1, s4); rh = v_add(s2, s5);
#endif
v_store_interleave(D, v_rshr_pack<2>(bl, bh), v_rshr_pack<2>(gl, gh), v_rshr_pack<2>(rl, rh));
}
@ -2857,7 +2857,7 @@ public:
v_expand(v_reinterpret_as_u16(r01), r01l, r01h);
v_expand(v_reinterpret_as_u16(r10), r10l, r10h);
v_expand(v_reinterpret_as_u16(r11), r11l, r11h);
v_store(D, v_rshr_pack<2>(r00l + r01l + r10l + r11l, r00h + r01h + r10h + r11h));
v_store(D, v_rshr_pack<2>(v_add(r00l, r01l, r10l, r11l), v_add(r00h, r01h, r10h, r11h)));
}
#else
for ( ; dx <= w - VTraits<v_uint32>::vlanes(); dx += VTraits<v_uint32>::vlanes(), S0 += VTraits<v_uint16>::vlanes(), S1 += VTraits<v_uint16>::vlanes(), D += VTraits<v_uint32>::vlanes())
@ -2933,7 +2933,7 @@ public:
bl = v_add(t0, t3); gl = v_add(t1, t4); rl = v_add(t2, t5);
#else //CV_SIMD_WIDTH == 64
v_zip(t0, t3, s0, s1); v_zip(t1, t4, s2, s3); v_zip(t2, t5, s4, s5);
bl = s0 + s3; gl = s1 + s4; rl = s2 + s5;
bl = v_add(s0, s3); gl = v_add(s1, s4); rl = v_add(s2, s5);
#endif
s0 = v_add(vx_load_expand(S0 + 6 * VTraits<v_int32>::vlanes()), vx_load_expand(S1 + 6 * VTraits<v_int32>::vlanes()));
s1 = v_add(vx_load_expand(S0 + 7 * VTraits<v_int32>::vlanes()), vx_load_expand(S1 + 7 * VTraits<v_int32>::vlanes()));
@ -2949,7 +2949,7 @@ public:
bh = v_add(t0, t3); gh = v_add(t1, t4); rh = v_add(t2, t5);
#else //CV_SIMD_WIDTH == 64
v_zip(t0, t3, s0, s1); v_zip(t1, t4, s2, s3); v_zip(t2, t5, s4, s5);
bh = s0 + s3; gh = s1 + s4; rh = s2 + s5;
bh = v_add(s0, s3); gh = v_add(s1, s4); rh = v_add(s2, s5);
#endif
v_store_interleave(D, v_rshr_pack<2>(bl, bh), v_rshr_pack<2>(gl, gh), v_rshr_pack<2>(rl, rh));
}
@ -2986,7 +2986,7 @@ public:
v_expand(v_reinterpret_as_s16(r01), r01l, r01h);
v_expand(v_reinterpret_as_s16(r10), r10l, r10h);
v_expand(v_reinterpret_as_s16(r11), r11l, r11h);
v_store(D, v_rshr_pack<2>(r00l + r01l + r10l + r11l, r00h + r01h + r10h + r11h));
v_store(D, v_rshr_pack<2>(v_add(r00l, r01l, r10l, r11l), v_add(r00h, r01h, r10h, r11h)));
#else
v_int32 r0, r1, r2, r3;
r0 = v_add(vx_load_expand(S0), vx_load_expand(S1));

View File

@ -680,6 +680,75 @@ TEST(Drawing, fillpoly_circle)
EXPECT_LT(diff_fp3, 1.);
}
TEST(Drawing, fillpoly_contours)
{
const int imgSize = 50;
const int type = CV_8UC1;
const int shift = 0;
const Scalar cl = Scalar::all(255);
const cv::LineTypes lineType = LINE_8;
// check that contours of fillPoly and polylines match
{
cv::Mat img(imgSize, imgSize, type);
img = 0;
std::vector<std::vector<cv::Point>> polygonPoints{
{ {44, 27}, {7, 37}, {7, 19}, {38, 19} }
};
cv::fillPoly(img, polygonPoints, cl, lineType, shift);
cv::polylines(img, polygonPoints, true, 0, 1, lineType, shift);
{
cv::Mat labelImage(img.size(), CV_32S);
int labels = cv::connectedComponents(img, labelImage, 4);
EXPECT_EQ(2, labels) << "filling went over the border";
}
}
// check that line generated with fillPoly and polylines match
{
cv::Mat img1(imgSize, imgSize, type), img2(imgSize, imgSize, type);
img1 = 0;
img2 = 0;
std::vector<std::vector<cv::Point>> polygonPoints{
{ {44, 27}, {38, 19} }
};
cv::fillPoly(img1, polygonPoints, cl, lineType, shift);
cv::polylines(img2, polygonPoints, true, cl, 1, lineType, shift);
EXPECT_MAT_N_DIFF(img1, img2, 0);
}
}
TEST(Drawing, fillpoly_match_lines)
{
const int imgSize = 49;
const int type = CV_8UC1;
const int shift = 0;
const Scalar cl = Scalar::all(255);
const cv::LineTypes lineType = LINE_8;
cv::Mat img1(imgSize, imgSize, type), img2(imgSize, imgSize, type);
for (int x1 = 0; x1 < imgSize; x1 += imgSize / 2)
{
for (int y1 = 0; y1 < imgSize; y1 += imgSize / 2)
{
for (int x2 = 0; x2 < imgSize; x2++)
{
for (int y2 = 0; y2 < imgSize; y2++)
{
img1 = 0;
img2 = 0;
std::vector<std::vector<cv::Point>> polygonPoints{
{ {x1, y1}, {x2, y2} }
};
cv::fillPoly(img1, polygonPoints, cl, lineType, shift);
cv::polylines(img2, polygonPoints, true, cl, 1, lineType, shift);
EXPECT_MAT_N_DIFF(img1, img2, 0);
}
}
}
}
}
TEST(Drawing, fillpoly_fully)
{
unsigned imageWidth = 256;

View File

@ -558,7 +558,7 @@ int CV_WarpAffineTest::prepare_test_case( int test_case_idx )
angle = cvtest::randReal(rng)*360;
scale = ((double)dst.rows/src.rows + (double)dst.cols/src.cols)*0.5;
getRotationMatrix2D(center, angle, scale).convertTo(mat, mat.depth());
rng.fill( tmp, CV_RAND_NORMAL, Scalar::all(1.), Scalar::all(0.01) );
rng.fill( tmp, RNG::NORMAL, Scalar::all(1.), Scalar::all(0.01) );
cv::max(tmp, 0.9, tmp);
cv::min(tmp, 1.1, tmp);
cv::multiply(tmp, mat, mat, 1.);
@ -673,7 +673,7 @@ int CV_WarpPerspectiveTest::prepare_test_case( int test_case_idx )
float bufer[16];
Mat tmp( 1, 16, CV_32FC1, bufer );
rng.fill( tmp, CV_RAND_NORMAL, Scalar::all(0.), Scalar::all(0.1) );
rng.fill( tmp, RNG::NORMAL, Scalar::all(0.), Scalar::all(0.1) );
for( i = 0; i < 4; i++ )
{

View File

@ -2,18 +2,18 @@
"name": "opencv_js_perf",
"description": "Perfermance tests for opencv js bindings",
"version": "1.0.0",
"dependencies" : {
"benchmark" : "latest"
"dependencies": {
"benchmark": "latest"
},
"repository": {
"type": "git",
"url": "https://github.com/opencv/opencv.git"
"type": "git",
"url": "https://github.com/opencv/opencv.git"
},
"keywords": [],
"author": "",
"license": "Apache 2.0 License",
"bugs": {
"url": "https://github.com/opencv/opencv/issues"
"url": "https://github.com/opencv/opencv/issues"
},
"homepage": "https://github.com/opencv/opencv"
}
}

View File

@ -155,6 +155,22 @@ public:
* @param target_id the id of target device
*/
CV_WRAP static Ptr<FaceRecognizerSF> create(CV_WRAP_FILE_PATH const String& model, CV_WRAP_FILE_PATH const String& config, int backend_id = 0, int target_id = 0);
/**
* @brief Creates an instance of this class from a buffer containing the model weights and configuration.
* @param framework Name of the framework (ONNX, etc.)
* @param bufferModel A buffer containing the binary model weights.
* @param bufferConfig A buffer containing the network configuration.
* @param backend_id The id of the backend.
* @param target_id The id of the target device.
*
* @return A pointer to the created instance of FaceRecognizerSF.
*/
CV_WRAP static Ptr<FaceRecognizerSF> create(const String& framework,
const std::vector<uchar>& bufferModel,
const std::vector<uchar>& bufferConfig,
int backend_id = 0,
int target_id = 0);
};
//! @}
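A hedged usage sketch for the new buffer-based overload (`load_bytes` is a hypothetical helper, not OpenCV API):
@code{.cpp}
std::vector<uchar> model = load_bytes("face_recognition_sface.onnx");  // hypothetical helper
cv::Ptr<cv::FaceRecognizerSF> fr =
    cv::FaceRecognizerSF::create("onnx", model, std::vector<uchar>()); // ONNX needs no config buffer
@endcode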

View File

@ -26,6 +26,19 @@ public:
net.setPreferableBackend(backend_id);
net.setPreferableTarget(target_id);
}
FaceRecognizerSFImpl(const String& framework,
const std::vector<uchar>& bufferModel,
const std::vector<uchar>& bufferConfig,
int backend_id, int target_id)
{
net = dnn::readNet(framework, bufferModel, bufferConfig);
CV_Assert(!net.empty());
net.setPreferableBackend(backend_id);
net.setPreferableTarget(target_id);
}
void alignCrop(InputArray _src_img, InputArray _face_mat, OutputArray _aligned_img) const override
{
Mat face_mat = _face_mat.getMat();
@ -189,4 +202,17 @@ Ptr<FaceRecognizerSF> FaceRecognizerSF::create(const String& model, const String
#endif
}
Ptr<FaceRecognizerSF> FaceRecognizerSF::create(const String& framework,
const std::vector<uchar>& bufferModel,
const std::vector<uchar>& bufferConfig,
int backend_id, int target_id)
{
#ifdef HAVE_OPENCV_DNN
return makePtr<FaceRecognizerSFImpl>(framework, bufferModel, bufferConfig, backend_id, target_id);
#else
CV_UNUSED(bufferModel); CV_UNUSED(bufferConfig); CV_UNUSED(backend_id); CV_UNUSED(target_id);
CV_Error(cv::Error::StsNotImplemented, "cv::FaceRecognizerSF requires enabled 'dnn' module");
#endif
}
} // namespace cv

View File

@ -49,8 +49,8 @@
#include <type_traits>
#include "precomp.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/photo/legacy/constants_c.h"
using namespace cv;
#undef CV_MAT_ELEM_PTR_FAST
#define CV_MAT_ELEM_PTR_FAST( mat, row, col, pix_size ) \
@ -74,7 +74,6 @@ min4( float a, float b, float c, float d )
return MIN(a,c);
}
#define CV_MAT_3COLOR_ELEM(img,type,y,x,c) CV_MAT_ELEM(img,type,y,(x)*3+(c))
#define KNOWN 0 //known outside narrow band
#define BAND 1 //narrow band (known)
#define INSIDE 2 //unknown
@ -109,11 +108,11 @@ protected:
int next_order;
public:
bool Add(const CvMat* f) {
bool Add(const Mat &f) {
int i,j;
for (i=0; i<f->rows; i++) {
for (j=0; j<f->cols; j++) {
if (CV_MAT_ELEM(*f,uchar,i,j)!=0) {
for (i=0; i<f.rows; i++) {
for (j=0; j<f.cols; j++) {
if (f.at<uchar>(i, j)!=0) {
if (!Push(i,j,0)) return false;
}
}
@ -166,22 +165,22 @@ static inline float VectorLength(const cv::Point2f& v1)
//HEAP::iterator Heap_Iterator;
//HEAP Heap;
static float FastMarching_solve(int i1,int j1,int i2,int j2, const CvMat* f, const CvMat* t)
static float FastMarching_solve(int i1,int j1,int i2,int j2, const Mat &f, const Mat &t)
{
double sol, a11, a22, m12;
a11=CV_MAT_ELEM(*t,float,i1,j1);
a22=CV_MAT_ELEM(*t,float,i2,j2);
a11=t.at<float>(i1,j1);
a22=t.at<float>(i2,j2);
m12=MIN(a11,a22);
if( CV_MAT_ELEM(*f,uchar,i1,j1) != INSIDE )
if( CV_MAT_ELEM(*f,uchar,i2,j2) != INSIDE )
if( f.at<uchar>(i1,j1) != INSIDE )
if( f.at<uchar>(i2,j2) != INSIDE )
if( fabs(a11-a22) >= 1.0 )
sol = 1+m12;
else
sol = (a11+a22+sqrt((double)(2-(a11-a22)*(a11-a22))))*0.5;
else
sol = 1+a11;
else if( CV_MAT_ELEM(*f,uchar,i2,j2) != INSIDE )
else if( f.at<uchar>(i2,j2) != INSIDE )
sol = 1+a22;
else
sol = 1+m12;
@ -193,14 +192,14 @@ static float FastMarching_solve(int i1,int j1,int i2,int j2, const CvMat* f, con
static void
icvCalcFMM(const CvMat *f, CvMat *t, CvPriorityQueueFloat *Heap, bool negate) {
icvCalcFMM(Mat &f, Mat &t, CvPriorityQueueFloat *Heap, bool negate) {
int i, j, ii = 0, jj = 0, q;
float dist;
while (Heap->Pop(&ii,&jj)) {
unsigned known=(negate)?CHANGE:KNOWN;
CV_MAT_ELEM(*f,uchar,ii,jj) = (uchar)known;
f.at<uchar>(ii,jj) = (uchar)known;
for (q=0; q<4; q++) {
i=0; j=0;
@ -208,26 +207,26 @@ icvCalcFMM(const CvMat *f, CvMat *t, CvPriorityQueueFloat *Heap, bool negate) {
else if(q==1) {i=ii; j=jj-1;}
else if(q==2) {i=ii+1; j=jj;}
else {i=ii; j=jj+1;}
if ((i<=0)||(j<=0)||(i>f->rows)||(j>f->cols)) continue;
if ((i<=0)||(j<=0)||(i>f.rows)||(j>f.cols)) continue;
if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) {
if (f.at<uchar>(i,j)==INSIDE) {
dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t),
FastMarching_solve(i+1,j,i,j-1,f,t),
FastMarching_solve(i-1,j,i,j+1,f,t),
FastMarching_solve(i+1,j,i,j+1,f,t));
CV_MAT_ELEM(*t,float,i,j) = dist;
CV_MAT_ELEM(*f,uchar,i,j) = BAND;
t.at<float>(i,j) = dist;
f.at<uchar>(i,j) = BAND;
Heap->Push(i,j,dist);
}
}
}
if (negate) {
for (i=0; i<f->rows; i++) {
for(j=0; j<f->cols; j++) {
if (CV_MAT_ELEM(*f,uchar,i,j) == CHANGE) {
CV_MAT_ELEM(*f,uchar,i,j) = KNOWN;
CV_MAT_ELEM(*t,float,i,j) = -CV_MAT_ELEM(*t,float,i,j);
for (i=0; i<f.rows; i++) {
for(j=0; j<f.cols; j++) {
if (f.at<uchar>(i,j) == CHANGE) {
f.at<uchar>(i,j) = KNOWN;
t.at<float>(i,j) = -t.at<float>(i,j);
}
}
}
@ -236,53 +235,54 @@ icvCalcFMM(const CvMat *f, CvMat *t, CvPriorityQueueFloat *Heap, bool negate) {
template <typename data_type>
static void
icvTeleaInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueueFloat *Heap ) {
icvTeleaInpaintFMM(Mat &f, Mat &t, Mat &out, int range, CvPriorityQueueFloat *Heap ) {
int i = 0, j = 0, ii = 0, jj = 0, k, l, q, color = 0;
float dist;
if (CV_MAT_CN(out->type)==3) {
if (out.channels()==3) {
typedef Vec<uchar, 3> PixelT;
while (Heap->Pop(&ii,&jj)) {
CV_MAT_ELEM(*f,uchar,ii,jj) = KNOWN;
f.at<uchar>(ii,jj) = KNOWN;
for(q=0; q<4; q++) {
if (q==0) {i=ii-1; j=jj;}
else if(q==1) {i=ii; j=jj-1;}
else if(q==2) {i=ii+1; j=jj;}
else if(q==3) {i=ii; j=jj+1;}
if ((i<=0)||(j<=0)||(i>t->rows-1)||(j>t->cols-1)) continue;
if ((i<=0)||(j<=0)||(i>t.rows-1)||(j>t.cols-1)) continue;
if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) {
if (f.at<uchar>(i,j)==INSIDE) {
dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t),
FastMarching_solve(i+1,j,i,j-1,f,t),
FastMarching_solve(i-1,j,i,j+1,f,t),
FastMarching_solve(i+1,j,i,j+1,f,t));
CV_MAT_ELEM(*t,float,i,j) = dist;
t.at<float>(i,j) = dist;
cv::Point2f gradT[3];
for (color=0; color<=2; color++) {
if (CV_MAT_ELEM(*f,uchar,i,j+1)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,i,j-1)!=INSIDE) {
gradT[color].x=(float)((CV_MAT_ELEM(*t,float,i,j+1)-CV_MAT_ELEM(*t,float,i,j-1)))*0.5f;
if (f.at<uchar>(i,j+1)!=INSIDE) {
if (f.at<uchar>(i,j-1)!=INSIDE) {
gradT[color].x=(float)((t.at<float>(i,j+1)-t.at<float>(i,j-1)))*0.5f;
} else {
gradT[color].x=(float)((CV_MAT_ELEM(*t,float,i,j+1)-CV_MAT_ELEM(*t,float,i,j)));
gradT[color].x=(float)((t.at<float>(i,j+1)-t.at<float>(i,j)));
}
} else {
if (CV_MAT_ELEM(*f,uchar,i,j-1)!=INSIDE) {
gradT[color].x=(float)((CV_MAT_ELEM(*t,float,i,j)-CV_MAT_ELEM(*t,float,i,j-1)));
if (f.at<uchar>(i,j-1)!=INSIDE) {
gradT[color].x=(float)((t.at<float>(i,j)-t.at<float>(i,j-1)));
} else {
gradT[color].x=0;
}
}
if (CV_MAT_ELEM(*f,uchar,i+1,j)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,i-1,j)!=INSIDE) {
gradT[color].y=(float)((CV_MAT_ELEM(*t,float,i+1,j)-CV_MAT_ELEM(*t,float,i-1,j)))*0.5f;
if (f.at<uchar>(i+1,j)!=INSIDE) {
if (f.at<uchar>(i-1,j)!=INSIDE) {
gradT[color].y=(float)((t.at<float>(i+1,j)-t.at<float>(i-1,j)))*0.5f;
} else {
gradT[color].y=(float)((CV_MAT_ELEM(*t,float,i+1,j)-CV_MAT_ELEM(*t,float,i,j)));
gradT[color].y=(float)((t.at<float>(i+1,j)-t.at<float>(i,j)));
}
} else {
if (CV_MAT_ELEM(*f,uchar,i-1,j)!=INSIDE) {
gradT[color].y=(float)((CV_MAT_ELEM(*t,float,i,j)-CV_MAT_ELEM(*t,float,i-1,j)));
if (f.at<uchar>(i-1,j)!=INSIDE) {
gradT[color].y=(float)((t.at<float>(i,j)-t.at<float>(i-1,j)));
} else {
gradT[color].y=0;
}
@ -297,50 +297,50 @@ icvTeleaInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQu
float w,dst,lev,dir,sat;
for (k=i-range; k<=i+range; k++) {
int km=k-1+(k==1),kp=k-1-(k==t->rows-2);
int km=k-1+(k==1),kp=k-1-(k==t.rows-2);
for (l=j-range; l<=j+range; l++) {
int lm=l-1+(l==1),lp=l-1-(l==t->cols-2);
if (k>0&&l>0&&k<t->rows-1&&l<t->cols-1) {
if ((CV_MAT_ELEM(*f,uchar,k,l)!=INSIDE)&&
int lm=l-1+(l==1),lp=l-1-(l==t.cols-2);
if (k>0&&l>0&&k<t.rows-1&&l<t.cols-1) {
if ((f.at<uchar>(k,l)!=INSIDE)&&
((l-j)*(l-j)+(k-i)*(k-i)<=range*range)) {
for (color=0; color<=2; color++) {
r.y = (float)(i-k);
r.x = (float)(j-l);
dst = (float)(1./(VectorLength(r)*sqrt((double)VectorLength(r))));
lev = (float)(1./(1+fabs(CV_MAT_ELEM(*t,float,k,l)-CV_MAT_ELEM(*t,float,i,j))));
lev = (float)(1./(1+fabs(t.at<float>(k,l)-t.at<float>(i,j))));
dir=VectorScalMult(r,gradT[color]);
if (fabs(dir)<=0.01) dir=0.000001f;
w = (float)fabs(dst*lev*dir);
if (CV_MAT_ELEM(*f,uchar,k,l+1)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
gradI.x=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,km,lp+1,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm-1,color)))*2.0f;
if (f.at<uchar>(k,l+1)!=INSIDE) {
if (f.at<uchar>(k,l-1)!=INSIDE) {
gradI.x=(float)((out.at<PixelT>(km,lp+1)[color]-out.at<PixelT>(km,lm-1)[color]))*2.0f;
} else {
gradI.x=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,km,lp+1,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color)));
gradI.x=(float)((out.at<PixelT>(km,lp+1)[color]-out.at<PixelT>(km,lm)[color]));
}
} else {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
gradI.x=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,km,lp,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm-1,color)));
if (f.at<uchar>(k,l-1)!=INSIDE) {
gradI.x=(float)((out.at<PixelT>(km,lp)[color]-out.at<PixelT>(km,lm-1)[color]));
} else {
gradI.x=0;
}
}
if (CV_MAT_ELEM(*f,uchar,k+1,l)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
gradI.y=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,kp+1,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km-1,lm,color)))*2.0f;
if (f.at<uchar>(k+1,l)!=INSIDE) {
if (f.at<uchar>(k-1,l)!=INSIDE) {
gradI.y=(float)((out.at<PixelT>(kp+1,lm)[color]-out.at<PixelT>(km-1,lm)[color]))*2.0f;
} else {
gradI.y=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,kp+1,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color)));
gradI.y=(float)((out.at<PixelT>(kp+1,lm)[color]-out.at<PixelT>(km,lm)[color]));
}
} else {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
gradI.y=(float)((CV_MAT_3COLOR_ELEM(*out,uchar,kp,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km-1,lm,color)));
if (f.at<uchar>(k-1,l)!=INSIDE) {
gradI.y=(float)((out.at<PixelT>(kp,lm)[color]-out.at<PixelT>(km-1,lm)[color]));
} else {
gradI.y=0;
}
}
Ia[color] += (float)w * (float)(CV_MAT_3COLOR_ELEM(*out,uchar,k-1,l-1,color));
Ia[color] += (float)w * (float)(out.at<PixelT>(k-1,l-1)[color]);
Jx[color] -= (float)w * (float)(gradI.x*r.x);
Jy[color] -= (float)w * (float)(gradI.y*r.y);
s[color] += w;
@ -351,108 +351,108 @@ icvTeleaInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQu
}
for (color=0; color<=2; color++) {
sat = (float)(Ia[color]/s[color]+(Jx[color]+Jy[color])/(sqrt(Jx[color]*Jx[color]+Jy[color]*Jy[color])+1.0e-20f));
CV_MAT_3COLOR_ELEM(*out,uchar,i-1,j-1,color) = round_cast<uchar>(sat);
out.at<PixelT>(i-1,j-1)[color] = round_cast<uchar>(sat);
}
CV_MAT_ELEM(*f,uchar,i,j) = BAND;
f.at<uchar>(i,j) = BAND;
Heap->Push(i,j,dist);
}
}
}
} else if (CV_MAT_CN(out->type)==1) {
} else if (out.channels()==1) {
while (Heap->Pop(&ii,&jj)) {
CV_MAT_ELEM(*f,uchar,ii,jj) = KNOWN;
f.at<uchar>(ii,jj) = KNOWN;
for(q=0; q<4; q++) {
if (q==0) {i=ii-1; j=jj;}
else if(q==1) {i=ii; j=jj-1;}
else if(q==2) {i=ii+1; j=jj;}
else if(q==3) {i=ii; j=jj+1;}
if ((i<=0)||(j<=0)||(i>t->rows-1)||(j>t->cols-1)) continue;
if ((i<=0)||(j<=0)||(i>t.rows-1)||(j>t.cols-1)) continue;
if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) {
if (f.at<uchar>(i,j)==INSIDE) {
dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t),
FastMarching_solve(i+1,j,i,j-1,f,t),
FastMarching_solve(i-1,j,i,j+1,f,t),
FastMarching_solve(i+1,j,i,j+1,f,t));
CV_MAT_ELEM(*t,float,i,j) = dist;
t.at<float>(i,j) = dist;
for (color=0; color<=0; color++) {
cv::Point2f gradI,gradT,r;
float Ia=0,Jx=0,Jy=0,s=1.0e-20f,w,dst,lev,dir,sat;
if (CV_MAT_ELEM(*f,uchar,i,j+1)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,i,j-1)!=INSIDE) {
gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j+1)-CV_MAT_ELEM(*t,float,i,j-1)))*0.5f;
if (f.at<uchar>(i,j+1)!=INSIDE) {
if (f.at<uchar>(i,j-1)!=INSIDE) {
gradT.x=(float)((t.at<float>(i,j+1)-t.at<float>(i,j-1)))*0.5f;
} else {
gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j+1)-CV_MAT_ELEM(*t,float,i,j)));
gradT.x=(float)((t.at<float>(i,j+1)-t.at<float>(i,j)));
}
} else {
if (CV_MAT_ELEM(*f,uchar,i,j-1)!=INSIDE) {
gradT.x=(float)((CV_MAT_ELEM(*t,float,i,j)-CV_MAT_ELEM(*t,float,i,j-1)));
if (f.at<uchar>(i,j-1)!=INSIDE) {
gradT.x=(float)((t.at<float>(i,j)-t.at<float>(i,j-1)));
} else {
gradT.x=0;
}
}
if (CV_MAT_ELEM(*f,uchar,i+1,j)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,i-1,j)!=INSIDE) {
gradT.y=(float)((CV_MAT_ELEM(*t,float,i+1,j)-CV_MAT_ELEM(*t,float,i-1,j)))*0.5f;
if (f.at<uchar>(i+1,j)!=INSIDE) {
if (f.at<uchar>(i-1,j)!=INSIDE) {
gradT.y=(float)((t.at<float>(i+1,j)-t.at<float>(i-1,j)))*0.5f;
} else {
gradT.y=(float)((CV_MAT_ELEM(*t,float,i+1,j)-CV_MAT_ELEM(*t,float,i,j)));
gradT.y=(float)((t.at<float>(i+1,j)-t.at<float>(i,j)));
}
} else {
if (CV_MAT_ELEM(*f,uchar,i-1,j)!=INSIDE) {
gradT.y=(float)((CV_MAT_ELEM(*t,float,i,j)-CV_MAT_ELEM(*t,float,i-1,j)));
if (f.at<uchar>(i-1,j)!=INSIDE) {
gradT.y=(float)((t.at<float>(i,j)-t.at<float>(i-1,j)));
} else {
gradT.y=0;
}
}
for (k=i-range; k<=i+range; k++) {
int km=k-1+(k==1),kp=k-1-(k==t->rows-2);
int km=k-1+(k==1),kp=k-1-(k==t.rows-2);
for (l=j-range; l<=j+range; l++) {
int lm=l-1+(l==1),lp=l-1-(l==t->cols-2);
if (k>0&&l>0&&k<t->rows-1&&l<t->cols-1) {
if ((CV_MAT_ELEM(*f,uchar,k,l)!=INSIDE)&&
int lm=l-1+(l==1),lp=l-1-(l==t.cols-2);
if (k>0&&l>0&&k<t.rows-1&&l<t.cols-1) {
if ((f.at<uchar>(k,l)!=INSIDE)&&
((l-j)*(l-j)+(k-i)*(k-i)<=range*range)) {
r.y = (float)(i-k);
r.x = (float)(j-l);
dst = (float)(1./(VectorLength(r)*sqrt(VectorLength(r))));
lev = (float)(1./(1+fabs(CV_MAT_ELEM(*t,float,k,l)-CV_MAT_ELEM(*t,float,i,j))));
lev = (float)(1./(1+fabs(t.at<float>(k,l)-t.at<float>(i,j))));
dir=VectorScalMult(r,gradT);
if (fabs(dir)<=0.01) dir=0.000001f;
w = (float)fabs(dst*lev*dir);
if (CV_MAT_ELEM(*f,uchar,k,l+1)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
gradI.x=(float)((CV_MAT_ELEM(*out,data_type,km,lp+1)-CV_MAT_ELEM(*out,data_type,km,lm-1)))*2.0f;
if (f.at<uchar>(k,l+1)!=INSIDE) {
if (f.at<uchar>(k,l-1)!=INSIDE) {
gradI.x=(float)((out.at<data_type>(km,lp+1)-out.at<data_type>(km,lm-1)))*2.0f;
} else {
gradI.x=(float)((CV_MAT_ELEM(*out,data_type,km,lp+1)-CV_MAT_ELEM(*out,data_type,km,lm)));
gradI.x=(float)((out.at<data_type>(km,lp+1)-out.at<data_type>(km,lm)));
}
} else {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
gradI.x=(float)((CV_MAT_ELEM(*out,data_type,km,lp)-CV_MAT_ELEM(*out,data_type,km,lm-1)));
if (f.at<uchar>(k,l-1)!=INSIDE) {
gradI.x=(float)((out.at<data_type>(km,lp)-out.at<data_type>(km,lm-1)));
} else {
gradI.x=0;
}
}
if (CV_MAT_ELEM(*f,uchar,k+1,l)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
gradI.y=(float)((CV_MAT_ELEM(*out,data_type,kp+1,lm)-CV_MAT_ELEM(*out,data_type,km-1,lm)))*2.0f;
if (f.at<uchar>(k+1,l)!=INSIDE) {
if (f.at<uchar>(k-1,l)!=INSIDE) {
gradI.y=(float)((out.at<data_type>(kp+1,lm)-out.at<data_type>(km-1,lm)))*2.0f;
} else {
gradI.y=(float)((CV_MAT_ELEM(*out,data_type,kp+1,lm)-CV_MAT_ELEM(*out,data_type,km,lm)));
gradI.y=(float)((out.at<data_type>(kp+1,lm)-out.at<data_type>(km,lm)));
}
} else {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
gradI.y=(float)((CV_MAT_ELEM(*out,data_type,kp,lm)-CV_MAT_ELEM(*out,data_type,km-1,lm)));
if (f.at<uchar>(k-1,l)!=INSIDE) {
gradI.y=(float)((out.at<data_type>(kp,lm)-out.at<data_type>(km-1,lm)));
} else {
gradI.y=0;
}
}
Ia += (float)w * (float)(CV_MAT_ELEM(*out,data_type,k-1,l-1));
Ia += (float)w * (float)(out.at<data_type>(k-1,l-1));
Jx -= (float)w * (float)(gradI.x*r.x);
Jy -= (float)w * (float)(gradI.y*r.y);
s += w;
@ -462,11 +462,11 @@ icvTeleaInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQu
}
sat = (float)(Ia/s+(Jx+Jy)/(sqrt(Jx*Jx+Jy*Jy)+1.0e-20f));
{
CV_MAT_ELEM(*out,data_type,i-1,j-1) = round_cast<data_type>(sat);
out.at<data_type>(i-1,j-1) = round_cast<data_type>(sat);
}
}
CV_MAT_ELEM(*f,uchar,i,j) = BAND;
f.at<uchar>(i,j) = BAND;
Heap->Push(i,j,dist);
}
}
@ -476,28 +476,29 @@ icvTeleaInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQu
template <typename data_type>
static void
icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueueFloat *Heap) {
icvNSInpaintFMM(Mat &f, Mat &t, Mat &out, int range, CvPriorityQueueFloat *Heap) {
int i = 0, j = 0, ii = 0, jj = 0, k, l, q, color = 0;
float dist;
if (CV_MAT_CN(out->type)==3) {
if (out.channels()==3) {
typedef Vec<uchar, 3> PixelT;
while (Heap->Pop(&ii,&jj)) {
CV_MAT_ELEM(*f,uchar,ii,jj) = KNOWN;
f.at<uchar>(ii,jj) = KNOWN;
for(q=0; q<4; q++) {
if (q==0) {i=ii-1; j=jj;}
else if(q==1) {i=ii; j=jj-1;}
else if(q==2) {i=ii+1; j=jj;}
else if(q==3) {i=ii; j=jj+1;}
if ((i<=0)||(j<=0)||(i>t->rows-1)||(j>t->cols-1)) continue;
if ((i<=0)||(j<=0)||(i>t.rows-1)||(j>t.cols-1)) continue;
if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) {
if (f.at<uchar>(i,j)==INSIDE) {
dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t),
FastMarching_solve(i+1,j,i,j-1,f,t),
FastMarching_solve(i-1,j,i,j+1,f,t),
FastMarching_solve(i+1,j,i,j+1,f,t));
CV_MAT_ELEM(*t,float,i,j) = dist;
t.at<float>(i,j) = dist;
cv::Point2f gradI,r;
float Ia[3]={0,0,0};
@ -505,11 +506,11 @@ icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueue
float w,dst,dir;
for (k=i-range; k<=i+range; k++) {
int km=k-1+(k==1),kp=k-1-(k==f->rows-2);
int km=k-1+(k==1),kp=k-1-(k==f.rows-2);
for (l=j-range; l<=j+range; l++) {
int lm=l-1+(l==1),lp=l-1-(l==f->cols-2);
if (k>0&&l>0&&k<f->rows-1&&l<f->cols-1) {
if ((CV_MAT_ELEM(*f,uchar,k,l)!=INSIDE)&&
int lm=l-1+(l==1),lp=l-1-(l==f.cols-2);
if (k>0&&l>0&&k<f.rows-1&&l<f.cols-1) {
if ((f.at<uchar>(k,l)!=INSIDE)&&
((l-j)*(l-j)+(k-i)*(k-i)<=range*range)) {
for (color=0; color<=2; color++) {
r.y=(float)(k-i);
@ -517,30 +518,30 @@ icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueue
dst = 1/(VectorLength(r)*VectorLength(r)+1);
if (CV_MAT_ELEM(*f,uchar,k+1,l)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
gradI.x=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,kp+1,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,kp,lm,color))+
abs(CV_MAT_3COLOR_ELEM(*out,uchar,kp,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km-1,lm,color)));
if (f.at<uchar>(k+1,l)!=INSIDE) {
if (f.at<uchar>(k-1,l)!=INSIDE) {
gradI.x=(float)(abs(out.at<PixelT>(kp+1,lm)[color]-out.at<PixelT>(kp,lm)[color])+
abs(out.at<PixelT>(kp,lm)[color]-out.at<PixelT>(km-1,lm)[color]));
} else {
gradI.x=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,kp+1,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,kp,lm,color)))*2.0f;
gradI.x=(float)(abs(out.at<PixelT>(kp+1,lm)[color]-out.at<PixelT>(kp,lm)[color]))*2.0f;
}
} else {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
gradI.x=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,kp,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km-1,lm,color)))*2.0f;
if (f.at<uchar>(k-1,l)!=INSIDE) {
gradI.x=(float)(abs(out.at<PixelT>(kp,lm)[color]-out.at<PixelT>(km-1,lm)[color]))*2.0f;
} else {
gradI.x=0;
}
}
if (CV_MAT_ELEM(*f,uchar,k,l+1)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
gradI.y=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,km,lp+1,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color))+
abs(CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm-1,color)));
if (f.at<uchar>(k,l+1)!=INSIDE) {
if (f.at<uchar>(k,l-1)!=INSIDE) {
gradI.y=(float)(abs(out.at<PixelT>(km,lp+1)[color]-out.at<PixelT>(km,lm)[color])+
abs(out.at<PixelT>(km,lm)[color]-out.at<PixelT>(km,lm-1)[color]));
} else {
gradI.y=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,km,lp+1,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color)))*2.0f;
gradI.y=(float)(abs(out.at<PixelT>(km,lp+1)[color]-out.at<PixelT>(km,lm)[color]))*2.0f;
}
} else {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
gradI.y=(float)(abs(CV_MAT_3COLOR_ELEM(*out,uchar,km,lm,color)-CV_MAT_3COLOR_ELEM(*out,uchar,km,lm-1,color)))*2.0f;
if (f.at<uchar>(k,l-1)!=INSIDE) {
gradI.y=(float)(abs(out.at<PixelT>(km,lm)[color]-out.at<PixelT>(km,lm-1)[color]))*2.0f;
} else {
gradI.y=0;
}
@ -555,7 +556,7 @@ icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueue
dir = (float)fabs(VectorScalMult(r,gradI)/sqrt(VectorLength(r)*VectorLength(gradI)));
}
w = dst*dir;
Ia[color] += (float)w * (float)(CV_MAT_3COLOR_ELEM(*out,uchar,k-1,l-1,color));
Ia[color] += (float)w * (float)(out.at<PixelT>(k-1,l-1)[color]);
s[color] += w;
}
}
@ -563,74 +564,74 @@ icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueue
}
}
for (color=0; color<=2; color++) {
CV_MAT_3COLOR_ELEM(*out,uchar,i-1,j-1,color) = cv::saturate_cast<uchar>((double)Ia[color]/s[color]);
out.at<PixelT>(i-1,j-1)[color] = cv::saturate_cast<uchar>((double)Ia[color]/s[color]);
}
CV_MAT_ELEM(*f,uchar,i,j) = BAND;
f.at<uchar>(i,j) = BAND;
Heap->Push(i,j,dist);
}
}
}
} else if (CV_MAT_CN(out->type)==1) {
} else if (out.channels()==1) {
while (Heap->Pop(&ii,&jj)) {
CV_MAT_ELEM(*f,uchar,ii,jj) = KNOWN;
f.at<uchar>(ii,jj) = KNOWN;
for(q=0; q<4; q++) {
if (q==0) {i=ii-1; j=jj;}
else if(q==1) {i=ii; j=jj-1;}
else if(q==2) {i=ii+1; j=jj;}
else if(q==3) {i=ii; j=jj+1;}
if ((i<=0)||(j<=0)||(i>t->rows-1)||(j>t->cols-1)) continue;
if ((i<=0)||(j<=0)||(i>t.rows-1)||(j>t.cols-1)) continue;
if (CV_MAT_ELEM(*f,uchar,i,j)==INSIDE) {
if (f.at<uchar>(i,j)==INSIDE) {
dist = min4(FastMarching_solve(i-1,j,i,j-1,f,t),
FastMarching_solve(i+1,j,i,j-1,f,t),
FastMarching_solve(i-1,j,i,j+1,f,t),
FastMarching_solve(i+1,j,i,j+1,f,t));
CV_MAT_ELEM(*t,float,i,j) = dist;
t.at<float>(i,j) = dist;
{
cv::Point2f gradI,r;
float Ia=0,s=1.0e-20f,w,dst,dir;
for (k=i-range; k<=i+range; k++) {
int km=k-1+(k==1),kp=k-1-(k==t->rows-2);
int km=k-1+(k==1),kp=k-1-(k==t.rows-2);
for (l=j-range; l<=j+range; l++) {
int lm=l-1+(l==1),lp=l-1-(l==t->cols-2);
if (k>0&&l>0&&k<t->rows-1&&l<t->cols-1) {
if ((CV_MAT_ELEM(*f,uchar,k,l)!=INSIDE)&&
int lm=l-1+(l==1),lp=l-1-(l==t.cols-2);
if (k>0&&l>0&&k<t.rows-1&&l<t.cols-1) {
if ((f.at<uchar>(k,l)!=INSIDE)&&
((l-j)*(l-j)+(k-i)*(k-i)<=range*range)) {
r.y=(float)(i-k);
r.x=(float)(j-l);
dst = 1/(VectorLength(r)*VectorLength(r)+1);
if (CV_MAT_ELEM(*f,uchar,k+1,l)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
gradI.x=(float)(std::abs(CV_MAT_ELEM(*out,data_type,kp+1,lm)-CV_MAT_ELEM(*out,data_type,kp,lm))+
std::abs(CV_MAT_ELEM(*out,data_type,kp,lm)-CV_MAT_ELEM(*out,data_type,km-1,lm)));
if (f.at<uchar>(k+1,l)!=INSIDE) {
if (f.at<uchar>(k-1,l)!=INSIDE) {
gradI.x=(float)(std::abs(out.at<data_type>(kp+1,lm)-out.at<data_type>(kp,lm))+
std::abs(out.at<data_type>(kp,lm)-out.at<data_type>(km-1,lm)));
} else {
gradI.x=(float)(std::abs(CV_MAT_ELEM(*out,data_type,kp+1,lm)-CV_MAT_ELEM(*out,data_type,kp,lm)))*2.0f;
gradI.x=(float)(std::abs(out.at<data_type>(kp+1,lm)-out.at<data_type>(kp,lm)))*2.0f;
}
} else {
if (CV_MAT_ELEM(*f,uchar,k-1,l)!=INSIDE) {
gradI.x=(float)(std::abs(CV_MAT_ELEM(*out,data_type,kp,lm)-CV_MAT_ELEM(*out,data_type,km-1,lm)))*2.0f;
if (f.at<uchar>(k-1,l)!=INSIDE) {
gradI.x=(float)(std::abs(out.at<data_type>(kp,lm)-out.at<data_type>(km-1,lm)))*2.0f;
} else {
gradI.x=0;
}
}
if (CV_MAT_ELEM(*f,uchar,k,l+1)!=INSIDE) {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
gradI.y=(float)(std::abs(CV_MAT_ELEM(*out,data_type,km,lp+1)-CV_MAT_ELEM(*out,data_type,km,lm))+
std::abs(CV_MAT_ELEM(*out,data_type,km,lm)-CV_MAT_ELEM(*out,data_type,km,lm-1)));
if (f.at<uchar>(k,l+1)!=INSIDE) {
if (f.at<uchar>(k,l-1)!=INSIDE) {
gradI.y=(float)(std::abs(out.at<data_type>(km,lp+1)-out.at<data_type>(km,lm))+
std::abs(out.at<data_type>(km,lm)-out.at<data_type>(km,lm-1)));
} else {
gradI.y=(float)(std::abs(CV_MAT_ELEM(*out,data_type,km,lp+1)-CV_MAT_ELEM(*out,data_type,km,lm)))*2.0f;
gradI.y=(float)(std::abs(out.at<data_type>(km,lp+1)-out.at<data_type>(km,lm)))*2.0f;
}
} else {
if (CV_MAT_ELEM(*f,uchar,k,l-1)!=INSIDE) {
gradI.y=(float)(std::abs(CV_MAT_ELEM(*out,data_type,km,lm)-CV_MAT_ELEM(*out,data_type,km,lm-1)))*2.0f;
if (f.at<uchar>(k,l-1)!=INSIDE) {
gradI.y=(float)(std::abs(out.at<data_type>(km,lm)-out.at<data_type>(km,lm-1)))*2.0f;
} else {
gradI.y=0;
}
@ -645,16 +646,16 @@ icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueue
dir = (float)fabs(VectorScalMult(r,gradI)/sqrt(VectorLength(r)*VectorLength(gradI)));
}
w = dst*dir;
Ia += (float)w * (float)(CV_MAT_ELEM(*out,data_type,k-1,l-1));
Ia += (float)w * (float)(out.at<data_type>(k-1,l-1));
s += w;
}
}
}
}
CV_MAT_ELEM(*out,data_type,i-1,j-1) = cv::saturate_cast<data_type>((double)Ia/s);
out.at<data_type>(i-1,j-1) = cv::saturate_cast<data_type>((double)Ia/s);
}
CV_MAT_ELEM(*f,uchar,i,j) = BAND;
f.at<uchar>(i,j) = BAND;
Heap->Push(i,j,dist);
}
}
@ -665,99 +666,94 @@ icvNSInpaintFMM(const CvMat *f, CvMat *t, CvMat *out, int range, CvPriorityQueue
#define SET_BORDER1_C1(image,type,value) {\
int i,j;\
for(j=0; j<image->cols; j++) {\
CV_MAT_ELEM(*image,type,0,j) = value;\
for(j=0; j<image.cols; j++) {\
image.at<type>(0,j) = value;\
}\
for (i=1; i<image->rows-1; i++) {\
CV_MAT_ELEM(*image,type,i,0) = CV_MAT_ELEM(*image,type,i,image->cols-1) = value;\
for (i=1; i<image.rows-1; i++) {\
image.at<type>(i,0) = image.at<type>(i,image.cols-1) = value;\
}\
for(j=0; j<image->cols; j++) {\
CV_MAT_ELEM(*image,type,erows-1,j) = value;\
for(j=0; j<image.cols; j++) {\
image.at<type>(erows-1,j) = value;\
}\
}
#define COPY_MASK_BORDER1_C1(src,dst,type) {\
int i,j;\
for (i=0; i<src->rows; i++) {\
for(j=0; j<src->cols; j++) {\
if (CV_MAT_ELEM(*src,type,i,j)!=0)\
CV_MAT_ELEM(*dst,type,i+1,j+1) = INSIDE;\
for (i=0; i<src.rows; i++) {\
for(j=0; j<src.cols; j++) {\
if (src.at<type>(i,j)!=0)\
dst.at<type>(i+1,j+1) = INSIDE;\
}\
}\
}
static void
icvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_img,
icvInpaint( const Mat &input_img, const Mat &inpaint_mask, Mat &output_img,
double inpaintRange, int flags )
{
cv::Ptr<CvMat> mask, band, f, t, out;
cv::Mat mask, band, f, t, out;
cv::Ptr<CvPriorityQueueFloat> Heap, Out;
cv::Mat el_range, el_cross; // structuring elements for dilate
CvMat input_hdr, mask_hdr, output_hdr;
CvMat* input_img, *inpaint_mask, *output_img;
int range=cvRound(inpaintRange);
int erows, ecols;
input_img = cvGetMat( _input_img, &input_hdr );
inpaint_mask = cvGetMat( _inpaint_mask, &mask_hdr );
output_img = cvGetMat( _output_img, &output_hdr );
if( !CV_ARE_SIZES_EQ(input_img,output_img) || !CV_ARE_SIZES_EQ(input_img,inpaint_mask))
if((input_img.size() != output_img.size()) || (input_img.size() != inpaint_mask.size()))
CV_Error( cv::Error::StsUnmatchedSizes, "All the input and output images must have the same size" );
if( (CV_MAT_TYPE(input_img->type) != CV_8U &&
CV_MAT_TYPE(input_img->type) != CV_16U &&
CV_MAT_TYPE(input_img->type) != CV_32F &&
CV_MAT_TYPE(input_img->type) != CV_8UC3) ||
!CV_ARE_TYPES_EQ(input_img,output_img) )
if( (input_img.type() != CV_8U &&
input_img.type() != CV_16U &&
input_img.type() != CV_32F &&
input_img.type() != CV_8UC3) ||
(input_img.type() != output_img.type()) )
CV_Error( cv::Error::StsUnsupportedFormat,
"8-bit, 16-bit unsigned or 32-bit float 1-channel and 8-bit 3-channel input/output images are supported" );
if( CV_MAT_TYPE(inpaint_mask->type) != CV_8UC1 )
if( inpaint_mask.type() != CV_8UC1 )
CV_Error( cv::Error::StsUnsupportedFormat, "The mask must be 8-bit 1-channel image" );
range = MAX(range,1);
range = MIN(range,100);
ecols = input_img->cols + 2;
erows = input_img->rows + 2;
ecols = input_img.cols + 2;
erows = input_img.rows + 2;
f.reset(cvCreateMat(erows, ecols, CV_8UC1));
t.reset(cvCreateMat(erows, ecols, CV_32FC1));
band.reset(cvCreateMat(erows, ecols, CV_8UC1));
mask.reset(cvCreateMat(erows, ecols, CV_8UC1));
f.create(erows, ecols, CV_8UC1);
t.create(erows, ecols, CV_32FC1);
band.create(erows, ecols, CV_8UC1);
mask.create(erows, ecols, CV_8UC1);
el_cross = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(3, 3), cv::Point(1, 1));
cvCopy( input_img, output_img );
cvSet(mask,cvScalar(KNOWN,0,0,0));
input_img.copyTo( output_img );
mask.setTo(Scalar(KNOWN,0,0,0));
COPY_MASK_BORDER1_C1(inpaint_mask,mask,uchar);
SET_BORDER1_C1(mask,uchar,0);
cvSet(f,cvScalar(KNOWN,0,0,0));
cvSet(t,cvScalar(1.0e6f,0,0,0));
cv::dilate(cv::cvarrToMat(mask), cv::cvarrToMat(band), el_cross, cv::Point(1, 1));
f.setTo(Scalar(KNOWN,0,0,0));
t.setTo(Scalar(1.0e6f,0,0,0));
cv::dilate(mask, band, el_cross, cv::Point(1, 1));
Heap=cv::makePtr<CvPriorityQueueFloat>();
cvSub(band,mask,band,NULL);
subtract(band, mask, band);
SET_BORDER1_C1(band,uchar,0);
if (!Heap->Add(band))
return;
cvSet(f,cvScalar(BAND,0,0,0),band);
cvSet(f,cvScalar(INSIDE,0,0,0),mask);
cvSet(t,cvScalar(0,0,0,0),band);
f.setTo(Scalar(BAND,0,0,0),band);
f.setTo(Scalar(INSIDE,0,0,0),mask);
t.setTo(Scalar(0,0,0,0),band);
if( flags == cv::INPAINT_TELEA )
{
out.reset(cvCreateMat(erows, ecols, CV_8UC1));
out.create(erows, ecols, CV_8UC1);
el_range = cv::getStructuringElement(cv::MORPH_RECT, cv::Size(2 * range + 1, 2 * range + 1));
cv::dilate(cv::cvarrToMat(mask), cv::cvarrToMat(out), el_range);
cvSub(out,mask,out,NULL);
cv::dilate(mask, out, el_range);
subtract(out, mask, out);
Out=cv::makePtr<CvPriorityQueueFloat>();
if (!Out->Add(band))
return;
cvSub(out,band,out,NULL);
subtract(out, band, out);
SET_BORDER1_C1(out,uchar,0);
icvCalcFMM(out,t,Out,true);
switch(CV_MAT_DEPTH(output_img->type))
switch(output_img.depth())
{
case CV_8U:
icvTeleaInpaintFMM<uchar>(mask,t,output_img,range,Heap);
@ -773,7 +769,7 @@ icvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_
}
}
else if (flags == cv::INPAINT_NS) {
switch(CV_MAT_DEPTH(output_img->type))
switch(output_img.depth())
{
case CV_8U:
icvNSInpaintFMM<uchar>(mask,t,output_img,range,Heap);
@ -788,7 +784,7 @@ icvInpaint( const CvArr* _input_img, const CvArr* _inpaint_mask, CvArr* _output_
CV_Error( cv::Error::StsBadArg, "Unsupported format of the input image" );
}
} else {
CV_Error( cv::Error::StsBadArg, "The flags argument must be one of CV_INPAINT_TELEA or CV_INPAINT_NS" );
CV_Error( cv::Error::StsBadArg, "The flags argument must be one of INPAINT_TELEA or INPAINT_NS" );
}
}
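The initial narrow band above is built morphologically: dilating the padded mask with a 3x3 cross and subtracting the mask leaves exactly the one-pixel ring around the unknown region, which seeds the heap. The step in isolation (a sketch; assumes a CV_8UC1 mask where nonzero marks the region to inpaint):

cv::Mat el_cross = cv::getStructuringElement(cv::MORPH_CROSS, cv::Size(3, 3), cv::Point(1, 1));
cv::Mat band;
cv::dilate(mask, band, el_cross, cv::Point(1, 1));
cv::subtract(band, mask, band);  // keep only the ring just outside the mask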
@ -800,6 +796,5 @@ void cv::inpaint( InputArray _src, InputArray _mask, OutputArray _dst,
Mat src = _src.getMat(), mask = _mask.getMat();
_dst.create( src.size(), src.type() );
Mat dst = _dst.getMat();
CvMat c_src = cvMat(src), c_mask = cvMat(mask), c_dst = cvMat(dst);
icvInpaint( &c_src, &c_mask, &c_dst, inpaintRange, flags );
icvInpaint( src, mask, dst, inpaintRange, flags );
}

View File

@ -27,7 +27,9 @@
//! @{
/**
@brief Lucas-Kanade optical flow for single pyramid layer. See calcOpticalFlowPyrLK
@brief Lucas-Kanade optical flow for a single pyramid layer. See calcOpticalFlowPyrLK.
@note OpenCV builds pyramid levels with `win_size` padding. Out-of-bound access to source
image data is legal within `+-win_size` range.
@param prev_data previous frame image data
@param prev_data_step previous frame image data step
@param prev_deriv_data previous frame Scharr derivatives
@ -67,6 +69,29 @@ inline int hal_ni_LKOpticalFlowLevel(const uchar *prev_data, size_t prev_data_st
#define cv_hal_LKOpticalFlowLevel hal_ni_LKOpticalFlowLevel
//! @endcond
/**
@brief Computes Scharr derivatives with interleaved layout xyxy...
@note OpenCV builds pyramid levels with `win_size` padding. Out-of-bound access to source
image data is legal within `+-win_size` range.
@param src_data source image data
@param src_step source image step
@param dst_data destination buffer data
@param dst_step destination buffer step
@param width image width
@param height image height
@param cn source image channels
**/
inline int hal_ni_ScharrDeriv(const uchar* src_data, size_t src_step,
short* dst_data, size_t dst_step,
int width, int height, int cn)
{
return CV_HAL_ERROR_NOT_IMPLEMENTED;
}
//! @cond IGNORED
#define cv_hal_ScharrDeriv hal_ni_ScharrDeriv
//! @endcond
//! @}
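Given the interleaved xyxy layout documented above, a consumer indexes the buffer as (dx, dy) pairs per channel. A hedged accessor sketch (assuming, as is conventional for HAL buffers, that dst_step is in bytes):

// x- and y-derivative of channel c at pixel (y, x); cn*2 shorts per pixel
inline short scharrDx(const short* dst_data, size_t dst_step, int y, int x, int c, int cn)
{
    const short* row = (const short*)((const uchar*)dst_data + y * dst_step);
    return row[(x * cn + c) * 2 + 0];
}
inline short scharrDy(const short* dst_data, size_t dst_step, int y, int x, int c, int cn)
{
    const short* row = (const short*)((const uchar*)dst_data + y * dst_step);
    return row[(x * cn + c) * 2 + 1];
}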
#if defined(__clang__)

View File

@ -63,6 +63,9 @@ static void calcScharrDeriv(const cv::Mat& src, cv::Mat& dst)
int rows = src.rows, cols = src.cols, cn = src.channels(), depth = src.depth();
CV_Assert(depth == CV_8U);
dst.create(rows, cols, CV_MAKETYPE(DataType<deriv_type>::depth, cn*2));
CALL_HAL(ScharrDeriv, cv_hal_ScharrDeriv, src.data, src.step, (short*)dst.data, dst.step, cols, rows, cn);
parallel_for_(Range(0, rows), cv::detail::ScharrDerivInvoker(src, dst), cv::getNumThreads());
}
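CALL_HAL tries the registered hook first and only reaches the parallel_for_ reference path when the hook declines. A rough sketch of the macro's conventional shape (an assumption about the dispatch infrastructure, not a quote of it):

#define CALL_HAL(name, fun, ...)                                  \
{                                                                 \
    int res = fun(__VA_ARGS__);                                   \
    if (res == CV_HAL_ERROR_OK)                                   \
        return;                     /* HAL handled the call */    \
    else if (res != CV_HAL_ERROR_NOT_IMPLEMENTED)                 \
        CV_Error(cv::Error::StsInternal, "HAL call failed");      \
    /* else fall through to the reference implementation below */ \
}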

View File

@ -9,9 +9,11 @@
# error this is a private header which should not be used from outside of the OpenCV library
#endif
#include "opencv2/core/cvdef.h"
#include "opencv2/videoio/videoio_c.h"
#include "opencv2/core/types.hpp"
#include <deque>
#include <vector>
#include <string>
#include <memory>
namespace cv
{
@ -80,8 +82,8 @@ class CV_EXPORTS AVIReadContainer
public:
AVIReadContainer();
void initStream(const String& filename);
void initStream(Ptr<VideoInputStream> m_file_stream_);
void initStream(const std::string& filename);
void initStream(std::shared_ptr<VideoInputStream> m_file_stream_);
void close();
//stores found frames in m_frame_list, which can be accessed via getFrames
@ -121,7 +123,7 @@ protected:
void printError(RiffChunk& chunk, unsigned int expected_fourcc);
Ptr<VideoInputStream> m_file_stream;
std::shared_ptr<VideoInputStream> m_file_stream;
unsigned int m_stream_id;
unsigned long long int m_movi_start;
unsigned long long int m_movi_end;
@ -150,7 +152,7 @@ public:
AVIWriteContainer();
~AVIWriteContainer();
bool initContainer(const String& filename, double fps, Size size, bool iscolor);
bool initContainer(const std::string& filename, double fps, cv::Size size, bool iscolor);
void startWriteAVI(int stream_count);
void writeStreamHeader(Codecs codec_);
void startWriteChunk(uint32_t fourcc);
@ -180,7 +182,7 @@ public:
void jflushStream(unsigned currval, int bitIdx);
private:
Ptr<BitStream> strm;
std::shared_ptr<BitStream> strm;
int outfps;
int width, height, channels;
size_t moviPointer;

View File

@ -5,6 +5,8 @@
#ifndef OPENCV_VIDEOIO_LEGACY_CONSTANTS_H
#define OPENCV_VIDEOIO_LEGACY_CONSTANTS_H
#include "opencv2/core/cvdef.h"
enum
{
CV_CAP_ANY =0, // autodetect
@ -410,22 +412,6 @@ enum
CV_CAP_PROP_VIEWFINDER = 17010 // Enter liveview mode.
};
//! Macro to construct the fourcc code of the codec. Same as CV_FOURCC()
#define CV_FOURCC_MACRO(c1, c2, c3, c4) (((c1) & 255) + (((c2) & 255) << 8) + (((c3) & 255) << 16) + (((c4) & 255) << 24))
/** @brief Constructs the fourcc code of the codec function
Simply call it with 4 chars fourcc code like `CV_FOURCC('I', 'Y', 'U', 'V')`
List of codes can be obtained at [Video Codecs by FOURCC](https://fourcc.org/codecs.php) page.
FFMPEG backend with MP4 container natively uses other values as fourcc code:
see [ObjectType](http://mp4ra.org/#/codecs).
*/
CV_INLINE int CV_FOURCC(char c1, char c2, char c3, char c4)
{
return CV_FOURCC_MACRO(c1, c2, c3, c4);
}
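For reference, the packing these removed helpers performed puts the first character in the lowest byte (little-endian order), e.g.:

// 'M' = 0x4D, 'J' = 0x4A, 'P' = 0x50, 'G' = 0x47
int fourcc = CV_FOURCC('M', 'J', 'P', 'G');
// == 0x4D | (0x4A << 8) | (0x50 << 16) | (0x47 << 24) == 0x47504A4D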
//! (Windows only) Open Codec Selection Dialog
#define CV_FOURCC_PROMPT -1
//! (Linux only) Use default codec for specified filename

View File

@ -403,17 +403,17 @@ public:
double getProperty(int property_id) const CV_OVERRIDE
{
switch (property_id) {
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
return isOpened() ? frameWidth : desiredWidth;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
return isOpened() ? frameHeight : desiredHeight;
case CAP_PROP_AUTO_EXPOSURE:
return autoExposure ? 1 : 0;
case CV_CAP_PROP_EXPOSURE:
case CAP_PROP_EXPOSURE:
return exposureTime;
case CV_CAP_PROP_ISO_SPEED:
case CAP_PROP_ISO_SPEED:
return sensitivity;
case CV_CAP_PROP_FOURCC:
case CAP_PROP_FOURCC:
return fourCC;
default:
break;
@ -425,7 +425,7 @@ public:
bool setProperty(int property_id, double value) CV_OVERRIDE
{
switch (property_id) {
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
desiredWidth = value;
settingWidth = true;
if (settingWidth && settingHeight) {
@ -434,7 +434,7 @@ public:
settingHeight = false;
}
return true;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
desiredHeight = value;
settingHeight = true;
if (settingWidth && settingHeight) {
@ -443,7 +443,7 @@ public:
settingHeight = false;
}
return true;
case CV_CAP_PROP_FOURCC:
case CAP_PROP_FOURCC:
{
uint32_t newFourCC = cvRound(value);
if (fourCC == newFourCC) {
@ -485,18 +485,18 @@ public:
return status == ACAMERA_OK;
}
return true;
case CV_CAP_PROP_EXPOSURE:
case CAP_PROP_EXPOSURE:
if (isOpened() && exposureRange.Supported()) {
exposureTime = (int64_t)value;
LOGI("Setting CV_CAP_PROP_EXPOSURE will have no effect unless CAP_PROP_AUTO_EXPOSURE is off");
LOGI("Setting CAP_PROP_EXPOSURE will have no effect unless CAP_PROP_AUTO_EXPOSURE is off");
camera_status_t status = ACaptureRequest_setEntry_i64(captureRequest.get(), ACAMERA_SENSOR_EXPOSURE_TIME, 1, &exposureTime);
return status == ACAMERA_OK;
}
return false;
case CV_CAP_PROP_ISO_SPEED:
case CAP_PROP_ISO_SPEED:
if (isOpened() && sensitivityRange.Supported()) {
sensitivity = (int32_t)value;
LOGI("Setting CV_CAP_PROP_ISO_SPEED will have no effect unless CAP_PROP_AUTO_EXPOSURE is off");
LOGI("Setting CAP_PROP_ISO_SPEED will have no effect unless CAP_PROP_AUTO_EXPOSURE is off");
camera_status_t status = ACaptureRequest_setEntry_i32(captureRequest.get(), ACAMERA_SENSOR_SENSITIVITY, 1, &sensitivity);
return status == ACAMERA_OK;
}

View File

@ -182,16 +182,16 @@ public:
{
switch (property_id)
{
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
return (( videoOrientationAuto &&
(cv::ROTATE_90_CLOCKWISE == videoRotationCode || cv::ROTATE_90_COUNTERCLOCKWISE == videoRotationCode))
? videoHeight : videoWidth);
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
return (( videoOrientationAuto &&
(cv::ROTATE_90_CLOCKWISE == videoRotationCode || cv::ROTATE_90_COUNTERCLOCKWISE == videoRotationCode))
? videoWidth : videoHeight);
case CV_CAP_PROP_FPS: return videoFrameRate;
case CV_CAP_PROP_FRAME_COUNT: return videoFrameCount;
case CAP_PROP_FPS: return videoFrameRate;
case CAP_PROP_FRAME_COUNT: return videoFrameCount;
case CAP_PROP_ORIENTATION_META: return videoRotation;
case CAP_PROP_ORIENTATION_AUTO: return videoOrientationAuto ? 1 : 0;
}
@ -661,7 +661,7 @@ const AndroidMediaNdkVideoWriter::FourCCInfo AndroidMediaNdkVideoWriter::FOURCC_
{ CV_FOURCC('H', '2', '6', '5'), "video/hevc", AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4 },
{ CV_FOURCC('H', '2', '6', '3'), "video/3gpp", AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4 },
{ CV_FOURCC('M', 'P', '4', 'V'), "video/mp4v-es", AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4 },
{ 0, NULL },
{ 0, NULL, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4 },
};

View File

@ -46,6 +46,8 @@
#include "precomp.hpp"
#include "cap_interface.hpp"
using namespace cv;
#ifdef HAVE_ARAVIS_API
#include <arv.h>
@ -93,25 +95,26 @@
/********************* Capturing video from camera via Aravis *********************/
class CvCaptureCAM_Aravis : public CvCapture
class CvCaptureCAM_Aravis : public IVideoCapture
{
public:
CvCaptureCAM_Aravis();
virtual ~CvCaptureCAM_Aravis()
~CvCaptureCAM_Aravis()
{
close();
}
virtual bool open(int);
virtual void close();
virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
virtual int getCaptureDomain() CV_OVERRIDE
bool open(int);
void close();
double getProperty(int) const CV_OVERRIDE;
bool setProperty(int, double) CV_OVERRIDE;
bool grabFrame() CV_OVERRIDE;
bool retrieveFrame(int, OutputArray) CV_OVERRIDE;
int getCaptureDomain() CV_OVERRIDE
{
return cv::CAP_ARAVIS;
}
bool isOpened() const CV_OVERRIDE { return stream != NULL; }
protected:
bool create(int);
@ -122,7 +125,7 @@ protected:
bool getDeviceNameById(int id, std::string &device);
void autoExposureControl(IplImage*);
void autoExposureControl(const Mat &);
ArvCamera *camera; // Camera to control.
ArvStream *stream; // Object for video stream reception.
@ -167,8 +170,6 @@ protected:
unsigned frameID; // current frame id
unsigned prevFrameID;
IplImage *frame; // local frame copy
};
@ -190,7 +191,6 @@ CvCaptureCAM_Aravis::CvCaptureCAM_Aravis()
allowAutoTrigger = false;
num_buffers = 10;
frame = NULL;
}
void CvCaptureCAM_Aravis::close()
@ -314,51 +314,37 @@ bool CvCaptureCAM_Aravis::grabFrame()
return false;
}
IplImage* CvCaptureCAM_Aravis::retrieveFrame(int)
bool CvCaptureCAM_Aravis::retrieveFrame(int, OutputArray arr)
{
if(framebuffer) {
int depth = 0, channels = 0;
switch(pixelFormat) {
case ARV_PIXEL_FORMAT_MONO_8:
case ARV_PIXEL_FORMAT_BAYER_GR_8:
depth = IPL_DEPTH_8U;
depth = CV_8U;
channels = 1;
break;
case ARV_PIXEL_FORMAT_MONO_12:
case ARV_PIXEL_FORMAT_MONO_16:
depth = IPL_DEPTH_16U;
depth = CV_16U;
channels = 1;
break;
default:
return false;
}
if(depth && channels) {
IplImage src;
cvInitImageHeader( &src, cvSize( width, height ), depth, channels, IPL_ORIGIN_TL, 4 );
cvSetData( &src, framebuffer, src.widthStep );
if( !frame ||
frame->width != src.width ||
frame->height != src.height ||
frame->depth != src.depth ||
frame->nChannels != src.nChannels) {
cvReleaseImage( &frame );
frame = cvCreateImage( cvGetSize(&src), src.depth, channels );
}
cvCopy(&src, frame);
if(controlExposure && ((frameID - prevFrameID) >= 3)) {
// control exposure every third frame
// i.e. skip frame taken with previous exposure setup
autoExposureControl(frame);
}
return frame;
Mat src(Size( width, height ), CV_MAKE_TYPE(depth, channels), framebuffer);
if(controlExposure && ((frameID - prevFrameID) >= 3)) {
// control exposure every third frame
// i.e. skip frame taken with previous exposure setup
autoExposureControl(src);
}
src.copyTo(arr);
return true;
}
return NULL;
return false;
}
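The converted retrieveFrame relies on the external-buffer Mat constructor: wrapping framebuffer creates only a header over driver-owned memory, and the single deep copy happens in copyTo. The idiom in isolation (names as in the code above; the stride argument is omitted because the Aravis buffer is packed):

cv::Mat view(cv::Size(width, height), CV_MAKE_TYPE(depth, channels), framebuffer);
view.copyTo(arr);  // one deep copy; `view` only borrows framebuffer, which must stay valid until here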
void CvCaptureCAM_Aravis::autoExposureControl(IplImage* image)
void CvCaptureCAM_Aravis::autoExposureControl(const Mat & image)
{
// Software control of exposure parameters utilizing
// automatic change of exposure time & gain
@ -367,10 +353,8 @@ void CvCaptureCAM_Aravis::autoExposureControl(IplImage* image)
// - to increase brightness, first increase time then gain
// - to decrease brightness, first decrease gain then time
cv::Mat m = cv::cvarrToMat(image);
// calc mean value for luminance or green channel
double brightness = cv::mean(m)[image->nChannels > 1 ? 1 : 0];
double brightness = cv::mean(image)[image.channels() > 1 ? 1 : 0];
if(brightness < 1) brightness = 1;
// mid point - 100 % means no change
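A sketch of the ordering those comments describe (all names, step sizes, and limits are illustrative, not the function's actual variables):

if (brightness < target) {          // too dark: raise exposure time first, then gain
    if (exposure < exposureMax) exposure = std::min(exposure * 2.0, exposureMax);
    else                        gain     = std::min(gain + gainStep, gainMax);
} else if (brightness > target) {   // too bright: lower gain first, then exposure time
    if (gain > gainMin)         gain     = std::max(gain - gainStep, gainMin);
    else                        exposure = std::max(exposure / 2.0, exposureMin);
}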
@ -437,41 +421,41 @@ void CvCaptureCAM_Aravis::autoExposureControl(IplImage* image)
double CvCaptureCAM_Aravis::getProperty( int property_id ) const
{
switch(property_id) {
case CV_CAP_PROP_POS_MSEC:
case CAP_PROP_POS_MSEC:
return (double)frameID/fps;
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
return width;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
return height;
case CV_CAP_PROP_AUTO_EXPOSURE:
case CAP_PROP_AUTO_EXPOSURE:
return (controlExposure ? 1 : 0);
case CV_CAP_PROP_BRIGHTNESS:
case CAP_PROP_BRIGHTNESS:
return exposureCompensation;
case CV_CAP_PROP_EXPOSURE:
case CAP_PROP_EXPOSURE:
if(exposureAvailable) {
/* exposure time in seconds, like 1/100 s */
return arv_camera_get_exposure_time(camera, NULL) / 1e6;
}
break;
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
if(fpsAvailable) {
return arv_camera_get_frame_rate(camera, NULL);
}
break;
case CV_CAP_PROP_GAIN:
case CAP_PROP_GAIN:
if(gainAvailable) {
return arv_camera_get_gain(camera, NULL);
}
break;
case CV_CAP_PROP_FOURCC:
case CAP_PROP_FOURCC:
{
ArvPixelFormat currFormat = arv_camera_get_pixel_format(camera, NULL);
switch( currFormat ) {
@ -487,7 +471,7 @@ double CvCaptureCAM_Aravis::getProperty( int property_id ) const
}
break;
case CV_CAP_PROP_BUFFERSIZE:
case CAP_PROP_BUFFERSIZE:
if(stream) {
int in, out;
arv_stream_get_n_buffers(stream, &in, &out);
@ -508,7 +492,7 @@ double CvCaptureCAM_Aravis::getProperty( int property_id ) const
bool CvCaptureCAM_Aravis::setProperty( int property_id, double value )
{
switch(property_id) {
case CV_CAP_PROP_AUTO_EXPOSURE:
case CAP_PROP_AUTO_EXPOSURE:
if(exposureAvailable || gainAvailable) {
if( (controlExposure = (bool)(int)value) ) {
exposure = exposureAvailable ? arv_camera_get_exposure_time(camera, NULL) : 0;
@ -516,11 +500,11 @@ bool CvCaptureCAM_Aravis::setProperty( int property_id, double value )
}
}
break;
case CV_CAP_PROP_BRIGHTNESS:
case CAP_PROP_BRIGHTNESS:
exposureCompensation = CLIP(value, -3., 3.);
break;
case CV_CAP_PROP_EXPOSURE:
case CAP_PROP_EXPOSURE:
if(exposureAvailable) {
/* exposure time in seconds, like 1/100 s */
value *= 1e6; // -> from s to us
@ -529,13 +513,13 @@ bool CvCaptureCAM_Aravis::setProperty( int property_id, double value )
break;
} else return false;
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
if(fpsAvailable) {
arv_camera_set_frame_rate(camera, fps = CLIP(value, fpsMin, fpsMax), NULL);
break;
} else return false;
case CV_CAP_PROP_GAIN:
case CAP_PROP_GAIN:
if(gainAvailable) {
if ( (autoGain = (-1 == value) ) )
break;
@ -544,7 +528,7 @@ bool CvCaptureCAM_Aravis::setProperty( int property_id, double value )
break;
} else return false;
case CV_CAP_PROP_FOURCC:
case CAP_PROP_FOURCC:
{
ArvPixelFormat newFormat = pixelFormat;
switch((int)value) {
@ -574,7 +558,7 @@ bool CvCaptureCAM_Aravis::setProperty( int property_id, double value )
}
break;
case CV_CAP_PROP_BUFFERSIZE:
case CAP_PROP_BUFFERSIZE:
{
int x = (int)value;
if((x > 0) && (x != num_buffers)) {
@ -621,13 +605,10 @@ bool CvCaptureCAM_Aravis::startCapture()
cv::Ptr<cv::IVideoCapture> cv::create_Aravis_capture( int index )
{
CvCaptureCAM_Aravis* capture = new CvCaptureCAM_Aravis;
Ptr<CvCaptureCAM_Aravis> capture = makePtr<CvCaptureCAM_Aravis>();
if(capture->open(index)) {
return cv::makePtr<cv::LegacyCapture>(capture);
return capture;
}
delete capture;
return NULL;
}
#endif

View File

@ -45,6 +45,7 @@
#define CV_CAP_MODE_GRAY CV_FOURCC_MACRO('G','R','E','Y')
#define CV_CAP_MODE_YUYV CV_FOURCC_MACRO('Y', 'U', 'Y', 'V')
/********************** Declaration of class headers ************************/
/*****************************************************************************
@ -54,7 +55,7 @@
* CaptureDelegate is notified on a separate thread by the OS whenever there
* is a new frame. When "updateImage" is called from the main thread, it
* copies this new frame into an IplImage, but only if this frame has not
* been copied before. When "getOutput" is called from the main thread,
* been copied before. When "getImage" is called from the main thread,
* it gives the last copied IplImage.
*
*****************************************************************************/
@ -67,11 +68,8 @@
{
int newFrame;
CVImageBufferRef mCurrentImageBuffer;
char* imagedata;
IplImage* image;
char* bgr_imagedata;
IplImage* bgr_image;
IplImage* bgr_image_r90;
cv::Mat bgr_image;
cv::Mat bgr_image_r90;
size_t currSize;
}
@ -80,8 +78,8 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection;
- (int)updateImage;
- (IplImage*)getOutput;
- (bool)updateImage;
- (cv::Mat)getImage;
@end
@ -93,18 +91,17 @@ fromConnection:(AVCaptureConnection *)connection;
*
*****************************************************************************/
class CvCaptureCAM : public CvCapture {
class CvCaptureCAM : public cv::IVideoCapture {
public:
CvCaptureCAM(int cameraNum = -1) ;
~CvCaptureCAM();
bool grabFrame() CV_OVERRIDE;
IplImage* retrieveFrame(int) CV_OVERRIDE;
bool retrieveFrame(int, cv::OutputArray) CV_OVERRIDE;
double getProperty(int property_id) const CV_OVERRIDE;
bool setProperty(int property_id, double value) CV_OVERRIDE;
int getCaptureDomain() /*const*/ CV_OVERRIDE { return cv::CAP_AVFOUNDATION; }
bool isOpened() const CV_OVERRIDE { return started; }
virtual IplImage* queryFrame();
virtual int didStart();
private:
AVCaptureSession *mCaptureSession;
AVCaptureDeviceInput *mCaptureDeviceInput;
@ -137,17 +134,17 @@ class CvCaptureCAM : public CvCapture {
*
*****************************************************************************/
class CvCaptureFile : public CvCapture {
class CvCaptureFile : public cv::IVideoCapture {
public:
CvCaptureFile(const char* filename) ;
~CvCaptureFile();
bool grabFrame() CV_OVERRIDE;
IplImage* retrieveFrame(int) CV_OVERRIDE;
bool retrieveFrame(int, cv::OutputArray) CV_OVERRIDE;
double getProperty(int property_id) const CV_OVERRIDE;
bool setProperty(int property_id, double value) CV_OVERRIDE;
int getCaptureDomain() /*const*/ CV_OVERRIDE { return cv::CAP_AVFOUNDATION; }
bool isOpened() const CV_OVERRIDE { return started; }
virtual int didStart();
private:
AVAsset *mAsset;
AVAssetTrack *mAssetTrack;
@ -156,16 +153,14 @@ private:
CMSampleBufferRef mCurrentSampleBuffer;
CVImageBufferRef mGrabbedPixels;
IplImage *mDeviceImage;
uint8_t *mOutImagedata;
IplImage *mOutImage;
cv::Mat mOutImage;
size_t currSize;
uint32_t mMode;
int mFormat;
void handleTracks(NSArray<AVAssetTrack *>* tracks, const char* filename);
bool setupReadingAt(CMTime position);
IplImage* retrieveFramePixelBuffer();
cv::Mat retrieveFramePixelBuffer();
int getPreferredOrientationDegrees() const;
CMTime mFrameTimestamp;
@ -177,22 +172,23 @@ private:
/*****************************************************************************
*
* CvCaptureFile Declaration.
* CvVideoWriter_AVFoundation Declaration.
*
* CvCaptureFile is the instantiation of a capture source for video files.
* CvVideoWriter_AVFoundation is the instantiation of a video output class.
*
*****************************************************************************/
class CvVideoWriter_AVFoundation : public CvVideoWriter{
class CvVideoWriter_AVFoundation : public cv::IVideoWriter{
public:
CvVideoWriter_AVFoundation(const char* filename, int fourcc,
double fps, CvSize frame_size,
double fps, const cv::Size& frame_size,
int is_color=1);
~CvVideoWriter_AVFoundation();
bool writeFrame(const IplImage* image) CV_OVERRIDE;
bool isOpened() const CV_OVERRIDE { return mMovieWriter != NULL && mMovieWriter.status != AVAssetWriterStatusFailed; }
void write(cv::InputArray image) CV_OVERRIDE;
int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_AVFOUNDATION; }
private:
IplImage* argbimage;
cv::Mat argbimage;
AVAssetWriter *mMovieWriter;
AVAssetWriterInput* mMovieWriterInput;
@ -202,7 +198,7 @@ class CvVideoWriter_AVFoundation : public CvVideoWriter{
NSString* codec;
NSString* fileType;
double movieFPS;
CvSize movieSize;
cv::Size movieSize;
int movieColor;
unsigned long frameCount;
};
@ -213,24 +209,21 @@ class CvVideoWriter_AVFoundation : public CvVideoWriter{
cv::Ptr<cv::IVideoCapture> cv::create_AVFoundation_capture_file(const std::string &filename)
{
CvCaptureFile *retval = new CvCaptureFile(filename.c_str());
if(retval->didStart())
return makePtr<LegacyCapture>(retval);
delete retval;
cv::Ptr<CvCaptureFile> retval = cv::makePtr<CvCaptureFile>(filename.c_str());
if(retval->isOpened())
return retval;
return NULL;
}
cv::Ptr<cv::IVideoCapture> cv::create_AVFoundation_capture_cam(int index)
{
#if !TARGET_OS_VISION
CvCaptureCAM* retval = new CvCaptureCAM(index);
if (retval->didStart())
return cv::makePtr<cv::LegacyCapture>(retval);
delete retval;
cv::Ptr<CvCaptureCAM> retval = cv::makePtr<CvCaptureCAM>(index);
if (retval->isOpened())
return retval;
#endif
return 0;
return NULL;
}
@ -238,10 +231,11 @@ cv::Ptr<cv::IVideoWriter> cv::create_AVFoundation_writer(const std::string& file
double fps, const cv::Size &frameSize,
const cv::VideoWriterParameters& params)
{
CvSize sz = { frameSize.width, frameSize.height };
const bool isColor = params.get(VIDEOWRITER_PROP_IS_COLOR, true);
CvVideoWriter_AVFoundation* wrt = new CvVideoWriter_AVFoundation(filename.c_str(), fourcc, fps, sz, isColor);
return cv::makePtr<cv::LegacyWriter>(wrt);
cv::Ptr<CvVideoWriter_AVFoundation> wrt = cv::makePtr<CvVideoWriter_AVFoundation>(filename.c_str(), fourcc, fps, frameSize, isColor);
if (wrt->isOpened())
return wrt;
return NULL;
}
/********************** Implementation of Classes ****************************/
@ -283,11 +277,6 @@ CvCaptureCAM::~CvCaptureCAM() {
//cout << "Cleaned up camera." << endl;
}
int CvCaptureCAM::didStart() {
return started;
}
bool CvCaptureCAM::grabFrame() {
return grabFrame(5);
}
@ -309,20 +298,12 @@ bool CvCaptureCAM::grabFrame(double timeOut) {
return total <= timeOut;
}
IplImage* CvCaptureCAM::retrieveFrame(int) {
return [capture getOutput];
}
IplImage* CvCaptureCAM::queryFrame() {
while (!grabFrame()) {
std::cout << "WARNING: Couldn't grab new frame from camera!!!" << std::endl;
/*
cout << "Attempting to restart camera; set capture property DISABLE_AUTO_RESTART to disable." << endl;
stopCaptureDevice();
startCaptureDevice(camNum);
*/
}
return retrieveFrame(0);
bool CvCaptureCAM::retrieveFrame(int, cv::OutputArray arr) {
cv::Mat img = [capture getImage];
if (img.empty())
return false;
img.copyTo(arr);
return true;
}
void CvCaptureCAM::stopCaptureDevice() {
@ -458,11 +439,11 @@ void CvCaptureCAM::setWidthHeight() {
//added macros into headers in videoio_c.h
/*
#define CV_CAP_PROP_IOS_DEVICE_FOCUS 9001
#define CV_CAP_PROP_IOS_DEVICE_EXPOSURE 9002
#define CV_CAP_PROP_IOS_DEVICE_FLASH 9003
#define CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE 9004
#define CV_CAP_PROP_IOS_DEVICE_TORCH 9005
#define CAP_PROP_IOS_DEVICE_FOCUS 9001
#define CAP_PROP_IOS_DEVICE_EXPOSURE 9002
#define CAP_PROP_IOS_DEVICE_FLASH 9003
#define CAP_PROP_IOS_DEVICE_WHITEBALANCE 9004
#define CAP_PROP_IOS_DEVICE_TORCH 9005
*/
@ -523,20 +504,20 @@ double CvCaptureCAM::getProperty(int property_id) const{
[localpool drain];
switch (property_id) {
case CV_CAP_PROP_FRAME_WIDTH:
case cv::CAP_PROP_FRAME_WIDTH:
return w;
case CV_CAP_PROP_FRAME_HEIGHT:
case cv::CAP_PROP_FRAME_HEIGHT:
return h;
case CV_CAP_PROP_IOS_DEVICE_FOCUS:
case cv::CAP_PROP_IOS_DEVICE_FOCUS:
return mCaptureDevice.focusMode;
case CV_CAP_PROP_IOS_DEVICE_EXPOSURE:
case cv::CAP_PROP_IOS_DEVICE_EXPOSURE:
return mCaptureDevice.exposureMode;
case CV_CAP_PROP_IOS_DEVICE_FLASH:
case cv::CAP_PROP_IOS_DEVICE_FLASH:
return mCaptureDevice.flashMode;
case CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE:
case cv::CAP_PROP_IOS_DEVICE_WHITEBALANCE:
return mCaptureDevice.whiteBalanceMode;
case CV_CAP_PROP_IOS_DEVICE_TORCH:
case cv::CAP_PROP_IOS_DEVICE_TORCH:
return mCaptureDevice.torchMode;
default:
@ -548,7 +529,7 @@ double CvCaptureCAM::getProperty(int property_id) const{
bool CvCaptureCAM::setProperty(int property_id, double value) {
switch (property_id) {
case CV_CAP_PROP_FRAME_WIDTH:
case cv::CAP_PROP_FRAME_WIDTH:
width = value;
settingWidth = 1;
if (settingWidth && settingHeight) {
@ -558,7 +539,7 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
}
return true;
case CV_CAP_PROP_FRAME_HEIGHT:
case cv::CAP_PROP_FRAME_HEIGHT:
height = value;
settingHeight = 1;
if (settingWidth && settingHeight) {
@ -568,7 +549,7 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
}
return true;
case CV_CAP_PROP_IOS_DEVICE_FOCUS:
case cv::CAP_PROP_IOS_DEVICE_FOCUS:
if ([mCaptureDevice isFocusModeSupported:(AVCaptureFocusMode)value]){
NSError* error = nil;
[mCaptureDevice lockForConfiguration:&error];
@ -581,7 +562,7 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
return false;
}
case CV_CAP_PROP_IOS_DEVICE_EXPOSURE:
case cv::CAP_PROP_IOS_DEVICE_EXPOSURE:
if ([mCaptureDevice isExposureModeSupported:(AVCaptureExposureMode)value]){
NSError* error = nil;
[mCaptureDevice lockForConfiguration:&error];
@ -594,7 +575,7 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
return false;
}
case CV_CAP_PROP_IOS_DEVICE_FLASH:
case cv::CAP_PROP_IOS_DEVICE_FLASH:
if ( [mCaptureDevice hasFlash] && [mCaptureDevice isFlashModeSupported:(AVCaptureFlashMode)value]){
NSError* error = nil;
[mCaptureDevice lockForConfiguration:&error];
@ -607,7 +588,7 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
return false;
}
case CV_CAP_PROP_IOS_DEVICE_WHITEBALANCE:
case cv::CAP_PROP_IOS_DEVICE_WHITEBALANCE:
if ([mCaptureDevice isWhiteBalanceModeSupported:(AVCaptureWhiteBalanceMode)value]){
NSError* error = nil;
[mCaptureDevice lockForConfiguration:&error];
@ -620,7 +601,7 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
return false;
}
case CV_CAP_PROP_IOS_DEVICE_TORCH:
case cv::CAP_PROP_IOS_DEVICE_TORCH:
if ([mCaptureDevice hasFlash] && [mCaptureDevice isTorchModeSupported:(AVCaptureTorchMode)value]){
NSError* error = nil;
[mCaptureDevice lockForConfiguration:&error];
@ -649,7 +630,7 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
* CaptureDelegate is notified on a separate thread by the OS whenever there
* is a new frame. When "updateImage" is called from the main thread, it
* copies this new frame into an IplImage, but only if this frame has not
* been copied before. When "getOutput" is called from the main thread,
* been copied before. When "getImage" is called from the main thread,
* it gives the last copied IplImage.
*
*****************************************************************************/
@ -660,22 +641,14 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
- (id)init {
[super init];
newFrame = 0;
imagedata = NULL;
bgr_imagedata = NULL;
currSize = 0;
image = NULL;
bgr_image = NULL;
bgr_image_r90 = NULL;
return self;
}
-(void)dealloc {
if (imagedata != NULL) free(imagedata);
if (bgr_imagedata != NULL) free(bgr_imagedata);
cvReleaseImage(&image);
cvReleaseImage(&bgr_image);
cvReleaseImage(&bgr_image_r90);
bgr_image.release();
bgr_image_r90.release();
[super dealloc];
}
@ -705,13 +678,7 @@ fromConnection:(AVCaptureConnection *)connection{
}
-(IplImage*) getOutput {
//return bgr_image;
return bgr_image_r90;
}
-(int) updateImage {
-(bool) updateImage {
if (newFrame==0) return 0;
CVPixelBufferRef pixels;
@ -721,64 +688,40 @@ fromConnection:(AVCaptureConnection *)connection{
}
CVPixelBufferLockBaseAddress(pixels, 0);
uint32_t* baseaddress = (uint32_t*)CVPixelBufferGetBaseAddress(pixels);
uchar* baseaddress = reinterpret_cast<uchar*>(CVPixelBufferGetBaseAddress(pixels));
size_t width = CVPixelBufferGetWidth(pixels);
size_t height = CVPixelBufferGetHeight(pixels);
cv::Size sz { (int)CVPixelBufferGetWidth(pixels), (int)CVPixelBufferGetHeight(pixels) };
size_t rowBytes = CVPixelBufferGetBytesPerRow(pixels);
OSType pixelFormat = CVPixelBufferGetPixelFormatType(pixels);
if (rowBytes != 0) {
if (currSize != rowBytes*height*sizeof(char)) {
currSize = rowBytes*height*sizeof(char);
if (imagedata != NULL) free(imagedata);
if (bgr_imagedata != NULL) free(bgr_imagedata);
imagedata = (char*)malloc(currSize);
bgr_imagedata = (char*)malloc(currSize);
}
memcpy(imagedata, baseaddress, currSize);
if (image == NULL) {
image = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, 4);
}
image->width = (int)width;
image->height = (int)height;
image->nChannels = 4;
image->depth = IPL_DEPTH_8U;
image->widthStep = (int)rowBytes;
image->imageData = imagedata;
image->imageSize = (int)currSize;
if (bgr_image == NULL) {
bgr_image = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, 3);
}
bgr_image->width = (int)width;
bgr_image->height = (int)height;
bgr_image->nChannels = 3;
bgr_image->depth = IPL_DEPTH_8U;
bgr_image->widthStep = (int)rowBytes;
bgr_image->imageData = bgr_imagedata;
bgr_image->imageSize = (int)currSize;
cv::cvtColor(cv::cvarrToMat(image), cv::cvarrToMat(bgr_image), cv::COLOR_BGRA2BGR);
bool res = false;
if (rowBytes != 0 && pixelFormat == kCVPixelFormatType_32BGRA) {
bgr_image.create(sz, CV_8UC3);
cv::Mat devImage(sz, CV_8UC4, baseaddress, rowBytes);
cv::cvtColor(devImage, bgr_image, cv::COLOR_BGRA2BGR);
// image taken from the buffer is incorrectly rotated. I'm using cv::transpose + cv::flip.
// There should be an option in the iOS API to rotate the buffer output orientation.
// iOS provides hardware-accelerated rotation through the AVCaptureConnection class,
// but I can't get it to work.
if (bgr_image_r90 == NULL){
bgr_image_r90 = cvCreateImage(cvSize((int)height, (int)width), IPL_DEPTH_8U, 3);
}
cvTranspose(bgr_image, bgr_image_r90);
cvFlip(bgr_image_r90, NULL, 1);
bgr_image_r90.create(sz, CV_8UC3);
cv::transpose(bgr_image, bgr_image_r90);
cv::flip(bgr_image_r90, bgr_image_r90, 1);
res = true;
} else {
fprintf(stderr, "OpenCV: rowBytes == 0 or unknown pixel format 0x%08X\n", pixelFormat);
bgr_image.create(cv::Size(0, 0), bgr_image.type());
bgr_image_r90.create(cv::Size(0, 0), bgr_image_r90.type());
}
CVPixelBufferUnlockBaseAddress(pixels, 0);
CVBufferRelease(pixels);
return 1;
return res;
}
-(cv::Mat) getImage {
return bgr_image_r90;
}
@end
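Since transpose followed by a horizontal flip is exactly a 90-degree clockwise rotation, the pair above could equivalently be collapsed into a single call (behavioral equivalence assumed; not something this patch does):

cv::rotate(bgr_image, bgr_image_r90, cv::ROTATE_90_CLOCKWISE);  // == transpose + flip(..., 1)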
@ -800,9 +743,6 @@ CvCaptureFile::CvCaptureFile(const char* filename) {
mAssetTrack = nil;
mAssetReader = nil;
mTrackOutput = nil;
mDeviceImage = NULL;
mOutImage = NULL;
mOutImagedata = NULL;
currSize = 0;
mMode = CV_CAP_MODE_BGR;
mFormat = CV_8UC3;
@ -848,9 +788,7 @@ CvCaptureFile::CvCaptureFile(const char* filename) {
CvCaptureFile::~CvCaptureFile() {
NSAutoreleasePool *localpool = [[NSAutoreleasePool alloc] init];
free(mOutImagedata);
cvReleaseImage(&mOutImage);
cvReleaseImage(&mDeviceImage);
mOutImage.release();
[mAssetReader release];
[mTrackOutput release];
[mAssetTrack release];
@ -938,10 +876,6 @@ bool CvCaptureFile::setupReadingAt(CMTime position) {
return [mAssetReader startReading];
}
int CvCaptureFile::didStart() {
return started;
}
bool CvCaptureFile::grabFrame() {
NSAutoreleasePool *localpool = [[NSAutoreleasePool alloc] init];
@ -960,28 +894,29 @@ bool CvCaptureFile::grabFrame() {
return isReading;
}
IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
cv::Mat CvCaptureFile::retrieveFramePixelBuffer() {
if ( ! mGrabbedPixels ) {
return 0;
return cv::Mat();
}
NSAutoreleasePool *localpool = [[NSAutoreleasePool alloc] init];
CVPixelBufferLockBaseAddress(mGrabbedPixels, 0);
void *baseaddress;
size_t width, height, rowBytes;
size_t rowBytes;
cv::Size sz;
OSType pixelFormat = CVPixelBufferGetPixelFormatType(mGrabbedPixels);
if (CVPixelBufferIsPlanar(mGrabbedPixels)) {
baseaddress = CVPixelBufferGetBaseAddressOfPlane(mGrabbedPixels, 0);
width = CVPixelBufferGetWidthOfPlane(mGrabbedPixels, 0);
height = CVPixelBufferGetHeightOfPlane(mGrabbedPixels, 0);
sz.width = CVPixelBufferGetWidthOfPlane(mGrabbedPixels, 0);
sz.height = CVPixelBufferGetHeightOfPlane(mGrabbedPixels, 0);
rowBytes = CVPixelBufferGetBytesPerRowOfPlane(mGrabbedPixels, 0);
} else {
baseaddress = CVPixelBufferGetBaseAddress(mGrabbedPixels);
width = CVPixelBufferGetWidth(mGrabbedPixels);
height = CVPixelBufferGetHeight(mGrabbedPixels);
sz.width = CVPixelBufferGetWidth(mGrabbedPixels);
sz.height = CVPixelBufferGetHeight(mGrabbedPixels);
rowBytes = CVPixelBufferGetBytesPerRow(mGrabbedPixels);
}
@ -990,7 +925,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
return 0;
return cv::Mat();
}
int outChannels;
@ -1005,26 +940,9 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
return 0;
return cv::Mat();
}
if ( currSize != width*outChannels*height ) {
currSize = width*outChannels*height;
free(mOutImagedata);
mOutImagedata = reinterpret_cast<uint8_t*>(malloc(currSize));
}
if (mOutImage == NULL) {
mOutImage = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, outChannels);
}
mOutImage->width = int(width);
mOutImage->height = int(height);
mOutImage->nChannels = outChannels;
mOutImage->depth = IPL_DEPTH_8U;
mOutImage->widthStep = int(width*outChannels);
mOutImage->imageData = reinterpret_cast<char *>(mOutImagedata);
mOutImage->imageSize = int(currSize);
int deviceChannels;
int cvtCode;
@ -1042,7 +960,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
return 0;
return cv::Mat();
}
} else if ( pixelFormat == kCVPixelFormatType_24RGB ) {
deviceChannels = 3;
@ -1050,7 +968,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
if (mMode == CV_CAP_MODE_BGR) {
cvtCode = cv::COLOR_RGB2BGR;
} else if (mMode == CV_CAP_MODE_RGB) {
cvtCode = 0;
cvtCode = -1;
} else if (mMode == CV_CAP_MODE_GRAY) {
cvtCode = cv::COLOR_RGB2GRAY;
} else {
@ -1058,7 +976,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
return 0;
return cv::Mat();
}
} else if ( pixelFormat == kCVPixelFormatType_422YpCbCr8 ) { // 422 (2vuy, UYVY)
deviceChannels = 2;
@ -1076,11 +994,11 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
return 0;
return cv::Mat();
}
} else if ( pixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange || // 420v
pixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange ) { // 420f
height = height * 3 / 2;
sz.height = sz.height * 3 / 2;
deviceChannels = 1;
if (mMode == CV_CAP_MODE_BGR) {
@ -1094,7 +1012,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
return 0;
return cv::Mat();
}
} else {
char pfBuf[] = { (char)pixelFormat, (char)(pixelFormat >> 8),
@ -1103,24 +1021,15 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
return 0;
return cv::Mat();
}
if (mDeviceImage == NULL) {
mDeviceImage = cvCreateImageHeader(cvSize(int(width),int(height)), IPL_DEPTH_8U, deviceChannels);
}
mDeviceImage->width = int(width);
mDeviceImage->height = int(height);
mDeviceImage->nChannels = deviceChannels;
mDeviceImage->depth = IPL_DEPTH_8U;
mDeviceImage->widthStep = int(rowBytes);
mDeviceImage->imageData = reinterpret_cast<char *>(baseaddress);
mDeviceImage->imageSize = int(rowBytes*height);
mOutImage.create(sz, CV_MAKE_TYPE(CV_8U, outChannels));
cv::Mat devImage(sz, CV_MAKE_TYPE(CV_8U, deviceChannels), baseaddress, rowBytes);
if (cvtCode == -1) {
cv::cvarrToMat(mDeviceImage).copyTo(cv::cvarrToMat(mOutImage));
devImage.copyTo(mOutImage);
} else {
cv::cvtColor(cv::cvarrToMat(mDeviceImage), cv::cvarrToMat(mOutImage), cvtCode);
cv::cvtColor(devImage, mOutImage, cvtCode);
}
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
@ -1138,8 +1047,12 @@ int CvCaptureFile::getPreferredOrientationDegrees() const {
return static_cast<int>(round(radians * 180 / M_PI));
}
IplImage* CvCaptureFile::retrieveFrame(int) {
return retrieveFramePixelBuffer();
bool CvCaptureFile::retrieveFrame(int, cv::OutputArray arr) {
cv::Mat res = retrieveFramePixelBuffer();
if (res.empty())
return false;
res.copyTo(arr);
return true;
}
double CvCaptureFile::getProperty(int property_id) const{
@ -1148,25 +1061,25 @@ double CvCaptureFile::getProperty(int property_id) const{
CMTime t;
switch (property_id) {
case CV_CAP_PROP_POS_MSEC:
case cv::CAP_PROP_POS_MSEC:
return mFrameTimestamp.value * 1000.0 / mFrameTimestamp.timescale;
case CV_CAP_PROP_POS_FRAMES:
case cv::CAP_PROP_POS_FRAMES:
return mAssetTrack.nominalFrameRate > 0 ? mFrameNum : 0;
case CV_CAP_PROP_POS_AVI_RATIO:
case cv::CAP_PROP_POS_AVI_RATIO:
t = [mAsset duration];
return (mFrameTimestamp.value * t.timescale) / double(mFrameTimestamp.timescale * t.value);
case CV_CAP_PROP_FRAME_WIDTH:
case cv::CAP_PROP_FRAME_WIDTH:
return mAssetTrack.naturalSize.width;
case CV_CAP_PROP_FRAME_HEIGHT:
case cv::CAP_PROP_FRAME_HEIGHT:
return mAssetTrack.naturalSize.height;
case CV_CAP_PROP_FPS:
case cv::CAP_PROP_FPS:
return mAssetTrack.nominalFrameRate;
case CV_CAP_PROP_FRAME_COUNT:
case cv::CAP_PROP_FRAME_COUNT:
t = [mAsset duration];
return round((t.value * mAssetTrack.nominalFrameRate) / double(t.timescale));
case CV_CAP_PROP_FORMAT:
case cv::CAP_PROP_FORMAT:
return mFormat;
case CV_CAP_PROP_FOURCC:
case cv::CAP_PROP_FOURCC:
return mMode;
case cv::CAP_PROP_ORIENTATION_META:
return getPreferredOrientationDegrees();
@ -1186,20 +1099,20 @@ bool CvCaptureFile::setProperty(int property_id, double value) {
CMTime t;
switch (property_id) {
case CV_CAP_PROP_POS_MSEC:
case cv::CAP_PROP_POS_MSEC:
t = mAsset.duration;
t.value = value * t.timescale / 1000;
retval = setupReadingAt(t);
break;
case CV_CAP_PROP_POS_FRAMES:
case cv::CAP_PROP_POS_FRAMES:
retval = mAssetTrack.nominalFrameRate > 0 ? setupReadingAt(CMTimeMake(value, mAssetTrack.nominalFrameRate)) : false;
break;
case CV_CAP_PROP_POS_AVI_RATIO:
case cv::CAP_PROP_POS_AVI_RATIO:
t = mAsset.duration;
t.value = round(t.value * value);
retval = setupReadingAt(t);
break;
case CV_CAP_PROP_FOURCC:
case cv::CAP_PROP_FOURCC:
uint32_t mode;
mode = cvRound(value);
if (mMode == mode) {
@ -1239,7 +1152,7 @@ bool CvCaptureFile::setProperty(int property_id, double value) {
CvVideoWriter_AVFoundation::CvVideoWriter_AVFoundation(const char* filename, int fourcc,
double fps, CvSize frame_size,
double fps, const cv::Size& frame_size,
int is_color) {
NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
@ -1249,7 +1162,7 @@ CvVideoWriter_AVFoundation::CvVideoWriter_AVFoundation(const char* filename, int
movieFPS = fps;
movieSize = frame_size;
movieColor = is_color;
argbimage = cvCreateImage(movieSize, IPL_DEPTH_8U, 4);
argbimage = cv::Mat(movieSize, CV_8UC4);
path = [[[NSString stringWithCString:filename encoding:NSASCIIStringEncoding] stringByExpandingTildeInPath] retain];
@ -1387,13 +1300,13 @@ CvVideoWriter_AVFoundation::~CvVideoWriter_AVFoundation() {
[path release];
[codec release];
[fileType release];
cvReleaseImage(&argbimage);
argbimage.release();
[localpool drain];
}];
}
bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
void CvVideoWriter_AVFoundation::write(cv::InputArray image) {
NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
// writer status check
@ -1401,30 +1314,30 @@ bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
NSLog(@"[mMovieWriterInput isReadyForMoreMediaData] Not ready for media data or ...");
NSLog(@"mMovieWriter.status: %d. Error: %@", (int)mMovieWriter.status, [mMovieWriter.error localizedDescription]);
[localpool drain];
return false;
return;
}
BOOL success = FALSE;
if (iplimage->height!=movieSize.height || iplimage->width!=movieSize.width){
if (image.size().height!=movieSize.height || image.size().width!=movieSize.width){
std::cout<<"Frame size does not match video size."<<std::endl;
[localpool drain];
return false;
return;
}
if (movieColor) {
//assert(iplimage->nChannels == 3);
cv::cvtColor(cv::cvarrToMat(iplimage), cv::cvarrToMat(argbimage), cv::COLOR_BGR2BGRA);
//assert(image->nChannels == 3);
cv::cvtColor(image, argbimage, cv::COLOR_BGR2BGRA);
}else{
//assert(iplimage->nChannels == 1);
cv::cvtColor(cv::cvarrToMat(iplimage), cv::cvarrToMat(argbimage), cv::COLOR_GRAY2BGRA);
//assert(image->nChannels == 1);
cv::cvtColor(image, argbimage, cv::COLOR_GRAY2BGRA);
}
//IplImage -> CGImage conversion
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
NSData *nsData = [NSData dataWithBytes:argbimage->imageData length:argbimage->imageSize];
NSData *nsData = [NSData dataWithBytes:argbimage.data length:argbimage.total() * argbimage.elemSize()];
CGDataProviderRef provider = CGDataProviderCreateWithCFData((CFDataRef)nsData);
CGImageRef cgImage = CGImageCreate(argbimage->width, argbimage->height,
argbimage->depth, argbimage->depth * argbimage->nChannels, argbimage->widthStep,
CGImageRef cgImage = CGImageCreate(argbimage.size().width, argbimage.size().height,
8, 32, argbimage.step[0],
colorSpace, kCGImageAlphaLast|kCGBitmapByteOrderDefault,
provider, NULL, false, kCGRenderingIntentDefault);
@ -1458,10 +1371,10 @@ bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
if (success) {
frameCount ++;
//NSLog(@"Frame #%d", frameCount);
return true;
return;
}else{
NSLog(@"Frame appendPixelBuffer failed.");
return false;
return;
}
}
View File
@ -53,6 +53,7 @@
#define CV_CAP_MODE_GRAY CV_FOURCC_MACRO('G','R','E','Y')
#define CV_CAP_MODE_YUYV CV_FOURCC_MACRO('Y', 'U', 'Y', 'V')
/********************** Declaration of class headers ************************/
/*****************************************************************************
@ -62,8 +63,7 @@
* CaptureDelegate is notified on a separate thread by the OS whenever there
* is a new frame. When "updateImage" is called from the main thread, it
* copies this new frame into an IplImage, but only if this frame has not
* been copied before. When "getOutput" is called from the main thread,
* it gives the last copied IplImage.
* been copied before.
*
*****************************************************************************/
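// For reference, the resulting call flow is roughly (a sketch of the code below,
// not a verbatim quote):
//   CvCaptureCAM::grabFrame()     -> [mCapture grabImageUntilDate:] then [mCapture updateImage]
//   CvCaptureCAM::retrieveFrame() -> [mCapture getImage], then Mat::copyTo(OutputArray)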
@ -73,9 +73,7 @@
NSCondition *mHasNewFrame;
CVPixelBufferRef mGrabbedPixels;
CVImageBufferRef mCurrentImageBuffer;
IplImage *mDeviceImage;
uint8_t *mOutImagedata;
IplImage *mOutImage;
cv::Mat mOutImage;
size_t currSize;
}
@ -84,8 +82,8 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection;
- (BOOL)grabImageUntilDate: (NSDate *)limit;
- (int)updateImage;
- (IplImage*)getOutput;
- (bool)updateImage;
- (cv::Mat)getImage;
@end
@ -97,17 +95,16 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
*
*****************************************************************************/
class CvCaptureCAM : public CvCapture {
class CvCaptureCAM : public cv::IVideoCapture {
public:
CvCaptureCAM(int cameraNum = -1) ;
~CvCaptureCAM();
bool grabFrame() CV_OVERRIDE;
IplImage* retrieveFrame(int) CV_OVERRIDE;
bool retrieveFrame(int, cv::OutputArray) CV_OVERRIDE;
double getProperty(int property_id) const CV_OVERRIDE;
bool setProperty(int property_id, double value) CV_OVERRIDE;
int getCaptureDomain() /*const*/ CV_OVERRIDE { return cv::CAP_AVFOUNDATION; }
virtual int didStart();
bool isOpened() const CV_OVERRIDE { return started; }
private:
AVCaptureSession *mCaptureSession;
@ -140,17 +137,16 @@ private:
*
*****************************************************************************/
class CvCaptureFile : public CvCapture {
class CvCaptureFile : public cv::VideoCaptureBase {
public:
CvCaptureFile(const char* filename) ;
~CvCaptureFile();
bool grabFrame() CV_OVERRIDE;
IplImage* retrieveFrame(int) CV_OVERRIDE;
double getProperty(int property_id) const CV_OVERRIDE;
bool setProperty(int property_id, double value) CV_OVERRIDE;
bool retrieveFrame_(int, cv::OutputArray) CV_OVERRIDE;
double getProperty_(int property_id) const CV_OVERRIDE;
bool setProperty_(int property_id, double value) CV_OVERRIDE;
int getCaptureDomain() /*const*/ CV_OVERRIDE { return cv::CAP_AVFOUNDATION; }
virtual int didStart();
bool isOpened() const CV_OVERRIDE { return started; }
private:
AVAsset *mAsset;
@ -160,15 +156,13 @@ private:
CMSampleBufferRef mCurrentSampleBuffer;
CVImageBufferRef mGrabbedPixels;
IplImage *mDeviceImage;
uint8_t *mOutImagedata;
IplImage *mOutImage;
cv::Mat mOutImage;
size_t currSize;
uint32_t mMode;
int mFormat;
bool setupReadingAt(CMTime position);
IplImage* retrieveFramePixelBuffer();
cv::Mat retrieveFramePixelBuffer();
int getPreferredOrientationDegrees() const;
CMTime mFrameTimestamp;
@ -186,18 +180,18 @@ private:
*
*****************************************************************************/
class CvVideoWriter_AVFoundation : public CvVideoWriter {
class CvVideoWriter_AVFoundation : public cv::IVideoWriter {
public:
CvVideoWriter_AVFoundation(const std::string &filename, int fourcc, double fps, CvSize frame_size, int is_color);
CvVideoWriter_AVFoundation(const std::string &filename, int fourcc, double fps, const cv::Size& frame_size, int is_color);
~CvVideoWriter_AVFoundation();
bool writeFrame(const IplImage* image) CV_OVERRIDE;
void write(cv::InputArray image) CV_OVERRIDE;
int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_AVFOUNDATION; }
bool isOpened() const
bool isOpened() const CV_OVERRIDE
{
return is_good;
}
private:
IplImage* argbimage;
cv::Mat argbimage;
AVAssetWriter *mMovieWriter;
AVAssetWriterInput* mMovieWriterInput;
@ -207,7 +201,7 @@ class CvVideoWriter_AVFoundation : public CvVideoWriter {
NSString* codec;
NSString* fileType;
double mMovieFPS;
CvSize movieSize;
cv::Size movieSize;
int movieColor;
unsigned long mFrameNum;
bool is_good;
@ -217,35 +211,28 @@ class CvVideoWriter_AVFoundation : public CvVideoWriter {
cv::Ptr<cv::IVideoCapture> cv::create_AVFoundation_capture_file(const std::string &filename)
{
CvCaptureFile *retval = new CvCaptureFile(filename.c_str());
if(retval->didStart())
return makePtr<LegacyCapture>(retval);
delete retval;
cv::Ptr<CvCaptureFile> retval = cv::makePtr<CvCaptureFile>(filename.c_str());
if(retval->isOpened())
return retval;
return NULL;
}
cv::Ptr<cv::IVideoCapture> cv::create_AVFoundation_capture_cam(int index)
{
CvCaptureCAM* retval = new CvCaptureCAM(index);
if (retval->didStart())
return cv::makePtr<cv::LegacyCapture>(retval);
delete retval;
return 0;
cv::Ptr<CvCaptureCAM> retval = cv::makePtr<CvCaptureCAM>(index);
if (retval->isOpened())
return retval;
return NULL;
}
cv::Ptr<cv::IVideoWriter> cv::create_AVFoundation_writer(const std::string& filename, int fourcc,
double fps, const cv::Size& frameSize,
const cv::VideoWriterParameters& params)
{
CvSize sz = { frameSize.width, frameSize.height };
const bool isColor = params.get(VIDEOWRITER_PROP_IS_COLOR, true);
CvVideoWriter_AVFoundation* wrt = new CvVideoWriter_AVFoundation(filename, fourcc, fps, sz, isColor);
const bool isColor = params.get(cv::VIDEOWRITER_PROP_IS_COLOR, true);
cv::Ptr<CvVideoWriter_AVFoundation> wrt = cv::makePtr<CvVideoWriter_AVFoundation>(filename, fourcc, fps, frameSize, isColor);
if (wrt->isOpened())
{
return cv::makePtr<cv::LegacyWriter>(wrt);
}
delete wrt;
return wrt;
return NULL;
}
@ -285,11 +272,6 @@ CvCaptureCAM::~CvCaptureCAM() {
stopCaptureDevice();
}
int CvCaptureCAM::didStart() {
return started;
}
bool CvCaptureCAM::grabFrame() {
return grabFrame(1);
}
@ -300,16 +282,19 @@ bool CvCaptureCAM::grabFrame(double timeOut) {
bool isGrabbed = false;
NSDate *limit = [NSDate dateWithTimeIntervalSinceNow: timeOut];
if ( [mCapture grabImageUntilDate: limit] ) {
[mCapture updateImage];
isGrabbed = true;
isGrabbed = [mCapture updateImage];
}
[localpool drain];
return isGrabbed;
}
IplImage* CvCaptureCAM::retrieveFrame(int) {
return [mCapture getOutput];
bool CvCaptureCAM::retrieveFrame(int, cv::OutputArray arr) {
cv::Mat img = [mCapture getImage];
if (img.empty())
return false;
img.copyTo(arr);
return true;
}
void CvCaptureCAM::stopCaptureDevice() {
@ -494,19 +479,19 @@ double CvCaptureCAM::getProperty(int property_id) const{
double retval = 0;
switch (property_id) {
case CV_CAP_PROP_FRAME_WIDTH:
case cv::CAP_PROP_FRAME_WIDTH:
retval = s1.width;
break;
case CV_CAP_PROP_FRAME_HEIGHT:
case cv::CAP_PROP_FRAME_HEIGHT:
retval = s1.height;
break;
case CV_CAP_PROP_FPS:
case cv::CAP_PROP_FPS:
{
CMTime frameDuration = mCaptureDevice.activeVideoMaxFrameDuration;
retval = frameDuration.timescale / double(frameDuration.value);
}
break;
case CV_CAP_PROP_FORMAT:
case cv::CAP_PROP_FORMAT:
retval = CV_8UC3;
break;
default:
@ -523,7 +508,7 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
bool isSucceeded = false;
switch (property_id) {
case CV_CAP_PROP_FRAME_WIDTH:
case cv::CAP_PROP_FRAME_WIDTH:
width = value;
settingWidth = 1;
if (settingWidth && settingHeight) {
@ -533,7 +518,7 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
}
isSucceeded = true;
break;
case CV_CAP_PROP_FRAME_HEIGHT:
case cv::CAP_PROP_FRAME_HEIGHT:
height = value;
settingHeight = 1;
if (settingWidth && settingHeight) {
@ -543,7 +528,7 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
}
isSucceeded = true;
break;
case CV_CAP_PROP_FPS:
case cv::CAP_PROP_FPS:
if ( [mCaptureDevice lockForConfiguration: NULL] ) {
NSArray * ranges = mCaptureDevice.activeFormat.videoSupportedFrameRateRanges;
AVFrameRateRange *matchedRange = ranges[0];
@ -577,8 +562,7 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
* CaptureDelegate is notified on a separate thread by the OS whenever there
* is a new frame. When "updateImage" is called from the main thread, it
* copies this new frame into an IplImage, but only if this frame has not
* been copied before. When "getOutput" is called from the main thread,
* it gives the last copied IplImage.
* been copied before.
*
*****************************************************************************/
@ -590,17 +574,12 @@ bool CvCaptureCAM::setProperty(int property_id, double value) {
mHasNewFrame = [[NSCondition alloc] init];
mCurrentImageBuffer = NULL;
mGrabbedPixels = NULL;
mDeviceImage = NULL;
mOutImagedata = NULL;
mOutImage = NULL;
currSize = 0;
return self;
}
-(void)dealloc {
free(mOutImagedata);
cvReleaseImage(&mOutImage);
cvReleaseImage(&mDeviceImage);
mOutImage.release();
CVBufferRelease(mCurrentImageBuffer);
CVBufferRelease(mGrabbedPixels);
[mHasNewFrame release];
@ -627,10 +606,6 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
}
-(IplImage*) getOutput {
return mOutImage;
}
-(BOOL) grabImageUntilDate: (NSDate *)limit {
BOOL isGrabbed = NO;
[mHasNewFrame lock];
@ -647,89 +622,44 @@ didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
return isGrabbed;
}
-(int) updateImage {
-(bool) updateImage {
if ( ! mGrabbedPixels ) {
return 0;
return false;
}
CVPixelBufferLockBaseAddress(mGrabbedPixels, 0);
void *baseaddress = CVPixelBufferGetBaseAddress(mGrabbedPixels);
uchar *baseaddress = reinterpret_cast<uchar*>(CVPixelBufferGetBaseAddress(mGrabbedPixels));
size_t width = CVPixelBufferGetWidth(mGrabbedPixels);
size_t height = CVPixelBufferGetHeight(mGrabbedPixels);
cv::Size sz { (int)CVPixelBufferGetWidth(mGrabbedPixels), (int)CVPixelBufferGetHeight(mGrabbedPixels) };
size_t rowBytes = CVPixelBufferGetBytesPerRow(mGrabbedPixels);
OSType pixelFormat = CVPixelBufferGetPixelFormatType(mGrabbedPixels);
if ( rowBytes == 0 ) {
fprintf(stderr, "OpenCV: error: rowBytes == 0\n");
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
return 0;
}
if ( currSize != width*3*height ) {
currSize = width*3*height;
free(mOutImagedata);
mOutImagedata = reinterpret_cast<uint8_t*>(malloc(currSize));
}
if (mOutImage == NULL) {
mOutImage = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, 3);
}
mOutImage->width = int(width);
mOutImage->height = int(height);
mOutImage->nChannels = 3;
mOutImage->depth = IPL_DEPTH_8U;
mOutImage->widthStep = int(width*3);
mOutImage->imageData = reinterpret_cast<char *>(mOutImagedata);
mOutImage->imageSize = int(currSize);
if ( pixelFormat == kCVPixelFormatType_32BGRA ) {
if (mDeviceImage == NULL) {
mDeviceImage = cvCreateImageHeader(cvSize(int(width),int(height)), IPL_DEPTH_8U, 4);
bool res = false;
if (rowBytes != 0 && (pixelFormat == kCVPixelFormatType_32BGRA || pixelFormat == kCVPixelFormatType_422YpCbCr8)) {
mOutImage.create(sz, CV_8UC3);
if ( pixelFormat == kCVPixelFormatType_32BGRA ) {
cv::Mat devImage(sz, CV_8UC4, baseaddress, rowBytes);
cv::cvtColor(devImage, mOutImage, cv::COLOR_BGRA2BGR);
res = true;
} else if ( pixelFormat == kCVPixelFormatType_422YpCbCr8 ) {
cv::Mat devImage(sz, CV_8UC2, baseaddress, rowBytes);
cv::cvtColor(devImage, mOutImage, cv::COLOR_YUV2BGR_UYVY);
res = true;
}
mDeviceImage->width = int(width);
mDeviceImage->height = int(height);
mDeviceImage->nChannels = 4;
mDeviceImage->depth = IPL_DEPTH_8U;
mDeviceImage->widthStep = int(rowBytes);
mDeviceImage->imageData = reinterpret_cast<char *>(baseaddress);
mDeviceImage->imageSize = int(rowBytes*height);
cvtColor(cv::cvarrToMat(mDeviceImage), cv::cvarrToMat(mOutImage), cv::COLOR_BGRA2BGR);
} else if ( pixelFormat == kCVPixelFormatType_422YpCbCr8 ) {
if ( currSize != width*3*height ) {
currSize = width*3*height;
free(mOutImagedata);
mOutImagedata = reinterpret_cast<uint8_t*>(malloc(currSize));
}
if (mDeviceImage == NULL) {
mDeviceImage = cvCreateImageHeader(cvSize(int(width),int(height)), IPL_DEPTH_8U, 2);
}
mDeviceImage->width = int(width);
mDeviceImage->height = int(height);
mDeviceImage->nChannels = 2;
mDeviceImage->depth = IPL_DEPTH_8U;
mDeviceImage->widthStep = int(rowBytes);
mDeviceImage->imageData = reinterpret_cast<char *>(baseaddress);
mDeviceImage->imageSize = int(rowBytes*height);
cvtColor(cv::cvarrToMat(mDeviceImage), cv::cvarrToMat(mOutImage), cv::COLOR_YUV2BGR_UYVY);
} else {
fprintf(stderr, "OpenCV: unknown pixel format 0x%08X\n", pixelFormat);
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
return 0;
fprintf(stderr, "OpenCV: rowBytes == 0 or unknown pixel format 0x%08X\n", pixelFormat);
mOutImage.create(cv::Size(0, 0), mOutImage.type());
}
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
return 1;
return res;
}
-(cv::Mat) getImage {
return mOutImage;
}
@end
@ -750,9 +680,6 @@ CvCaptureFile::CvCaptureFile(const char* filename) {
mAssetTrack = nil;
mAssetReader = nil;
mTrackOutput = nil;
mDeviceImage = NULL;
mOutImage = NULL;
mOutImagedata = NULL;
currSize = 0;
mMode = CV_CAP_MODE_BGR;
mFormat = CV_8UC3;
@ -796,9 +723,7 @@ CvCaptureFile::CvCaptureFile(const char* filename) {
CvCaptureFile::~CvCaptureFile() {
NSAutoreleasePool *localpool = [[NSAutoreleasePool alloc] init];
free(mOutImagedata);
cvReleaseImage(&mOutImage);
cvReleaseImage(&mDeviceImage);
mOutImage.release();
[mAssetReader release];
[mTrackOutput release];
[mAssetTrack release];
@ -873,10 +798,6 @@ bool CvCaptureFile::setupReadingAt(CMTime position) {
return [mAssetReader startReading];
}
int CvCaptureFile::didStart() {
return started;
}
bool CvCaptureFile::grabFrame() {
NSAutoreleasePool *localpool = [[NSAutoreleasePool alloc] init];
@ -896,28 +817,29 @@ bool CvCaptureFile::grabFrame() {
}
IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
cv::Mat CvCaptureFile::retrieveFramePixelBuffer() {
if ( ! mGrabbedPixels ) {
return 0;
return cv::Mat();
}
NSAutoreleasePool *localpool = [[NSAutoreleasePool alloc] init];
CVPixelBufferLockBaseAddress(mGrabbedPixels, 0);
void *baseaddress;
size_t width, height, rowBytes;
uchar *baseaddress;
size_t rowBytes;
cv::Size sz;
OSType pixelFormat = CVPixelBufferGetPixelFormatType(mGrabbedPixels);
if (CVPixelBufferIsPlanar(mGrabbedPixels)) {
baseaddress = CVPixelBufferGetBaseAddressOfPlane(mGrabbedPixels, 0);
width = CVPixelBufferGetWidthOfPlane(mGrabbedPixels, 0);
height = CVPixelBufferGetHeightOfPlane(mGrabbedPixels, 0);
baseaddress = reinterpret_cast<uchar*>(CVPixelBufferGetBaseAddressOfPlane(mGrabbedPixels, 0));
sz.width = CVPixelBufferGetWidthOfPlane(mGrabbedPixels, 0);
sz.height = CVPixelBufferGetHeightOfPlane(mGrabbedPixels, 0);
rowBytes = CVPixelBufferGetBytesPerRowOfPlane(mGrabbedPixels, 0);
} else {
baseaddress = CVPixelBufferGetBaseAddress(mGrabbedPixels);
width = CVPixelBufferGetWidth(mGrabbedPixels);
height = CVPixelBufferGetHeight(mGrabbedPixels);
baseaddress = reinterpret_cast<uchar*>(CVPixelBufferGetBaseAddress(mGrabbedPixels));
sz.width = CVPixelBufferGetWidth(mGrabbedPixels);
sz.height = CVPixelBufferGetHeight(mGrabbedPixels);
rowBytes = CVPixelBufferGetBytesPerRow(mGrabbedPixels);
}
@ -926,7 +848,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
return 0;
return cv::Mat();
}
// Output image parameters.
@ -942,27 +864,9 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
return 0;
return cv::Mat();
}
if ( currSize != width*outChannels*height ) {
currSize = width*outChannels*height;
free(mOutImagedata);
mOutImagedata = reinterpret_cast<uint8_t*>(malloc(currSize));
}
// Build the header for the output image.
if (mOutImage == NULL) {
mOutImage = cvCreateImageHeader(cvSize((int)width,(int)height), IPL_DEPTH_8U, outChannels);
}
mOutImage->width = int(width);
mOutImage->height = int(height);
mOutImage->nChannels = outChannels;
mOutImage->depth = IPL_DEPTH_8U;
mOutImage->widthStep = int(width*outChannels);
mOutImage->imageData = reinterpret_cast<char *>(mOutImagedata);
mOutImage->imageSize = int(currSize);
// Device image parameters and conversion code.
// (Not all of these conversions are used in production, but they were all tested to find the fastest options.)
int deviceChannels;
@ -982,7 +886,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
return 0;
return cv::Mat();
}
} else if ( pixelFormat == kCVPixelFormatType_24RGB ) {
deviceChannels = 3;
@ -990,7 +894,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
if (mMode == CV_CAP_MODE_BGR) {
cvtCode = cv::COLOR_RGB2BGR;
} else if (mMode == CV_CAP_MODE_RGB) {
cvtCode = 0;
cvtCode = -1;
} else if (mMode == CV_CAP_MODE_GRAY) {
cvtCode = cv::COLOR_RGB2GRAY;
} else {
@ -998,7 +902,7 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
return 0;
return cv::Mat();
}
} else if ( pixelFormat == kCVPixelFormatType_422YpCbCr8 ) { // 422 (2vuy, UYVY)
deviceChannels = 2;
@ -1016,13 +920,13 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
return 0;
return cv::Mat();
}
} else if ( pixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange || // 420v
pixelFormat == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange ) { // 420f
// cvtColor(cv::COLOR_YUV2GRAY_420) is expecting a single buffer with both the Y plane and the CrCb planes.
// So, lie about the height of the buffer. cvtColor(cv::COLOR_YUV2GRAY_420) will only read the first 2/3 of it.
height = height * 3 / 2;
sz.height = sz.height * 3 / 2;
deviceChannels = 1;
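// For a bi-planar 4:2:0 buffer of size W x H (e.g. NV12), plane 0 holds W*H luma
// bytes and plane 1 holds W*(H/2) interleaved chroma bytes, so a single-channel
// W x (3H/2) view spans both planes -- hence the 3/2 factor above. (This assumes
// the chroma plane follows the luma plane contiguously, which reading the combined
// height from plane 0's base address relies on.)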
if (mMode == CV_CAP_MODE_BGR) {
@ -1036,34 +940,24 @@ IplImage* CvCaptureFile::retrieveFramePixelBuffer() {
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
fprintf(stderr, "OpenCV: unsupported pixel conversion mode\n");
return 0;
return cv::Mat();
}
} else {
fprintf(stderr, "OpenCV: unsupported pixel format 0x%08X\n", pixelFormat);
CVPixelBufferUnlockBaseAddress(mGrabbedPixels, 0);
CVBufferRelease(mGrabbedPixels);
mGrabbedPixels = NULL;
return 0;
return cv::Mat();
}
// Build the header for the device image.
if (mDeviceImage == NULL) {
mDeviceImage = cvCreateImageHeader(cvSize(int(width),int(height)), IPL_DEPTH_8U, deviceChannels);
}
mDeviceImage->width = int(width);
mDeviceImage->height = int(height);
mDeviceImage->nChannels = deviceChannels;
mDeviceImage->depth = IPL_DEPTH_8U;
mDeviceImage->widthStep = int(rowBytes);
mDeviceImage->imageData = reinterpret_cast<char *>(baseaddress);
mDeviceImage->imageSize = int(rowBytes*height);
mOutImage.create(sz, CV_MAKE_TYPE(CV_8U, outChannels));
cv::Mat devImage(sz, CV_MAKE_TYPE(CV_8U, deviceChannels), baseaddress, rowBytes);
// Convert the device image into the output image.
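// (cvtCode == -1 is the "no conversion needed" sentinel set above, e.g. for a
// 24RGB buffer retrieved in CV_CAP_MODE_RGB, so the frame is copied as-is.)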
if (cvtCode == -1) {
// Copy.
cv::cvarrToMat(mDeviceImage).copyTo(cv::cvarrToMat(mOutImage));
devImage.copyTo(mOutImage);
} else {
cvtColor(cv::cvarrToMat(mDeviceImage), cv::cvarrToMat(mOutImage), cvtCode);
cv::cvtColor(devImage, mOutImage, cvtCode);
}
@ -1082,35 +976,39 @@ int CvCaptureFile::getPreferredOrientationDegrees() const {
return static_cast<int>(round(radians * 180 / M_PI));
}
IplImage* CvCaptureFile::retrieveFrame(int) {
return retrieveFramePixelBuffer();
bool CvCaptureFile::retrieveFrame_(int, cv::OutputArray arr) {
cv::Mat res = retrieveFramePixelBuffer();
if (res.empty())
return false;
res.copyTo(arr);
return true;
}
double CvCaptureFile::getProperty(int property_id) const{
double CvCaptureFile::getProperty_(int property_id) const{
if (mAsset == nil) return 0;
CMTime t;
switch (property_id) {
case CV_CAP_PROP_POS_MSEC:
case cv::CAP_PROP_POS_MSEC:
return mFrameTimestamp.value * 1000.0 / mFrameTimestamp.timescale;
case CV_CAP_PROP_POS_FRAMES:
case cv::CAP_PROP_POS_FRAMES:
return mAssetTrack.nominalFrameRate > 0 ? mFrameNum : 0;
case CV_CAP_PROP_POS_AVI_RATIO:
case cv::CAP_PROP_POS_AVI_RATIO:
t = [mAsset duration];
return (mFrameTimestamp.value * t.timescale) / double(mFrameTimestamp.timescale * t.value);
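// (A CMTime is a rational value/timescale, so the ratio above is
//   (ts.value/ts.timescale) / (dur.value/dur.timescale)
//     == (ts.value * dur.timescale) / (ts.timescale * dur.value),
// i.e. the current timestamp over the asset duration, cross-multiplied to stay in integers.)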
case CV_CAP_PROP_FRAME_WIDTH:
case cv::CAP_PROP_FRAME_WIDTH:
return mAssetTrack.naturalSize.width;
case CV_CAP_PROP_FRAME_HEIGHT:
case cv::CAP_PROP_FRAME_HEIGHT:
return mAssetTrack.naturalSize.height;
case CV_CAP_PROP_FPS:
case cv::CAP_PROP_FPS:
return mAssetTrack.nominalFrameRate;
case CV_CAP_PROP_FRAME_COUNT:
case cv::CAP_PROP_FRAME_COUNT:
t = [mAsset duration];
return round((t.value * mAssetTrack.nominalFrameRate) / double(t.timescale));
case CV_CAP_PROP_FORMAT:
case cv::CAP_PROP_FORMAT:
return mFormat;
case CV_CAP_PROP_FOURCC:
case cv::CAP_PROP_FOURCC:
return mMode;
case cv::CAP_PROP_ORIENTATION_META:
return getPreferredOrientationDegrees();
@ -1121,7 +1019,7 @@ double CvCaptureFile::getProperty(int property_id) const{
return 0;
}
bool CvCaptureFile::setProperty(int property_id, double value) {
bool CvCaptureFile::setProperty_(int property_id, double value) {
if (mAsset == nil) return false;
NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
@ -1130,20 +1028,20 @@ bool CvCaptureFile::setProperty(int property_id, double value) {
CMTime t;
switch (property_id) {
case CV_CAP_PROP_POS_MSEC:
case cv::CAP_PROP_POS_MSEC:
t = mAsset.duration;
t.value = value * t.timescale / 1000;
retval = setupReadingAt(t);
break;
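// (value is in milliseconds and CMTime counts value/timescale seconds, so e.g.
// seeking to 1500 ms with a timescale of 600 sets t.value = 1500 * 600 / 1000 = 900.)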
case CV_CAP_PROP_POS_FRAMES:
case cv::CAP_PROP_POS_FRAMES:
retval = mAssetTrack.nominalFrameRate > 0 ? setupReadingAt(CMTimeMake(value, mAssetTrack.nominalFrameRate)) : false;
break;
case CV_CAP_PROP_POS_AVI_RATIO:
case cv::CAP_PROP_POS_AVI_RATIO:
t = mAsset.duration;
t.value = round(t.value * value);
retval = setupReadingAt(t);
break;
case CV_CAP_PROP_FOURCC:
case cv::CAP_PROP_FOURCC:
uint32_t mode;
mode = cvRound(value);
if (mMode == mode) {
@ -1182,8 +1080,8 @@ bool CvCaptureFile::setProperty(int property_id, double value) {
*****************************************************************************/
CvVideoWriter_AVFoundation::CvVideoWriter_AVFoundation(const std::string &filename, int fourcc, double fps, CvSize frame_size, int is_color)
: argbimage(nil), mMovieWriter(nil), mMovieWriterInput(nil), mMovieWriterAdaptor(nil), path(nil),
CvVideoWriter_AVFoundation::CvVideoWriter_AVFoundation(const std::string &filename, int fourcc, double fps, const cv::Size& frame_size, int is_color)
: mMovieWriter(nil), mMovieWriterInput(nil), mMovieWriterAdaptor(nil), path(nil),
codec(nil), fileType(nil), mMovieFPS(fps), movieSize(frame_size), movieColor(is_color), mFrameNum(0),
is_good(true)
{
@ -1194,7 +1092,7 @@ CvVideoWriter_AVFoundation::CvVideoWriter_AVFoundation(const std::string &filena
}
NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
argbimage = cvCreateImage(movieSize, IPL_DEPTH_8U, 4);
argbimage.create(movieSize, CV_8UC4);
path = [[[NSString stringWithUTF8String:filename.c_str()] stringByExpandingTildeInPath] retain];
NSString *fileExt =[[[path pathExtension] lowercaseString] copy];
@ -1314,8 +1212,8 @@ CvVideoWriter_AVFoundation::~CvVideoWriter_AVFoundation() {
[codec release];
if (fileType)
[fileType release];
if (argbimage)
cvReleaseImage(&argbimage);
if (!argbimage.empty())
argbimage.release();
[localpool drain];
@ -1325,14 +1223,14 @@ static void releaseCallback( void *releaseRefCon, const void * ) {
CFRelease((CFDataRef)releaseRefCon);
}
bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
void CvVideoWriter_AVFoundation::write(cv::InputArray image) {
NSAutoreleasePool* localpool = [[NSAutoreleasePool alloc] init];
// writer status check
if (mMovieWriter.status != AVAssetWriterStatusWriting ) {
NSLog(@"mMovieWriter.status: %d. Error: %@", (int)mMovieWriter.status, [mMovieWriter.error localizedDescription]);
[localpool drain];
return false;
return;
}
// Make writeFrame() a blocking call.
@ -1344,25 +1242,23 @@ bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
BOOL success = FALSE;
if (iplimage->height!=movieSize.height || iplimage->width!=movieSize.width){
if (image.size().height!=movieSize.height || image.size().width!=movieSize.width){
fprintf(stderr, "OpenCV: Frame size does not match video size.\n");
[localpool drain];
return false;
return;
}
if (movieColor) {
//assert(iplimage->nChannels == 3);
cvtColor(cv::cvarrToMat(iplimage), cv::cvarrToMat(argbimage), cv::COLOR_BGR2BGRA);
cv::cvtColor(image, argbimage, cv::COLOR_BGR2BGRA);
}else{
//assert(iplimage->nChannels == 1);
cvtColor(cv::cvarrToMat(iplimage), cv::cvarrToMat(argbimage), cv::COLOR_GRAY2BGRA);
cv::cvtColor(image, argbimage, cv::COLOR_GRAY2BGRA);
}
//IplImage -> CGImage conversion
CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
NSData *nsData = [NSData dataWithBytes:argbimage->imageData length:argbimage->imageSize];
NSData *nsData = [NSData dataWithBytes:argbimage.data length:argbimage.total() * argbimage.elemSize()];
CGDataProviderRef provider = CGDataProviderCreateWithCFData((CFDataRef)nsData);
CGImageRef cgImage = CGImageCreate(argbimage->width, argbimage->height,
argbimage->depth, argbimage->depth * argbimage->nChannels, argbimage->widthStep,
CGImageRef cgImage = CGImageCreate(argbimage.size().width, argbimage.size().height,
8, 32, argbimage.step[0],
colorSpace, kCGImageAlphaLast|kCGBitmapByteOrderDefault,
provider, NULL, false, kCGRenderingIntentDefault);
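// (For the CV_8UC4 argbimage these arguments are: 8 bits per component, 32 bits
// per pixel, and a row stride of step[0] bytes (>= width * 4) -- CGImageCreate's
// bitsPerComponent, bitsPerPixel and bytesPerRow parameters, respectively.)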
@ -1395,10 +1291,8 @@ bool CvVideoWriter_AVFoundation::writeFrame(const IplImage* iplimage) {
if (success) {
mFrameNum ++;
//NSLog(@"Frame #%d", mFrameNum);
return true;
}else{
NSLog(@"Frame appendPixelBuffer failed.");
return false;
}
}
View File
@ -60,6 +60,8 @@
#include <stdlib.h>
#include <string.h>
using namespace cv;
struct CvDC1394
{
CvDC1394();
@ -88,24 +90,27 @@ static CvDC1394& getDC1394()
return dc1394;
}
class CvCaptureCAM_DC1394_v2_CPP : public CvCapture
#define CAP_PROP_MAX_DC1394 31
class CvCaptureCAM_DC1394_v2_CPP : public IVideoCapture
{
public:
static int dc1394properties[CV_CAP_PROP_MAX_DC1394];
static int dc1394properties[CAP_PROP_MAX_DC1394];
CvCaptureCAM_DC1394_v2_CPP();
virtual ~CvCaptureCAM_DC1394_v2_CPP()
~CvCaptureCAM_DC1394_v2_CPP()
{
close();
}
virtual bool open(int index);
virtual void close();
bool open(int index);
void close();
virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_DC1394; }
double getProperty(int) const CV_OVERRIDE;
bool setProperty(int, double) CV_OVERRIDE;
bool grabFrame() CV_OVERRIDE;
bool retrieveFrame(int, OutputArray) CV_OVERRIDE;
int getCaptureDomain() CV_OVERRIDE { return CAP_DC1394; }
bool isOpened() const CV_OVERRIDE { return dcCam && started; }
protected:
@ -129,37 +134,37 @@ protected:
dc1394color_filter_t bayerFilter;
enum { NIMG = 2 };
IplImage *img[NIMG];
Mat img[NIMG];
dc1394video_frame_t* frameC;
int nimages;
dc1394featureset_t feature_set;
};
//mapping CV_CAP_PROP_ to DC1394_FEATUREs
int CvCaptureCAM_DC1394_v2_CPP::dc1394properties[CV_CAP_PROP_MAX_DC1394] = {
-1, //no corresponding feature for CV_CAP_PROP_POS_MSEC
//mapping CAP_PROP_ to DC1394_FEATUREs
int CvCaptureCAM_DC1394_v2_CPP::dc1394properties[CAP_PROP_MAX_DC1394] = {
-1, //no corresponding feature for CAP_PROP_POS_MSEC
-1,-1,-1,-1,
DC1394_FEATURE_FRAME_RATE, //CV_CAP_PROP_FPS - fps can be set for format 7 only!
DC1394_FEATURE_FRAME_RATE, //CAP_PROP_FPS - fps can be set for format 7 only!
-1,-1,-1,-1,
DC1394_FEATURE_BRIGHTNESS, //CV_CAP_PROP_BRIGHTNESS 10
DC1394_FEATURE_BRIGHTNESS, //CAP_PROP_BRIGHTNESS 10
-1,
DC1394_FEATURE_SATURATION, //CV_CAP_PROP_SATURATION
DC1394_FEATURE_SATURATION, //CAP_PROP_SATURATION
DC1394_FEATURE_HUE,
DC1394_FEATURE_GAIN,
DC1394_FEATURE_SHUTTER, //CV_CAP_PROP_EXPOSURE
-1, //CV_CAP_PROP_CONVERT_RGB
DC1394_FEATURE_WHITE_BALANCE, //corresponds to CV_CAP_PROP_WHITE_BALANCE_BLUE_U and CV_CAP_PROP_WHITE_BALANCE_RED_V, see set function to check these props are set
DC1394_FEATURE_SHUTTER, //CAP_PROP_EXPOSURE
-1, //CAP_PROP_CONVERT_RGB
DC1394_FEATURE_WHITE_BALANCE, //corresponds to CAP_PROP_WHITE_BALANCE_BLUE_U and CAP_PROP_WHITE_BALANCE_RED_V, see set function to check these props are set
-1,-1,
DC1394_FEATURE_SHARPNESS, //20
DC1394_FEATURE_EXPOSURE, //CV_CAP_PROP_AUTO_EXPOSURE - this is auto exposure according to the IIDC standard
DC1394_FEATURE_GAMMA, //CV_CAP_PROP_GAMMA
DC1394_FEATURE_TEMPERATURE, //CV_CAP_PROP_TEMPERATURE
DC1394_FEATURE_TRIGGER, //CV_CAP_PROP_TRIGGER
DC1394_FEATURE_TRIGGER_DELAY, //CV_CAP_PROP_TRIGGER_DELAY
DC1394_FEATURE_WHITE_BALANCE, //CV_CAP_PROP_WHITE_BALANCE_RED_V
DC1394_FEATURE_ZOOM, //CV_CAP_PROP_ZOOM
DC1394_FEATURE_FOCUS, //CV_CAP_PROP_FOCUS
-1 //CV_CAP_PROP_GUID
DC1394_FEATURE_EXPOSURE, //CAP_PROP_AUTO_EXPOSURE - this is auto exposure according to the IIDC standard
DC1394_FEATURE_GAMMA, //CAP_PROP_GAMMA
DC1394_FEATURE_TEMPERATURE, //CAP_PROP_TEMPERATURE
DC1394_FEATURE_TRIGGER, //CAP_PROP_TRIGGER
DC1394_FEATURE_TRIGGER_DELAY, //CAP_PROP_TRIGGER_DELAY
DC1394_FEATURE_WHITE_BALANCE, //CAP_PROP_WHITE_BALANCE_RED_V
DC1394_FEATURE_ZOOM, //CAP_PROP_ZOOM
DC1394_FEATURE_FOCUS, //CAP_PROP_FOCUS
-1 //CAP_PROP_GUID
};
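// The array is indexed by the CAP_PROP_* value itself: for example
// dc1394properties[CAP_PROP_EXPOSURE] yields DC1394_FEATURE_SHUTTER, while -1 marks
// properties with no IIDC feature (handled by the generic paths in get/setProperty below).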
CvCaptureCAM_DC1394_v2_CPP::CvCaptureCAM_DC1394_v2_CPP()
{
@ -177,8 +182,6 @@ CvCaptureCAM_DC1394_v2_CPP::CvCaptureCAM_DC1394_v2_CPP()
frameWidth = 640;
frameHeight = 480;
for (int i = 0; i < NIMG; i++)
img[i] = 0;
frameC = 0;
nimages = 1;
userMode = -1;
@ -396,7 +399,7 @@ void CvCaptureCAM_DC1394_v2_CPP::close()
for (int i = 0; i < NIMG; i++)
{
cvReleaseImage(&img[i]);
img[i].release();
}
if (frameC)
{
@ -446,7 +449,6 @@ bool CvCaptureCAM_DC1394_v2_CPP::grabFrame()
for (i = 0; i < nimages; i++)
{
IplImage fhdr;
dc1394video_frame_t f = fs ? *fs : *dcFrame, *fc = &f;
f.size[1] /= nimages;
f.image += f.size[0] * f.size[1] * i; // TODO: make it more universal
@ -468,19 +470,18 @@ bool CvCaptureCAM_DC1394_v2_CPP::grabFrame()
}
fc = frameC;
}
if (!img[i])
img[i] = cvCreateImage(cvSize(fc->size[0], fc->size[1]), 8, nch);
cvInitImageHeader(&fhdr, cvSize(fc->size[0], fc->size[1]), 8, nch);
cvSetData(&fhdr, fc->image, fc->size[0]*nch);
Mat frame(Size(fc->size[0], fc->size[1]), CV_MAKE_TYPE(CV_8U, nch), fc->image);
img[i].create(frame.size(), frame.type());
// Swap R&B channels:
if (nch==3)
{
cv::Mat tmp = cv::cvarrToMat(&fhdr);
cv::cvtColor(tmp, tmp, cv::COLOR_RGB2BGR, tmp.channels());
cv::cvtColor(frame, img[i], cv::COLOR_RGB2BGR);
}
else
{
frame.copyTo(img[i]);
}
cvCopy(&fhdr, img[i]);
}
code = true;
@ -498,9 +499,13 @@ _exit_:
return code;
}
IplImage* CvCaptureCAM_DC1394_v2_CPP::retrieveFrame(int idx)
bool CvCaptureCAM_DC1394_v2_CPP::retrieveFrame(int idx, OutputArray arr)
{
return 0 <= idx && idx < nimages ? img[idx] : 0;
if (0 <= idx && idx < nimages)
img[idx].copyTo(arr);
else
return false;
return true;
}
double CvCaptureCAM_DC1394_v2_CPP::getProperty(int propId) const
@ -510,40 +515,40 @@ double CvCaptureCAM_DC1394_v2_CPP::getProperty(int propId) const
switch (propId)
{
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
return frameWidth ? frameWidth : frameHeight*4 / 3;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
return frameHeight ? frameHeight : frameWidth*3 / 4;
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
return fps;
case CV_CAP_PROP_RECTIFICATION:
case CAP_PROP_RECTIFICATION:
CV_LOG_WARNING(NULL, "cap_dc1394: rectification support has been removed from videoio module");
return 0;
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
case CAP_PROP_WHITE_BALANCE_BLUE_U:
if (dc1394_feature_whitebalance_get_value(dcCam,
&fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value,
&fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value) == DC1394_SUCCESS)
return feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value;
break;
case CV_CAP_PROP_WHITE_BALANCE_RED_V:
case CAP_PROP_WHITE_BALANCE_RED_V:
if (dc1394_feature_whitebalance_get_value(dcCam,
&fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].BU_value,
&fs.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value) == DC1394_SUCCESS)
return feature_set.feature[DC1394_FEATURE_WHITE_BALANCE-DC1394_FEATURE_MIN].RV_value;
break;
case CV_CAP_PROP_GUID:
case CAP_PROP_GUID:
//the least significant 32 bits are enough to identify the camera
return (double) (guid & 0x00000000FFFFFFFF);
break;
case CV_CAP_PROP_MODE:
case CAP_PROP_MODE:
return (double) userMode;
break;
case CV_CAP_PROP_ISO_SPEED:
case CAP_PROP_ISO_SPEED:
return (double) isoSpeed;
case CV_CAP_PROP_BUFFERSIZE:
case CAP_PROP_BUFFERSIZE:
return (double) nDMABufs;
default:
if (propId<CV_CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
if (propId<CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
&& dcCam)
//&& feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN].on_off_capable)
if (dc1394_feature_get_value(dcCam,(dc1394feature_t)dc1394properties[propId],
@ -557,50 +562,50 @@ bool CvCaptureCAM_DC1394_v2_CPP::setProperty(int propId, double value)
{
switch (propId)
{
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
if(started)
return false;
frameWidth = cvRound(value);
frameHeight = 0;
break;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
if(started)
return false;
frameWidth = 0;
frameHeight = cvRound(value);
break;
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
if(started)
return false;
fps = value;
break;
case CV_CAP_PROP_RECTIFICATION:
case CAP_PROP_RECTIFICATION:
CV_LOG_WARNING(NULL, "cap_dc1394: rectification support has been removed from videoio module");
return false;
case CV_CAP_PROP_MODE:
case CAP_PROP_MODE:
if(started)
return false;
userMode = cvRound(value);
break;
case CV_CAP_PROP_ISO_SPEED:
case CAP_PROP_ISO_SPEED:
if(started)
return false;
isoSpeed = cvRound(value);
break;
case CV_CAP_PROP_BUFFERSIZE:
case CAP_PROP_BUFFERSIZE:
if(started)
return false;
nDMABufs = value;
break;
//The code below is based on coriander, callbacks.c:795; refer to case RANGE_MENU_MAN:
default:
if (propId<CV_CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
if (propId<CAP_PROP_MAX_DC1394 && dc1394properties[propId]!=-1
&& dcCam)
{
//get the corresponding feature from property-id
dc1394feature_info_t *act_feature = &feature_set.feature[dc1394properties[propId]-DC1394_FEATURE_MIN];
if (cvRound(value) == CV_CAP_PROP_DC1394_OFF)
if (cvRound(value) == CAP_PROP_DC1394_OFF)
{
if ( (act_feature->on_off_capable)
&& (dc1394_feature_set_power(dcCam, act_feature->id, DC1394_OFF) == DC1394_SUCCESS))
@ -624,7 +629,7 @@ bool CvCaptureCAM_DC1394_v2_CPP::setProperty(int propId, double value)
else
act_feature->abs_control=DC1394_OFF;
//set AUTO
if (cvRound(value) == CV_CAP_PROP_DC1394_MODE_AUTO)
if (cvRound(value) == CAP_PROP_DC1394_MODE_AUTO)
{
if (dc1394_feature_set_mode(dcCam, act_feature->id, DC1394_FEATURE_MODE_AUTO)!=DC1394_SUCCESS)
return false;
@ -632,7 +637,7 @@ bool CvCaptureCAM_DC1394_v2_CPP::setProperty(int propId, double value)
return true;
}
//set ONE PUSH
if (cvRound(value) == CV_CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO)
if (cvRound(value) == CAP_PROP_DC1394_MODE_ONE_PUSH_AUTO)
{
//have to set to manual first, otherwise one push will be ignored (AVT manual 4.3.0 p. 115)
if (dc1394_feature_set_mode(dcCam, act_feature->id, DC1394_FEATURE_MODE_ONE_PUSH_AUTO)!=DC1394_SUCCESS)
@ -647,7 +652,7 @@ bool CvCaptureCAM_DC1394_v2_CPP::setProperty(int propId, double value)
else
act_feature->current_mode=DC1394_FEATURE_MODE_MANUAL;
// if property is one of the white balance features treat it in different way
if (propId == CV_CAP_PROP_WHITE_BALANCE_BLUE_U)
if (propId == CAP_PROP_WHITE_BALANCE_BLUE_U)
{
if (dc1394_feature_whitebalance_set_value(dcCam,cvRound(value), act_feature->RV_value)!=DC1394_SUCCESS)
return false;
@ -657,7 +662,7 @@ bool CvCaptureCAM_DC1394_v2_CPP::setProperty(int propId, double value)
return true;
}
}
if (propId == CV_CAP_PROP_WHITE_BALANCE_RED_V)
if (propId == CAP_PROP_WHITE_BALANCE_RED_V)
{
if (dc1394_feature_whitebalance_set_value(dcCam, act_feature->BU_value, cvRound(value))!=DC1394_SUCCESS)
return false;
@ -692,10 +697,9 @@ bool CvCaptureCAM_DC1394_v2_CPP::setProperty(int propId, double value)
cv::Ptr<cv::IVideoCapture> cv::create_DC1394_capture(int index)
{
CvCaptureCAM_DC1394_v2_CPP* capture = new CvCaptureCAM_DC1394_v2_CPP;
Ptr<CvCaptureCAM_DC1394_v2_CPP> capture = makePtr<CvCaptureCAM_DC1394_v2_CPP>();
if (capture->open(index))
return cv::makePtr<cv::LegacyCapture>(capture);
delete capture;
return capture;
return 0;
}
View File
@ -41,6 +41,8 @@
#include "precomp.hpp"
using namespace cv;
#if defined _WIN32 && defined HAVE_DSHOW
#include "cap_dshow.hpp"
@ -544,7 +546,7 @@ class videoInput{
//number of devices available
int devicesFound;
// mapping from OpenCV CV_CAP_PROP to videoinput/dshow properties
// mapping from OpenCV CAP_PROP to videoinput/dshow properties
int getVideoPropertyFromCV(int cv_property);
int getCameraPropertyFromCV(int cv_property);
@ -2379,37 +2381,37 @@ void videoInput::getVideoPropertyAsString(int prop, char * propertyAsString){
int videoInput::getVideoPropertyFromCV(int cv_property){
// see VideoProcAmpProperty in strmif.h
switch (cv_property) {
case CV_CAP_PROP_BRIGHTNESS:
case CAP_PROP_BRIGHTNESS:
return VideoProcAmp_Brightness;
case CV_CAP_PROP_CONTRAST:
case CAP_PROP_CONTRAST:
return VideoProcAmp_Contrast;
case CV_CAP_PROP_HUE:
case CAP_PROP_HUE:
return VideoProcAmp_Hue;
case CV_CAP_PROP_SATURATION:
case CAP_PROP_SATURATION:
return VideoProcAmp_Saturation;
case CV_CAP_PROP_SHARPNESS:
case CAP_PROP_SHARPNESS:
return VideoProcAmp_Sharpness;
case CV_CAP_PROP_GAMMA:
case CAP_PROP_GAMMA:
return VideoProcAmp_Gamma;
case CV_CAP_PROP_MONOCHROME:
case CAP_PROP_MONOCHROME:
return VideoProcAmp_ColorEnable;
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
case CAP_PROP_WHITE_BALANCE_BLUE_U:
return VideoProcAmp_WhiteBalance;
case cv::VideoCaptureProperties::CAP_PROP_AUTO_WB:
return VideoProcAmp_WhiteBalance;
case CV_CAP_PROP_BACKLIGHT:
case CAP_PROP_BACKLIGHT:
return VideoProcAmp_BacklightCompensation;
case CV_CAP_PROP_GAIN:
case CAP_PROP_GAIN:
return VideoProcAmp_Gain;
}
return -1;
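// A minimal sketch of how this mapping is consumed (cf. VideoCapture_DShow::getProperty
// below); -1 means the CAP_PROP_* value has no VideoProcAmp equivalent:
//
//   int vp = getVideoPropertyFromCV(CAP_PROP_BRIGHTNESS); // -> VideoProcAmp_Brightness
//   if (vp != -1)
//       g_VI.getVideoSettingFilter(index, vp, min_value, max_value,
//                                  stepping_delta, current_value, flags, defaultValue);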
@ -2419,26 +2421,29 @@ int videoInput::getCameraPropertyFromCV(int cv_property){
// see CameraControlProperty in strmif.h
switch (cv_property) {
case CV_CAP_PROP_PAN:
case CAP_PROP_PAN:
return CameraControl_Pan;
case CV_CAP_PROP_TILT:
case CAP_PROP_TILT:
return CameraControl_Tilt;
case CV_CAP_PROP_ROLL:
case CAP_PROP_ROLL:
return CameraControl_Roll;
case CV_CAP_PROP_ZOOM:
case CAP_PROP_ZOOM:
return CameraControl_Zoom;
case CV_CAP_PROP_EXPOSURE:
case CAP_PROP_EXPOSURE:
return CameraControl_Exposure;
case CV_CAP_PROP_IRIS:
case CAP_PROP_IRIS:
return CameraControl_Iris;
case CV_CAP_PROP_FOCUS:
case CAP_PROP_FOCUS:
return CameraControl_Focus;
default:
break;
}
return -1;
}
@ -3389,35 +3394,35 @@ double VideoCapture_DShow::getProperty(int propIdx) const
switch (propIdx)
{
// image format properties
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
return g_VI.getWidth(m_index);
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
return g_VI.getHeight(m_index);
case CV_CAP_PROP_FOURCC:
case CAP_PROP_FOURCC:
return g_VI.getFourcc(m_index);
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
return g_VI.getFPS(m_index);
case CV_CAP_PROP_CONVERT_RGB:
case CAP_PROP_CONVERT_RGB:
return g_VI.getConvertRGB(m_index);
case CAP_PROP_CHANNEL:
return g_VI.getChannel(m_index);
case CV_CAP_PROP_AUTOFOCUS:
case CAP_PROP_AUTOFOCUS:
// Flags indicate whether or not autofocus is enabled
if (g_VI.getVideoSettingCamera(m_index, CameraControl_Focus, min_value, max_value, stepping_delta, current_value, flags, defaultValue))
return (double)flags;
break;
// video filter properties
case CV_CAP_PROP_BRIGHTNESS:
case CV_CAP_PROP_CONTRAST:
case CV_CAP_PROP_HUE:
case CV_CAP_PROP_SATURATION:
case CV_CAP_PROP_SHARPNESS:
case CV_CAP_PROP_GAMMA:
case CV_CAP_PROP_MONOCHROME:
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
case CV_CAP_PROP_BACKLIGHT:
case CV_CAP_PROP_GAIN:
case CAP_PROP_BRIGHTNESS:
case CAP_PROP_CONTRAST:
case CAP_PROP_HUE:
case CAP_PROP_SATURATION:
case CAP_PROP_SHARPNESS:
case CAP_PROP_GAMMA:
case CAP_PROP_MONOCHROME:
case CAP_PROP_WHITE_BALANCE_BLUE_U:
case CAP_PROP_BACKLIGHT:
case CAP_PROP_GAIN:
if (g_VI.getVideoSettingFilter(m_index, g_VI.getVideoPropertyFromCV(propIdx), min_value, max_value, stepping_delta, current_value, flags, defaultValue))
return (double)current_value;
break;
@ -3428,17 +3433,17 @@ double VideoCapture_DShow::getProperty(int propIdx) const
break;
// camera properties
case CV_CAP_PROP_PAN:
case CV_CAP_PROP_TILT:
case CV_CAP_PROP_ROLL:
case CV_CAP_PROP_ZOOM:
case CV_CAP_PROP_EXPOSURE:
case CV_CAP_PROP_IRIS:
case CV_CAP_PROP_FOCUS:
case CAP_PROP_PAN:
case CAP_PROP_TILT:
case CAP_PROP_ROLL:
case CAP_PROP_ZOOM:
case CAP_PROP_EXPOSURE:
case CAP_PROP_IRIS:
case CAP_PROP_FOCUS:
if (g_VI.getVideoSettingCamera(m_index, g_VI.getCameraPropertyFromCV(propIdx), min_value, max_value, stepping_delta, current_value, flags, defaultValue))
return (double)current_value;
break;
case CV_CAP_PROP_SETTINGS:
case CAP_PROP_SETTINGS:
return g_VI.property_window_count(m_index);
default:
break;
@ -3452,17 +3457,17 @@ bool VideoCapture_DShow::setProperty(int propIdx, double propVal)
bool handled = false;
switch (propIdx)
{
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
m_width = cvRound(propVal);
handled = true;
break;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
m_height = cvRound(propVal);
handled = true;
break;
case CV_CAP_PROP_FOURCC:
case CAP_PROP_FOURCC:
m_fourcc = (int)(unsigned long)(propVal);
m_width = (int)getProperty(CAP_PROP_FRAME_WIDTH);
m_height = (int)getProperty(CAP_PROP_FRAME_HEIGHT);
@ -3488,7 +3493,7 @@ bool VideoCapture_DShow::setProperty(int propIdx, double propVal)
g_VI.setConvertRGB(m_index, m_convertRGBSet);
break;
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
{
int fps = cvRound(propVal);
if (fps != g_VI.getFPS(m_index))
@ -3504,7 +3509,7 @@ bool VideoCapture_DShow::setProperty(int propIdx, double propVal)
return g_VI.isDeviceSetup(m_index);
}
case CV_CAP_PROP_AUTO_EXPOSURE:
case CAP_PROP_AUTO_EXPOSURE:
{
// Flags are required to toggle auto exposure on or off, but the setProperty interface does not support multiple parameters
bool enabled = cvRound(propVal) == 1;
@ -3516,7 +3521,7 @@ bool VideoCapture_DShow::setProperty(int propIdx, double propVal)
return g_VI.setVideoSettingCamera(m_index, CameraControl_Exposure, currentExposure, enabled ? CameraControl_Flags_Auto | CameraControl_Flags_Manual : CameraControl_Flags_Manual, enabled ? true : false);
}
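// From user code this path is reached through the generic property interface, e.g.
// (hypothetical snippet, not part of this file):
//   cv::VideoCapture cap(0, cv::CAP_DSHOW);
//   cap.set(cv::CAP_PROP_AUTO_EXPOSURE, 1); // 1 -> auto exposure on, 0 -> manual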
case CV_CAP_PROP_AUTOFOCUS:
case CAP_PROP_AUTOFOCUS:
{
// Flags are required to toggle autofocus on or off, but the setProperty interface does not support multiple parameters
bool enabled = cvRound(propVal) == 1;
@ -3528,7 +3533,7 @@ bool VideoCapture_DShow::setProperty(int propIdx, double propVal)
return g_VI.setVideoSettingCamera(m_index, CameraControl_Focus, currentFocus, enabled ? CameraControl_Flags_Auto | CameraControl_Flags_Manual : CameraControl_Flags_Manual, enabled ? true : false);
}
case CV_CAP_PROP_CONVERT_RGB:
case CAP_PROP_CONVERT_RGB:
{
const bool convertRgb = cvRound(propVal) == 1;
const bool success = g_VI.setConvertRGB(m_index, convertRgb);
@ -3575,14 +3580,14 @@ bool VideoCapture_DShow::setProperty(int propIdx, double propVal)
switch (propIdx)
{
case cv::VideoCaptureProperties::CAP_PROP_AUTO_WB:
case CV_CAP_PROP_AUTO_EXPOSURE:
case CAP_PROP_AUTO_EXPOSURE:
useDefaultValue = true;
if (cvRound(propVal) == 1)
flags = VideoProcAmp_Flags_Auto;
else
flags = VideoProcAmp_Flags_Manual;
break;
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
case CAP_PROP_WHITE_BALANCE_BLUE_U:
flags = VideoProcAmp_Flags_Manual;
break;
}
@ -3590,33 +3595,33 @@ bool VideoCapture_DShow::setProperty(int propIdx, double propVal)
//video Filter properties
switch (propIdx)
{
case CV_CAP_PROP_BRIGHTNESS:
case CV_CAP_PROP_CONTRAST:
case CV_CAP_PROP_HUE:
case CV_CAP_PROP_SATURATION:
case CV_CAP_PROP_SHARPNESS:
case CV_CAP_PROP_GAMMA:
case CV_CAP_PROP_MONOCHROME:
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
case CAP_PROP_BRIGHTNESS:
case CAP_PROP_CONTRAST:
case CAP_PROP_HUE:
case CAP_PROP_SATURATION:
case CAP_PROP_SHARPNESS:
case CAP_PROP_GAMMA:
case CAP_PROP_MONOCHROME:
case CAP_PROP_WHITE_BALANCE_BLUE_U:
case cv::VideoCaptureProperties::CAP_PROP_AUTO_WB:
case CV_CAP_PROP_BACKLIGHT:
case CV_CAP_PROP_GAIN:
case CAP_PROP_BACKLIGHT:
case CAP_PROP_GAIN:
return g_VI.setVideoSettingFilter(m_index, g_VI.getVideoPropertyFromCV(propIdx), (long)propVal, flags, useDefaultValue);
}
//camera properties
switch (propIdx)
{
case CV_CAP_PROP_PAN:
case CV_CAP_PROP_TILT:
case CV_CAP_PROP_ROLL:
case CV_CAP_PROP_ZOOM:
case CV_CAP_PROP_EXPOSURE:
case CV_CAP_PROP_IRIS:
case CV_CAP_PROP_FOCUS:
case CAP_PROP_PAN:
case CAP_PROP_TILT:
case CAP_PROP_ROLL:
case CAP_PROP_ZOOM:
case CAP_PROP_EXPOSURE:
case CAP_PROP_IRIS:
case CAP_PROP_FOCUS:
return g_VI.setVideoSettingCamera(m_index, g_VI.getCameraPropertyFromCV(propIdx), (long)propVal);
// show video/camera filter dialog
case CV_CAP_PROP_SETTINGS:
case CAP_PROP_SETTINGS:
return g_VI.showSettingsWindow(m_index);
}
@ -3646,7 +3651,7 @@ bool VideoCapture_DShow::retrieveFrame(int, OutputArray frame)
}
int VideoCapture_DShow::getCaptureDomain()
{
return CV_CAP_DSHOW;
return CAP_DSHOW;
}
bool VideoCapture_DShow::isOpened() const
{
View File
@ -65,7 +65,7 @@
namespace cv {
namespace {
class CvCapture_FFMPEG_proxy CV_FINAL : public cv::IVideoCapture
class CvCapture_FFMPEG_proxy CV_FINAL : public cv::VideoCaptureBase
{
public:
CvCapture_FFMPEG_proxy() { ffmpegCapture = 0; }
@ -76,11 +76,11 @@ public:
}
virtual ~CvCapture_FFMPEG_proxy() { close(); }
virtual double getProperty(int propId) const CV_OVERRIDE
virtual double getProperty_(int propId) const CV_OVERRIDE
{
return ffmpegCapture ? icvGetCaptureProperty_FFMPEG_p(ffmpegCapture, propId) : 0;
}
virtual bool setProperty(int propId, double value) CV_OVERRIDE
virtual bool setProperty_(int propId, double value) CV_OVERRIDE
{
return ffmpegCapture ? icvSetCaptureProperty_FFMPEG_p(ffmpegCapture, propId, value)!=0 : false;
}
@ -88,7 +88,7 @@ public:
{
return ffmpegCapture ? icvGrabFrame_FFMPEG_p(ffmpegCapture)!=0 : false;
}
virtual bool retrieveFrame(int flag, cv::OutputArray frame) CV_OVERRIDE
virtual bool retrieveFrame_(int flag, cv::OutputArray frame) CV_OVERRIDE
{
unsigned char* data = 0;
int step=0, width=0, height=0, cn=0, depth=0;
@ -112,10 +112,7 @@ public:
return false;
}
cv::Mat tmp(height, width, CV_MAKETYPE(depth, cn), data, step);
applyMetadataRotation(*this, tmp);
tmp.copyTo(frame);
cv::Mat(height, width, CV_MAKETYPE(depth, cn), data, step).copyTo(frame);
return true;
}
bool open(const cv::String& filename, const cv::VideoCaptureParameters& params)
@ -134,7 +131,7 @@ public:
}
virtual bool isOpened() const CV_OVERRIDE { return ffmpegCapture != 0; }
virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_FFMPEG; }
virtual int getCaptureDomain() CV_OVERRIDE { return cv::CAP_FFMPEG; }
protected:
CvCapture_FFMPEG* ffmpegCapture;


@ -569,7 +569,6 @@ struct CvCapture_FFMPEG
int64_t frame_number, first_frame_number;
bool rotation_auto;
int rotation_angle; // valid 0, 90, 180, 270
double eps_zero;
/*
@ -634,11 +633,6 @@ void CvCapture_FFMPEG::init()
rotation_angle = 0;
#if (LIBAVUTIL_BUILD >= CALC_FFMPEG_VERSION(52, 92, 100))
rotation_auto = true;
#else
rotation_auto = false;
#endif
dict = NULL;
#if USE_AV_INTERRUPT_CALLBACK
@ -1808,9 +1802,9 @@ double CvCapture_FFMPEG::getProperty( int property_id ) const
case CAP_PROP_FRAME_COUNT:
return (double)get_total_frames();
case CAP_PROP_FRAME_WIDTH:
return (double)((rotation_auto && ((rotation_angle%180) != 0)) ? frame.height : frame.width);
return frame.width;
case CAP_PROP_FRAME_HEIGHT:
return (double)((rotation_auto && ((rotation_angle%180) != 0)) ? frame.width : frame.height);
return frame.height;
case CAP_PROP_FRAME_TYPE:
return (double)av_get_picture_type_char(picture->pict_type);
case CAP_PROP_FPS:
@ -1852,12 +1846,6 @@ double CvCapture_FFMPEG::getProperty( int property_id ) const
return static_cast<double>(get_bitrate());
case CAP_PROP_ORIENTATION_META:
return static_cast<double>(rotation_angle);
case CAP_PROP_ORIENTATION_AUTO:
#if LIBAVUTIL_BUILD >= CALC_FFMPEG_VERSION(52, 94, 100)
return static_cast<double>(rotation_auto);
#else
return 0;
#endif
#if USE_AV_HW_CODECS
case CAP_PROP_HW_ACCELERATION:
return static_cast<double>(va_type);
@ -2077,14 +2065,6 @@ bool CvCapture_FFMPEG::setProperty( int property_id, double value )
case CAP_PROP_CONVERT_RGB:
convertRGB = (value != 0);
return true;
case CAP_PROP_ORIENTATION_AUTO:
#if LIBAVUTIL_BUILD >= CALC_FFMPEG_VERSION(52, 94, 100)
rotation_auto = value != 0 ? true : false;
return true;
#else
rotation_auto = false;
return false;
#endif
default:
return false;
}


@ -102,8 +102,8 @@ public:
* and CAP_PROP_GPHOTO2_FLUSH_MSGS (will return pointer to char array).
* 6. Camera settings are fetched from the device as lazily as possible.
* This creates a problem when a change to one setting
* affects another setting. You can use CV_CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE
* or CV_CAP_PROP_GPHOTO2_RELOAD_CONFIG to be sure that the property you are
* affects another setting. You can use CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE
* or CAP_PROP_GPHOTO2_RELOAD_CONFIG to be sure that the property you are
* planning to get is up to date.
*
* Capture can work in 2 main modes: preview and final.
@ -111,22 +111,22 @@ public:
* Change modes with CAP_PROP_GPHOTO2_PREVIEW property.
*
* Moreover some generic properties are mapped to widgets, or implemented:
* * CV_CAP_PROP_SPEED,
* * CV_CAP_PROP_APERATURE,
* * CV_CAP_PROP_EXPOSUREPROGRAM,
* * CV_CAP_PROP_VIEWFINDER,
* * CV_CAP_PROP_POS_MSEC,
* * CV_CAP_PROP_POS_FRAMES,
* * CV_CAP_PROP_FRAME_WIDTH,
* * CV_CAP_PROP_FRAME_HEIGHT,
* * CV_CAP_PROP_FPS,
* * CV_CAP_PROP_FRAME_COUNT
* * CV_CAP_PROP_FORMAT,
* * CV_CAP_PROP_EXPOSURE,
* * CV_CAP_PROP_TRIGGER_DELAY,
* * CV_CAP_PROP_ZOOM,
* * CV_CAP_PROP_FOCUS,
* * CV_CAP_PROP_ISO_SPEED.
* * CAP_PROP_SPEED,
* * CAP_PROP_APERATURE,
* * CAP_PROP_EXPOSUREPROGRAM,
* * CAP_PROP_VIEWFINDER,
* * CAP_PROP_POS_MSEC,
* * CAP_PROP_POS_FRAMES,
* * CAP_PROP_FRAME_WIDTH,
* * CAP_PROP_FRAME_HEIGHT,
* * CAP_PROP_FPS,
* * CAP_PROP_FRAME_COUNT
* * CAP_PROP_FORMAT,
* * CAP_PROP_EXPOSURE,
* * CAP_PROP_TRIGGER_DELAY,
* * CAP_PROP_ZOOM,
* * CAP_PROP_FOCUS,
* * CAP_PROP_ISO_SPEED.
*/
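
A minimal sketch of driving these properties from user code (the camera index is a placeholder; the constants are the videoio ones listed above):

#include <opencv2/core.hpp>
#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture cam(0, cv::CAP_GPHOTO2);           // first gPhoto2 camera
    if (!cam.isOpened())
        return 1;
    cam.set(cv::CAP_PROP_GPHOTO2_PREVIEW, 1);           // work in preview mode
    cam.set(cv::CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE, 1);  // re-fetch config after each change
    cv::Mat frame;
    cam >> frame;                                       // grab + retrieve one preview frame
    return frame.empty() ? 1 : 0;
}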
class DigitalCameraCapture: public IVideoCapture
{
@ -144,7 +144,7 @@ public:
virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual bool retrieveFrame(int, OutputArray) CV_OVERRIDE;
virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_GPHOTO2; }
virtual int getCaptureDomain() CV_OVERRIDE { return CAP_GPHOTO2; }
bool open(int index);
void close();
@ -216,10 +216,10 @@ private:
std::deque<CameraFile *> grabbedFrames;
// Properties
bool preview; // CV_CAP_PROP_GPHOTO2_PREVIEW
std::string widgetInfo; // CV_CAP_PROP_GPHOTO2_WIDGET_ENUMERATE
bool preview; // CAP_PROP_GPHOTO2_PREVIEW
std::string widgetInfo; // CAP_PROP_GPHOTO2_WIDGET_ENUMERATE
std::map<int, CameraWidget *> widgets;
bool reloadOnChange; // CV_CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE
bool reloadOnChange; // CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE
time_t firstCapturedFrameTime;
unsigned long int capturedFrames;
@ -232,9 +232,9 @@ private:
int collectWidgets(std::ostream &os, CameraWidget * widget);
// Messages / debug
mutable std::ostringstream msgsBuffer; // CV_CAP_PROP_GPHOTO2_FLUSH_MSGS
mutable std::string lastFlush; // CV_CAP_PROP_GPHOTO2_FLUSH_MSGS
bool collectMsgs; // CV_CAP_PROP_GPHOTO2_COLLECT_MSGS
mutable std::ostringstream msgsBuffer; // CAP_PROP_GPHOTO2_FLUSH_MSGS
mutable std::string lastFlush; // CAP_PROP_GPHOTO2_FLUSH_MSGS
bool collectMsgs; // CAP_PROP_GPHOTO2_COLLECT_MSGS
};
/**
@ -506,18 +506,18 @@ CameraWidget * DigitalCameraCapture::getGenericProperty(int propertyId,
{
switch (propertyId)
{
case CV_CAP_PROP_POS_MSEC:
case CAP_PROP_POS_MSEC:
{
// Only seconds-level precision; FUTURE: cross-platform milliseconds
output = (time(0) - firstCapturedFrameTime) * 1e2;
return NULL;
}
case CV_CAP_PROP_POS_FRAMES:
case CAP_PROP_POS_FRAMES:
{
output = capturedFrames;
return NULL;
}
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
{
if (!frame.empty())
{
@ -525,7 +525,7 @@ CameraWidget * DigitalCameraCapture::getGenericProperty(int propertyId,
}
return NULL;
}
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
{
if (!frame.empty())
{
@ -533,7 +533,7 @@ CameraWidget * DigitalCameraCapture::getGenericProperty(int propertyId,
}
return NULL;
}
case CV_CAP_PROP_FORMAT:
case CAP_PROP_FORMAT:
{
if (!frame.empty())
{
@ -541,39 +541,39 @@ CameraWidget * DigitalCameraCapture::getGenericProperty(int propertyId,
}
return NULL;
}
case CV_CAP_PROP_FPS: // returns average fps since the beginning
case CAP_PROP_FPS: // returns average fps since the beginning
{
double wholeProcessTime = 0;
getGenericProperty(CV_CAP_PROP_POS_MSEC, wholeProcessTime);
getGenericProperty(CAP_PROP_POS_MSEC, wholeProcessTime);
wholeProcessTime /= 1e2;
output = capturedFrames / wholeProcessTime;
return NULL;
}
case CV_CAP_PROP_FRAME_COUNT:
case CAP_PROP_FRAME_COUNT:
{
output = capturedFrames;
return NULL;
}
case CV_CAP_PROP_EXPOSURE:
case CAP_PROP_EXPOSURE:
return findWidgetByName(PROP_EXPOSURE_COMPENSACTION);
case CV_CAP_PROP_TRIGGER_DELAY:
case CAP_PROP_TRIGGER_DELAY:
return findWidgetByName(PROP_SELF_TIMER_DELAY);
case CV_CAP_PROP_ZOOM:
case CAP_PROP_ZOOM:
return findWidgetByName(PROP_MANUALFOCUS);
case CV_CAP_PROP_FOCUS:
case CAP_PROP_FOCUS:
return findWidgetByName(PROP_AUTOFOCUS);
case CV_CAP_PROP_ISO_SPEED:
case CAP_PROP_ISO_SPEED:
return findWidgetByName(PROP_ISO);
case CV_CAP_PROP_SPEED:
case CAP_PROP_SPEED:
return findWidgetByName(PROP_SPEED);
case CV_CAP_PROP_APERTURE:
case CAP_PROP_APERTURE:
{
CameraWidget * widget = findWidgetByName(PROP_APERTURE_NIKON);
return (widget == 0) ? findWidgetByName(PROP_APERTURE_CANON) : widget;
}
case CV_CAP_PROP_EXPOSUREPROGRAM:
case CAP_PROP_EXPOSUREPROGRAM:
return findWidgetByName(PROP_EXPOSURE_PROGRAM);
case CV_CAP_PROP_VIEWFINDER:
case CAP_PROP_VIEWFINDER:
return findWidgetByName(PROP_VIEWFINDER);
}
return NULL;
@ -596,19 +596,19 @@ double DigitalCameraCapture::getProperty(int propertyId) const
switch (propertyId)
{
// gphoto2 cap featured
case CV_CAP_PROP_GPHOTO2_PREVIEW:
case CAP_PROP_GPHOTO2_PREVIEW:
return preview;
case CV_CAP_PROP_GPHOTO2_WIDGET_ENUMERATE:
case CAP_PROP_GPHOTO2_WIDGET_ENUMERATE:
if (rootWidget == NULL)
return 0;
return (intptr_t) widgetInfo.c_str();
case CV_CAP_PROP_GPHOTO2_RELOAD_CONFIG:
case CAP_PROP_GPHOTO2_RELOAD_CONFIG:
return 0; // Trigger, only by set
case CV_CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE:
case CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE:
return reloadOnChange;
case CV_CAP_PROP_GPHOTO2_COLLECT_MSGS:
case CAP_PROP_GPHOTO2_COLLECT_MSGS:
return collectMsgs;
case CV_CAP_PROP_GPHOTO2_FLUSH_MSGS:
case CAP_PROP_GPHOTO2_FLUSH_MSGS:
lastFlush = msgsBuffer.str();
msgsBuffer.str("");
msgsBuffer.clear();
@ -682,35 +682,35 @@ CameraWidget * DigitalCameraCapture::setGenericProperty(int propertyId,
{
switch (propertyId)
{
case CV_CAP_PROP_POS_MSEC:
case CV_CAP_PROP_POS_FRAMES:
case CV_CAP_PROP_FRAME_WIDTH:
case CV_CAP_PROP_FRAME_HEIGHT:
case CV_CAP_PROP_FPS:
case CV_CAP_PROP_FRAME_COUNT:
case CV_CAP_PROP_FORMAT:
case CAP_PROP_POS_MSEC:
case CAP_PROP_POS_FRAMES:
case CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FPS:
case CAP_PROP_FRAME_COUNT:
case CAP_PROP_FORMAT:
output = false;
return NULL;
case CV_CAP_PROP_EXPOSURE:
case CAP_PROP_EXPOSURE:
return findWidgetByName(PROP_EXPOSURE_COMPENSACTION);
case CV_CAP_PROP_TRIGGER_DELAY:
case CAP_PROP_TRIGGER_DELAY:
return findWidgetByName(PROP_SELF_TIMER_DELAY);
case CV_CAP_PROP_ZOOM:
case CAP_PROP_ZOOM:
return findWidgetByName(PROP_MANUALFOCUS);
case CV_CAP_PROP_FOCUS:
case CAP_PROP_FOCUS:
return findWidgetByName(PROP_AUTOFOCUS);
case CV_CAP_PROP_ISO_SPEED:
case CAP_PROP_ISO_SPEED:
return findWidgetByName(PROP_ISO);
case CV_CAP_PROP_SPEED:
case CAP_PROP_SPEED:
return findWidgetByName(PROP_SPEED);
case CV_CAP_PROP_APERTURE:
case CAP_PROP_APERTURE:
{
CameraWidget * widget = findWidgetByName(PROP_APERTURE_NIKON);
return (widget == NULL) ? findWidgetByName(PROP_APERTURE_CANON) : widget;
}
case CV_CAP_PROP_EXPOSUREPROGRAM:
case CAP_PROP_EXPOSUREPROGRAM:
return findWidgetByName(PROP_EXPOSURE_PROGRAM);
case CV_CAP_PROP_VIEWFINDER:
case CAP_PROP_VIEWFINDER:
return findWidgetByName(PROP_VIEWFINDER);
}
return NULL;
@ -733,21 +733,21 @@ bool DigitalCameraCapture::setProperty(int propertyId, double value)
switch (propertyId)
{
// gphoto2 cap featured
case CV_CAP_PROP_GPHOTO2_PREVIEW:
case CAP_PROP_GPHOTO2_PREVIEW:
preview = value != 0;
return true;
case CV_CAP_PROP_GPHOTO2_WIDGET_ENUMERATE:
case CAP_PROP_GPHOTO2_WIDGET_ENUMERATE:
return false;
case CV_CAP_PROP_GPHOTO2_RELOAD_CONFIG:
case CAP_PROP_GPHOTO2_RELOAD_CONFIG:
reloadConfig();
return true;
case CV_CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE:
case CAP_PROP_GPHOTO2_RELOAD_ON_CHANGE:
reloadOnChange = value != 0;
return true;
case CV_CAP_PROP_GPHOTO2_COLLECT_MSGS:
case CAP_PROP_GPHOTO2_COLLECT_MSGS:
collectMsgs = value != 0;
return true;
case CV_CAP_PROP_GPHOTO2_FLUSH_MSGS:
case CAP_PROP_GPHOTO2_FLUSH_MSGS:
return false;
default:
widget = setGenericProperty(propertyId, value, output);
@ -1002,7 +1002,7 @@ void DigitalCameraCapture::readFrameFromFile(CameraFile * file, OutputArray outp
CR(gp_file_get_data_and_size(file, &data, &size));
if (size > 0)
{
Mat buf = Mat(1, size, CV_8UC1, (void *) data);
Mat buf(1, size, CV_8UC1, (void *) data);
if(!buf.empty())
{
frame = imdecode(buf, IMREAD_UNCHANGED);


@ -287,10 +287,10 @@ std::string get_gst_propname(int propId)
{
switch (propId)
{
case CV_CAP_PROP_BRIGHTNESS: return "brightness";
case CV_CAP_PROP_CONTRAST: return "contrast";
case CV_CAP_PROP_SATURATION: return "saturation";
case CV_CAP_PROP_HUE: return "hue";
case CAP_PROP_BRIGHTNESS: return "brightness";
case CAP_PROP_CONTRAST: return "contrast";
case CAP_PROP_SATURATION: return "saturation";
case CAP_PROP_HUE: return "hue";
default: return std::string();
}
}
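
The returned names are plain GObject property names, so they can be applied directly with g_object_set; a sketch, assuming 'v4l2src' is a valid element pointer:

#include <gst/gst.h>
#include <string>

// Sketch only: applies a videoio property to an (assumed valid) v4l2src element,
// using the GObject property name resolved by get_gst_propname() above.
static void apply_cv_prop(GstElement* v4l2src, int propId, int value)
{
    const std::string name = get_gst_propname(propId); // e.g. CAP_PROP_BRIGHTNESS -> "brightness"
    if (name.empty())
        return;                                        // property not mapped by this backend
    g_object_set(G_OBJECT(v4l2src), name.c_str(), value, NULL);
}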
@ -1115,14 +1115,14 @@ bool GStreamerCapture::retrieveVideoFrame(int, OutputArray dst)
else if (name == "video/x-bayer")
{
CV_CheckEQ((int)n_planes, 0, "");
Mat src = Mat(sz, CV_8UC1, frame.map[0].data);
Mat src(sz, CV_8UC1, frame.map[0].data);
src.copyTo(dst);
return true;
}
else if (name == "image/jpeg")
{
CV_CheckEQ((int)n_planes, 0, "");
Mat src = Mat(Size(frame.map[0].size, 1), CV_8UC1, frame.map[0].data);
Mat src(Size(frame.map[0].size, 1), CV_8UC1, frame.map[0].data);
src.copyTo(dst);
return true;
}
@ -1326,7 +1326,7 @@ void GStreamerCapture::newPad(GstElement *, GstPad *pad, gpointer data)
/*!
* \brief Create GStreamer pipeline
* \param filename Filename to open in case of CV_CAP_GSTREAMER_FILE
* \param filename Filename to open in case of CAP_GSTREAMER_FILE
* \return boolean. Specifies if opening was successful.
*
* In case of camera 'index', a pipeline is constructed as follows:
@ -1862,9 +1862,9 @@ double GStreamerCapture::getProperty(int propId) const
switch(propId)
{
case CV_CAP_PROP_POS_MSEC:
case CAP_PROP_POS_MSEC:
return double(timestamp) / GST_MSECOND;
case CV_CAP_PROP_POS_FRAMES:
case CAP_PROP_POS_FRAMES:
if (!isPosFramesSupported)
{
if (isPosFramesEmulated)
@ -1879,7 +1879,7 @@ double GStreamerCapture::getProperty(int propId) const
return 0;
}
return value;
case CV_CAP_PROP_POS_AVI_RATIO:
case CAP_PROP_POS_AVI_RATIO:
format = GST_FORMAT_PERCENT;
status = gst_element_query_position(sink.get(), CV_GST_FORMAT(format), &value);
if(!status) {
@ -1888,18 +1888,18 @@ double GStreamerCapture::getProperty(int propId) const
return 0;
}
return ((double) value) / GST_FORMAT_PERCENT_MAX;
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
return width;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
return height;
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
return fps;
case CV_CAP_PROP_FRAME_COUNT:
case CAP_PROP_FRAME_COUNT:
return (double)duration;
case CV_CAP_PROP_BRIGHTNESS:
case CV_CAP_PROP_CONTRAST:
case CV_CAP_PROP_SATURATION:
case CV_CAP_PROP_HUE:
case CAP_PROP_BRIGHTNESS:
case CAP_PROP_CONTRAST:
case CAP_PROP_SATURATION:
case CAP_PROP_HUE:
if (v4l2src)
{
std::string propName = get_gst_propname(propId);
@ -1915,7 +1915,7 @@ double GStreamerCapture::getProperty(int propId) const
return static_cast<double>(va_type);
case CAP_PROP_HW_DEVICE:
return static_cast<double>(hw_device);
case CV_CAP_GSTREAMER_QUEUE_LENGTH:
case CAP_PROP_GSTREAMER_QUEUE_LENGTH:
if(!sink)
{
CV_WARN("there is no sink yet");
@ -1972,14 +1972,14 @@ bool GStreamerCapture::setProperty(int propId, double value)
return false;
}
bool needRestart = this->isPipelinePlaying() && (propId == CV_CAP_PROP_FRAME_WIDTH || propId == CV_CAP_PROP_FRAME_HEIGHT || propId == CV_CAP_PROP_FPS);
bool needRestart = this->isPipelinePlaying() && (propId == CAP_PROP_FRAME_WIDTH || propId == CAP_PROP_FRAME_HEIGHT || propId == CAP_PROP_FPS);
if (needRestart) {
this->stopPipeline();
}
switch(propId)
{
case CV_CAP_PROP_POS_MSEC:
case CAP_PROP_POS_MSEC:
{
if(!gst_element_seek_simple(GST_ELEMENT(pipeline.get()), GST_FORMAT_TIME,
flags, (gint64) (value * GST_MSECOND))) {
@ -2006,7 +2006,7 @@ bool GStreamerCapture::setProperty(int propId, double value)
}
return true;
}
case CV_CAP_PROP_POS_FRAMES:
case CAP_PROP_POS_FRAMES:
{
if (!isPosFramesSupported)
{
@ -2037,7 +2037,7 @@ bool GStreamerCapture::setProperty(int propId, double value)
gst_element_get_state(pipeline, NULL, NULL, GST_CLOCK_TIME_NONE);
return true;
}
case CV_CAP_PROP_POS_AVI_RATIO:
case CAP_PROP_POS_AVI_RATIO:
{
// https://stackoverflow.com/questions/31290315
// GStreamer docs: GST_FORMAT_PERCENT (5) percentage of stream (few, if any, elements implement this as of May 2009)
@ -2065,19 +2065,19 @@ bool GStreamerCapture::setProperty(int propId, double value)
}
return true;
}
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
if(value > 0)
setFilter("width", G_TYPE_INT, (int) value, 0);
else
removeFilter("width");
break;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
if(value > 0)
setFilter("height", G_TYPE_INT, (int) value, 0);
else
removeFilter("height");
break;
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
if(value > 0) {
int num = 0, denom = 1;
toFraction(value, num, denom);
@ -2085,10 +2085,10 @@ bool GStreamerCapture::setProperty(int propId, double value)
} else
removeFilter("framerate");
break;
case CV_CAP_PROP_BRIGHTNESS:
case CV_CAP_PROP_CONTRAST:
case CV_CAP_PROP_SATURATION:
case CV_CAP_PROP_HUE:
case CAP_PROP_BRIGHTNESS:
case CAP_PROP_CONTRAST:
case CAP_PROP_SATURATION:
case CAP_PROP_HUE:
if (v4l2src)
{
std::string propName = get_gst_propname(propId);
@ -2100,14 +2100,14 @@ bool GStreamerCapture::setProperty(int propId, double value)
}
}
return false;
case CV_CAP_PROP_GAIN:
case CV_CAP_PROP_CONVERT_RGB:
case CAP_PROP_GAIN:
case CAP_PROP_CONVERT_RGB:
break;
case cv::CAP_PROP_HW_ACCELERATION:
return false; // open-only
case cv::CAP_PROP_HW_DEVICE:
return false; // open-only
case CV_CAP_GSTREAMER_QUEUE_LENGTH:
case CAP_PROP_GSTREAMER_QUEUE_LENGTH:
{
if(!sink)
{
@ -2182,7 +2182,7 @@ Ptr<IVideoCapture> createGStreamerCapture_cam(int index, const cv::VideoCaptureP
* \brief The CvVideoWriter_GStreamer class
* Use GStreamer to write video
*/
class CvVideoWriter_GStreamer : public CvVideoWriter
class CvVideoWriter_GStreamer : public IVideoWriter
{
public:
CvVideoWriter_GStreamer()
@ -2212,11 +2212,12 @@ public:
bool open(const std::string &filename, int fourcc,
double fps, const Size &frameSize, const VideoWriterParameters& params );
void close();
bool writeFrame( const IplImage* image ) CV_OVERRIDE;
void write(InputArray) CV_OVERRIDE;
int getIplDepth() const { return ipl_depth; }
virtual double getProperty(int) const CV_OVERRIDE;
double getProperty(int) const CV_OVERRIDE;
bool isOpened() const CV_OVERRIDE { return pipeline && source; }
protected:
const char* filenameToMimetype(const char* filename);
@ -2540,7 +2541,7 @@ bool CvVideoWriter_GStreamer::open( const std::string &filename, int fourcc,
if (fourcc == CV_FOURCC('M','J','P','G') && frameSize.height == 1)
{
CV_Assert(depth == CV_8U);
ipl_depth = IPL_DEPTH_8U;
ipl_depth = CV_8U;
input_pix_fmt = GST_VIDEO_FORMAT_ENCODED;
caps.attach(gst_caps_new_simple("image/jpeg",
"framerate", GST_TYPE_FRACTION, int(fps_num), int(fps_denom),
@ -2550,7 +2551,7 @@ bool CvVideoWriter_GStreamer::open( const std::string &filename, int fourcc,
else if (is_color)
{
CV_Assert(depth == CV_8U);
ipl_depth = IPL_DEPTH_8U;
ipl_depth = CV_8U;
input_pix_fmt = GST_VIDEO_FORMAT_BGR;
bufsize = frameSize.width * frameSize.height * 3;
@ -2566,7 +2567,7 @@ bool CvVideoWriter_GStreamer::open( const std::string &filename, int fourcc,
}
else if (!is_color && depth == CV_8U)
{
ipl_depth = IPL_DEPTH_8U;
ipl_depth = CV_8U;
input_pix_fmt = GST_VIDEO_FORMAT_GRAY8;
bufsize = frameSize.width * frameSize.height;
@ -2580,7 +2581,7 @@ bool CvVideoWriter_GStreamer::open( const std::string &filename, int fourcc,
}
else if (!is_color && depth == CV_16U)
{
ipl_depth = IPL_DEPTH_16U;
ipl_depth = CV_16U;
input_pix_fmt = GST_VIDEO_FORMAT_GRAY16_LE;
bufsize = frameSize.width * frameSize.height * 2;
@ -2670,53 +2671,53 @@ bool CvVideoWriter_GStreamer::open( const std::string &filename, int fourcc,
* The timestamp for the buffer is generated from the framerate set in open
* and ensures a smooth video
*/
bool CvVideoWriter_GStreamer::writeFrame( const IplImage * image )
void CvVideoWriter_GStreamer::write(InputArray image)
{
GstClockTime duration, timestamp;
GstFlowReturn ret;
int size;
handleMessage(pipeline);
if (input_pix_fmt == GST_VIDEO_FORMAT_ENCODED) {
if (image->nChannels != 1 || image->depth != IPL_DEPTH_8U || image->height != 1) {
CV_WARN("cvWriteFrame() needs images with depth = IPL_DEPTH_8U, nChannels = 1 and height = 1.");
return false;
if (image.type() != CV_8UC1 || image.size().height != 1) {
CV_WARN("write frame skipped - expected CV_8UC1, height==1");
return;
}
}
else
if(input_pix_fmt == GST_VIDEO_FORMAT_BGR) {
if (image->nChannels != 3 || image->depth != IPL_DEPTH_8U) {
CV_WARN("cvWriteFrame() needs images with depth = IPL_DEPTH_8U and nChannels = 3.");
return false;
if (image.type() != CV_8UC3) {
CV_WARN("write frame skipped - expected CV_8UC3");
return;
}
}
else if (input_pix_fmt == GST_VIDEO_FORMAT_GRAY8) {
if (image->nChannels != 1 || image->depth != IPL_DEPTH_8U) {
CV_WARN("cvWriteFrame() needs images with depth = IPL_DEPTH_8U and nChannels = 1.");
return false;
if (image.type() != CV_8UC1) {
CV_WARN("write frame skipped - expected CV_8UC1");
return;
}
}
else if (input_pix_fmt == GST_VIDEO_FORMAT_GRAY16_LE) {
if (image->nChannels != 1 || image->depth != IPL_DEPTH_16U) {
CV_WARN("cvWriteFrame() needs images with depth = IPL_DEPTH_16U and nChannels = 1.");
return false;
if (image.type() != CV_16UC1) {
CV_WARN("write frame skipped - expected CV_16UC3");
return;
}
}
else {
CV_WARN("cvWriteFrame() needs BGR or grayscale images\n");
return false;
CV_WARN("write frame skipped - unsupported format");
return;
}
size = image->imageSize;
Mat imageMat = image.getMat();
const size_t buf_size = imageMat.total() * imageMat.elemSize();
duration = ((double)1/framerate) * GST_SECOND;
timestamp = num_frames * duration;
//gst_app_src_push_buffer takes ownership of the buffer, so we need to supply it a copy
GstBuffer *buffer = gst_buffer_new_allocate(NULL, size, NULL);
GstBuffer *buffer = gst_buffer_new_allocate(NULL, buf_size, NULL);
GstMapInfo info;
gst_buffer_map(buffer, &info, (GstMapFlags)GST_MAP_READ);
memcpy(info.data, (guint8*)image->imageData, size);
memcpy(info.data, (guint8*)imageMat.data, buf_size);
gst_buffer_unmap(buffer, &info);
GST_BUFFER_DURATION(buffer) = duration;
GST_BUFFER_PTS(buffer) = timestamp;
@ -2728,14 +2729,12 @@ bool CvVideoWriter_GStreamer::writeFrame( const IplImage * image )
if (ret != GST_FLOW_OK)
{
CV_WARN("Error pushing buffer to GStreamer pipeline");
return false;
return;
}
//GST_DEBUG_BIN_TO_DOT_FILE(GST_BIN(pipeline), GST_DEBUG_GRAPH_SHOW_ALL, "pipeline");
++num_frames;
return true;
}
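
As a worked example of the timestamp arithmetic above (assuming GStreamer's GST_SECOND = 1e9 ns): at framerate = 25, each buffer gets duration = 40'000'000 ns, and frame n is stamped at n * 40 ms.

#include <cstdint>

// Sketch of the constant-rate PTS scheme used by write() above.
static std::int64_t frame_pts_ns(std::int64_t frame_index, double framerate)
{
    const std::int64_t ns_per_second = 1000000000;               // mirrors GST_SECOND
    const std::int64_t duration =
        (std::int64_t)(ns_per_second / framerate);               // per-frame duration in ns
    return frame_index * duration;                               // PTS of this frame
}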
@ -2755,18 +2754,9 @@ double CvVideoWriter_GStreamer::getProperty(int propId) const
Ptr<IVideoWriter> create_GStreamer_writer(const std::string& filename, int fourcc, double fps,
const cv::Size& frameSize, const VideoWriterParameters& params)
{
CvVideoWriter_GStreamer* wrt = new CvVideoWriter_GStreamer;
try
{
if (wrt->open(filename, fourcc, fps, frameSize, params))
return makePtr<LegacyWriter>(wrt);
delete wrt;
}
catch (...)
{
delete wrt;
throw;
}
Ptr<CvVideoWriter_GStreamer> ret = makePtr<CvVideoWriter_GStreamer>();
if (ret->open(filename, fourcc, fps, frameSize, params))
return ret;
return 0;
}
@ -3030,7 +3020,7 @@ CvResult CV_API_CALL cv_writer_open_with_params(
CvVideoWriter_GStreamer* wrt = 0;
try
{
CvSize sz = { width, height };
cv::Size sz { width, height };
VideoWriterParameters parameters(params, n_params);
wrt = new CvVideoWriter_GStreamer();
if (wrt && wrt->open(filename, fourcc, fps, sz, parameters))
@ -3104,11 +3094,11 @@ CvResult CV_API_CALL cv_writer_write(CvPluginWriter handle, const unsigned char
try
{
CvVideoWriter_GStreamer* instance = (CvVideoWriter_GStreamer*)handle;
CvSize sz = { width, height };
IplImage img;
cvInitImageHeader(&img, sz, instance->getIplDepth(), cn);
cvSetData(&img, const_cast<unsigned char*>(data), step);
return instance->writeFrame(&img) ? CV_ERROR_OK : CV_ERROR_FAIL;
const cv::Size sz = { width, height };
const int image_type = CV_MAKE_TYPE(instance->getIplDepth(), cn);
cv::Mat img(sz, image_type, (void*)data, step);
instance->write(img);
return CV_ERROR_OK;
}
catch (const std::exception& e)
{


@ -151,23 +151,23 @@ double CvCapture_Images::getProperty(int id) const
{
switch(id)
{
case CV_CAP_PROP_POS_MSEC:
case cv::CAP_PROP_POS_MSEC:
CV_WARN("collections of images don't have framerates");
return 0;
case CV_CAP_PROP_POS_FRAMES:
case cv::CAP_PROP_POS_FRAMES:
return currentframe;
case CV_CAP_PROP_FRAME_COUNT:
case cv::CAP_PROP_FRAME_COUNT:
return length;
case CV_CAP_PROP_POS_AVI_RATIO:
case cv::CAP_PROP_POS_AVI_RATIO:
return (double)currentframe / (double)(length - 1);
case CV_CAP_PROP_FRAME_WIDTH:
case cv::CAP_PROP_FRAME_WIDTH:
return frame.cols;
case CV_CAP_PROP_FRAME_HEIGHT:
case cv::CAP_PROP_FRAME_HEIGHT:
return frame.rows;
case CV_CAP_PROP_FPS:
case cv::CAP_PROP_FPS:
CV_WARN("collections of images don't have framerates");
return 1;
case CV_CAP_PROP_FOURCC:
case cv::CAP_PROP_FOURCC:
CV_WARN("collections of images don't have 4-character codes");
return 0;
}
@ -178,8 +178,8 @@ bool CvCapture_Images::setProperty(int id, double value)
{
switch(id)
{
case CV_CAP_PROP_POS_MSEC:
case CV_CAP_PROP_POS_FRAMES:
case cv::CAP_PROP_POS_MSEC:
case cv::CAP_PROP_POS_FRAMES:
if(value < 0) {
CV_WARN("seeking to negative positions does not work - clamping");
value = 0;
@ -192,7 +192,7 @@ bool CvCapture_Images::setProperty(int id, double value)
if (currentframe != 0)
grabbedInOpen = false; // grabbed frame is not valid anymore
return true;
case CV_CAP_PROP_POS_AVI_RATIO:
case cv::CAP_PROP_POS_AVI_RATIO:
if(value > 1) {
CV_WARN("seeking beyond end of sequence - clamping");
value = 1;
@ -372,21 +372,17 @@ Ptr<IVideoCapture> create_Images_capture(const std::string &filename)
// image sequence writer
//
//
class CvVideoWriter_Images CV_FINAL : public CvVideoWriter
class CvVideoWriter_Images CV_FINAL : public IVideoWriter
{
public:
CvVideoWriter_Images()
{
filename_pattern.clear();
currentframe = 0;
}
virtual ~CvVideoWriter_Images() { close(); }
virtual bool open( const char* _filename );
virtual void close();
virtual bool setProperty( int, double ); // FIXIT doesn't work: IVideoWriter interface only!
virtual bool writeFrame( const IplImage* ) CV_OVERRIDE;
CvVideoWriter_Images(const std::string & _filename);
void close();
~CvVideoWriter_Images() CV_OVERRIDE { close(); }
double getProperty(int) const CV_OVERRIDE { return 0; }
bool setProperty( int, double ) CV_OVERRIDE; // FIXIT doesn't work: IVideoWriter interface only!
bool isOpened() const CV_OVERRIDE { return !filename_pattern.empty(); }
void write( InputArray ) CV_OVERRIDE;
int getCaptureDomain() const CV_OVERRIDE { return cv::CAP_IMAGES; }
protected:
std::string filename_pattern;
@ -394,7 +390,7 @@ protected:
std::vector<int> params;
};
bool CvVideoWriter_Images::writeFrame( const IplImage* image )
void CvVideoWriter_Images::write(InputArray image)
{
CV_Assert(!filename_pattern.empty());
cv::String filename = cv::format(filename_pattern.c_str(), (int)currentframe);
@ -404,12 +400,9 @@ bool CvVideoWriter_Images::writeFrame( const IplImage* image )
image_params.push_back(0); // append parameters 'stop' mark
image_params.push_back(0);
cv::Mat img = cv::cvarrToMat(image);
bool ret = cv::imwrite(filename, img, image_params);
cv::Mat img = image.getMat();
cv::imwrite(filename, img, image_params);
currentframe++;
return ret;
}
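
From the user side this writer is reached through the CAP_IMAGES backend with a printf-style filename pattern; a hedged sketch (pattern and frame size are placeholders; fourcc and fps are ignored by this backend, as the create_Images_writer signature shows):

#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoWriter w("frame_%04d.png", cv::CAP_IMAGES, /*fourcc=*/0,
                      /*fps=*/0.0, cv::Size(640, 480));
    if (!w.isOpened())
        return 1;
    cv::Mat img(480, 640, CV_8UC3, cv::Scalar::all(0));
    w.write(img);   // written to frame_0000.png via imwrite()
    return 0;
}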
void CvVideoWriter_Images::close()
@ -420,12 +413,11 @@ void CvVideoWriter_Images::close()
}
bool CvVideoWriter_Images::open( const char* _filename )
CvVideoWriter_Images::CvVideoWriter_Images(const std::string & _filename)
{
unsigned offset = 0;
close();
CV_Assert(_filename);
filename_pattern = icvExtractPattern(_filename, &offset);
CV_Assert(!filename_pattern.empty());
@ -433,12 +425,10 @@ bool CvVideoWriter_Images::open( const char* _filename )
if (!cv::haveImageWriter(filename))
{
close();
return false;
}
currentframe = offset;
params.clear();
return true;
}
@ -454,23 +444,9 @@ bool CvVideoWriter_Images::setProperty( int id, double value )
}
Ptr<IVideoWriter> create_Images_writer(const std::string &filename, int, double, const Size &,
const cv::VideoWriterParameters&)
const cv::VideoWriterParameters&)
{
CvVideoWriter_Images *writer = new CvVideoWriter_Images;
try
{
if( writer->open( filename.c_str() ))
return makePtr<LegacyWriter>(writer);
delete writer;
}
catch (...)
{
delete writer;
throw;
}
return 0;
return makePtr<CvVideoWriter_Images>(filename);
}
} // cv::


@ -241,85 +241,31 @@ public:
};
} // namespace
//===================================================
// Utility
static inline void applyMetadataRotation(const IVideoCapture& cap, OutputArray mat)
// Advanced base class for VideoCapture backends providing some extra functionality
class VideoCaptureBase : public IVideoCapture
{
bool rotation_auto = 0 != cap.getProperty(CAP_PROP_ORIENTATION_AUTO);
int rotation_angle = static_cast<int>(cap.getProperty(CAP_PROP_ORIENTATION_META));
if(!rotation_auto || rotation_angle%360 == 0)
{
return;
}
cv::RotateFlags flag;
if(rotation_angle == 90 || rotation_angle == -270) { // Rotate clockwise 90 degrees
flag = cv::ROTATE_90_CLOCKWISE;
} else if(rotation_angle == 270 || rotation_angle == -90) { // Rotate clockwise 270 degrees
flag = cv::ROTATE_90_COUNTERCLOCKWISE;
} else if(rotation_angle == 180 || rotation_angle == -180) { // Rotate clockwise 180 degrees
flag = cv::ROTATE_180;
} else { // Unsupported rotation
return;
}
cv::rotate(mat, mat, flag);
}
//===================================================
// Wrapper
class LegacyCapture : public IVideoCapture
{
private:
CvCapture * cap;
bool autorotate;
LegacyCapture(const LegacyCapture &);
LegacyCapture& operator=(const LegacyCapture &);
bool shouldSwapWidthHeight() const
{
if (!autorotate)
return false;
int rotation = static_cast<int>(cap->getProperty(cv::CAP_PROP_ORIENTATION_META));
return std::abs(rotation % 180) == 90;
}
public:
LegacyCapture(CvCapture * cap_) : cap(cap_), autorotate(true) {}
~LegacyCapture()
{
cvReleaseCapture(&cap);
}
VideoCaptureBase() : autorotate(false) {}
double getProperty(int propId) const CV_OVERRIDE
{
if (!cap)
return 0;
switch(propId)
{
case cv::CAP_PROP_ORIENTATION_AUTO:
return static_cast<double>(autorotate);
case cv::CAP_PROP_FRAME_WIDTH:
return shouldSwapWidthHeight() ? cap->getProperty(cv::CAP_PROP_FRAME_HEIGHT) : cap->getProperty(cv::CAP_PROP_FRAME_WIDTH);
return shouldSwapWidthHeight() ? getProperty_(cv::CAP_PROP_FRAME_HEIGHT) : getProperty_(cv::CAP_PROP_FRAME_WIDTH);
case cv::CAP_PROP_FRAME_HEIGHT:
return shouldSwapWidthHeight() ? cap->getProperty(cv::CAP_PROP_FRAME_WIDTH) : cap->getProperty(cv::CAP_PROP_FRAME_HEIGHT);
return shouldSwapWidthHeight() ? getProperty_(cv::CAP_PROP_FRAME_WIDTH) : getProperty_(cv::CAP_PROP_FRAME_HEIGHT);
default:
return cap->getProperty(propId);
return getProperty_(propId);
}
}
bool setProperty(int propId, double value) CV_OVERRIDE
{
if (!cap)
return false;
switch(propId)
{
case cv::CAP_PROP_ORIENTATION_AUTO:
@ -327,84 +273,55 @@ public:
return true;
default:
return cvSetCaptureProperty(cap, propId, value) != 0;
return setProperty_(propId, value);
}
}
bool grabFrame() CV_OVERRIDE
{
return cap ? cvGrabFrame(cap) != 0 : false;
}
bool retrieveFrame(int channel, OutputArray image) CV_OVERRIDE
{
IplImage* _img = cvRetrieveFrame(cap, channel);
if( !_img )
{
image.release();
const bool res = retrieveFrame_(channel, image);
if (res)
applyMetadataRotation(image);
return res;
}
protected:
virtual double getProperty_(int) const = 0;
virtual bool setProperty_(int, double) = 0;
virtual bool retrieveFrame_(int, OutputArray) = 0;
protected:
bool shouldSwapWidthHeight() const
{
if (!autorotate)
return false;
}
if(_img->origin == IPL_ORIGIN_TL)
{
cv::cvarrToMat(_img).copyTo(image);
}
else
{
Mat temp = cv::cvarrToMat(_img);
flip(temp, image, 0);
}
applyMetadataRotation(*this, image);
return true;
int rotation = static_cast<int>(getProperty(cv::CAP_PROP_ORIENTATION_META));
return std::abs(rotation % 180) == 90;
}
bool isOpened() const CV_OVERRIDE
void applyMetadataRotation(OutputArray mat) const
{
return cap != 0; // legacy interface doesn't support closed files
}
int getCaptureDomain() CV_OVERRIDE
{
return cap ? cap->getCaptureDomain() : 0;
bool rotation_auto = 0 != getProperty(CAP_PROP_ORIENTATION_AUTO);
int rotation_angle = static_cast<int>(getProperty(CAP_PROP_ORIENTATION_META));
if(!rotation_auto || rotation_angle%360 == 0)
{
return;
}
cv::RotateFlags flag;
if(rotation_angle == 90 || rotation_angle == -270) { // Rotate clockwise 90 degrees
flag = cv::ROTATE_90_CLOCKWISE;
} else if(rotation_angle == 270 || rotation_angle == -90) { // Rotate clockwise 270 degrees
flag = cv::ROTATE_90_COUNTERCLOCKWISE;
} else if(rotation_angle == 180 || rotation_angle == -180) { // Rotate clockwise 180 degrees
flag = cv::ROTATE_180;
} else { // Unsupported rotation
return;
}
cv::rotate(mat, mat, flag);
}
CvCapture* getCvCapture() const { return cap; }
protected:
bool autorotate;
};
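
The rotation normalization in applyMetadataRotation() reduces to a small pure mapping; restated as a standalone sketch for clarity:

#include <opencv2/core.hpp>

// The angle-to-flag mapping used by applyMetadataRotation() above; returns
// false for angles the base class leaves untouched.
static bool rotationFlagFromAngle(int angle, cv::RotateFlags& flag)
{
    if (angle == 90 || angle == -270)        flag = cv::ROTATE_90_CLOCKWISE;
    else if (angle == 270 || angle == -90)   flag = cv::ROTATE_90_COUNTERCLOCKWISE;
    else if (angle == 180 || angle == -180)  flag = cv::ROTATE_180;
    else                                     return false;   // unsupported / no-op rotation
    return true;
}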
class LegacyWriter : public IVideoWriter
{
private:
CvVideoWriter * writer;
LegacyWriter(const LegacyWriter &);
LegacyWriter& operator=(const LegacyWriter &);
public:
LegacyWriter(CvVideoWriter * wri_) : writer(wri_)
{}
~LegacyWriter()
{
cvReleaseVideoWriter(&writer);
}
double getProperty(int propId) const CV_OVERRIDE
{
if (writer)
{
return writer->getProperty(propId);
}
return 0.;
}
bool setProperty(int, double) CV_OVERRIDE
{
return false;
}
bool isOpened() const CV_OVERRIDE
{
return writer != NULL;
}
void write(InputArray image) CV_OVERRIDE
{
IplImage _img = cvIplImage(image.getMat());
cvWriteFrame(writer, &_img);
}
int getCaptureDomain() const CV_OVERRIDE
{
return writer ? writer->getCaptureDomain() : 0;
}
};
//==================================================================================================


@ -758,7 +758,7 @@ public:
bool retrieveVideoFrame(OutputArray);
virtual bool retrieveFrame(int, cv::OutputArray) CV_OVERRIDE;
virtual bool isOpened() const CV_OVERRIDE { return isOpen; }
virtual int getCaptureDomain() CV_OVERRIDE { return CV_CAP_MSMF; }
virtual int getCaptureDomain() CV_OVERRIDE { return CAP_MSMF; }
protected:
bool configureOutput();
bool configureAudioOutput(MediaType newType);
@ -2080,131 +2080,131 @@ double CvCapture_MSMF::getProperty( int property_id ) const
if (isOpen)
switch (property_id)
{
case CV_CAP_PROP_MODE:
case CAP_PROP_MODE:
return captureMode;
case cv::CAP_PROP_HW_DEVICE:
return hwDeviceIndex;
case cv::CAP_PROP_HW_ACCELERATION:
return static_cast<double>(va_type);
case CV_CAP_PROP_CONVERT_RGB:
case CAP_PROP_CONVERT_RGB:
return convertFormat ? 1 : 0;
case CV_CAP_PROP_SAR_NUM:
case CAP_PROP_SAR_NUM:
return captureVideoFormat.aspectRatioNum;
case CV_CAP_PROP_SAR_DEN:
case CAP_PROP_SAR_DEN:
return captureVideoFormat.aspectRatioDenom;
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
return captureVideoFormat.width;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
return captureVideoFormat.height;
case CV_CAP_PROP_FOURCC:
case CAP_PROP_FOURCC:
return captureVideoFormat.subType.Data1;
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
return captureVideoFormat.getFramerate();
case CV_CAP_PROP_FRAME_COUNT:
case CAP_PROP_FRAME_COUNT:
if (duration != 0)
return floor(((double)duration / 1e7)* captureVideoFormat.getFramerate() + 0.5);
else
break;
case CV_CAP_PROP_POS_FRAMES:
case CAP_PROP_POS_FRAMES:
return (double)nFrame;
case CV_CAP_PROP_POS_MSEC:
case CAP_PROP_POS_MSEC:
return (double)usedVideoSampleTime / 1e4;
case CAP_PROP_AUDIO_POS:
return (double)audioSamplePos;
case CV_CAP_PROP_POS_AVI_RATIO:
case CAP_PROP_POS_AVI_RATIO:
if (duration != 0)
return (double)usedVideoSampleTime / duration;
else
break;
case CV_CAP_PROP_BRIGHTNESS:
case CAP_PROP_BRIGHTNESS:
if (readComplexPropery<IAMVideoProcAmp>(VideoProcAmp_Brightness, cVal))
return cVal;
break;
case CV_CAP_PROP_CONTRAST:
case CAP_PROP_CONTRAST:
if (readComplexPropery<IAMVideoProcAmp>(VideoProcAmp_Contrast, cVal))
return cVal;
break;
case CV_CAP_PROP_SATURATION:
case CAP_PROP_SATURATION:
if (readComplexPropery<IAMVideoProcAmp>(VideoProcAmp_Saturation, cVal))
return cVal;
break;
case CV_CAP_PROP_HUE:
case CAP_PROP_HUE:
if (readComplexPropery<IAMVideoProcAmp>(VideoProcAmp_Hue, cVal))
return cVal;
break;
case CV_CAP_PROP_GAIN:
case CAP_PROP_GAIN:
if (readComplexPropery<IAMVideoProcAmp>(VideoProcAmp_Gain, cVal))
return cVal;
break;
case CV_CAP_PROP_SHARPNESS:
case CAP_PROP_SHARPNESS:
if (readComplexPropery<IAMVideoProcAmp>(VideoProcAmp_Sharpness, cVal))
return cVal;
break;
case CV_CAP_PROP_GAMMA:
case CAP_PROP_GAMMA:
if (readComplexPropery<IAMVideoProcAmp>(VideoProcAmp_Gamma, cVal))
return cVal;
break;
case CV_CAP_PROP_BACKLIGHT:
case CAP_PROP_BACKLIGHT:
if (readComplexPropery<IAMVideoProcAmp>(VideoProcAmp_BacklightCompensation, cVal))
return cVal;
break;
case CV_CAP_PROP_MONOCHROME:
case CAP_PROP_MONOCHROME:
if (readComplexPropery<IAMVideoProcAmp>(VideoProcAmp_ColorEnable, cVal))
return cVal == 0 ? 1 : 0;
break;
case CV_CAP_PROP_TEMPERATURE:
case CAP_PROP_TEMPERATURE:
if (readComplexPropery<IAMVideoProcAmp>(VideoProcAmp_WhiteBalance, cVal))
return cVal;
break;
case CV_CAP_PROP_PAN:
case CAP_PROP_PAN:
if (readComplexPropery<IAMCameraControl>(CameraControl_Pan, cVal))
return cVal;
break;
case CV_CAP_PROP_TILT:
case CAP_PROP_TILT:
if (readComplexPropery<IAMCameraControl>(CameraControl_Tilt, cVal))
return cVal;
break;
case CV_CAP_PROP_ROLL:
case CAP_PROP_ROLL:
if (readComplexPropery<IAMCameraControl>(CameraControl_Roll, cVal))
return cVal;
break;
case CV_CAP_PROP_IRIS:
case CAP_PROP_IRIS:
if (readComplexPropery<IAMCameraControl>(CameraControl_Iris, cVal))
return cVal;
break;
case CV_CAP_PROP_EXPOSURE:
case CV_CAP_PROP_AUTO_EXPOSURE:
case CAP_PROP_EXPOSURE:
case CAP_PROP_AUTO_EXPOSURE:
if (readComplexPropery<IAMCameraControl>(CameraControl_Exposure, cVal))
{
if (property_id == CV_CAP_PROP_EXPOSURE)
if (property_id == CAP_PROP_EXPOSURE)
return cVal;
else
return cVal == VideoProcAmp_Flags_Auto;
}
break;
case CV_CAP_PROP_ZOOM:
case CAP_PROP_ZOOM:
if (readComplexPropery<IAMCameraControl>(CameraControl_Zoom, cVal))
return cVal;
break;
case CV_CAP_PROP_FOCUS:
case CV_CAP_PROP_AUTOFOCUS:
case CAP_PROP_FOCUS:
case CAP_PROP_AUTOFOCUS:
if (readComplexPropery<IAMCameraControl>(CameraControl_Focus, cVal))
{
if (property_id == CV_CAP_PROP_FOCUS)
if (property_id == CAP_PROP_FOCUS)
return cVal;
else
return cVal == VideoProcAmp_Flags_Auto;
}
break;
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
case CV_CAP_PROP_WHITE_BALANCE_RED_V:
case CV_CAP_PROP_RECTIFICATION:
case CV_CAP_PROP_TRIGGER:
case CV_CAP_PROP_TRIGGER_DELAY:
case CV_CAP_PROP_GUID:
case CV_CAP_PROP_ISO_SPEED:
case CV_CAP_PROP_SETTINGS:
case CV_CAP_PROP_BUFFERSIZE:
case CAP_PROP_WHITE_BALANCE_BLUE_U:
case CAP_PROP_WHITE_BALANCE_RED_V:
case CAP_PROP_RECTIFICATION:
case CAP_PROP_TRIGGER:
case CAP_PROP_TRIGGER_DELAY:
case CAP_PROP_GUID:
case CAP_PROP_ISO_SPEED:
case CAP_PROP_SETTINGS:
case CAP_PROP_BUFFERSIZE:
case CAP_PROP_AUDIO_BASE_INDEX:
return audioBaseIndex;
case CAP_PROP_AUDIO_TOTAL_STREAMS:
@ -2246,7 +2246,7 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
if (isOpen)
switch (property_id)
{
case CV_CAP_PROP_MODE:
case CAP_PROP_MODE:
switch ((MSMFCapture_Mode)((int)value))
{
case MODE_SW:
@ -2256,107 +2256,107 @@ bool CvCapture_MSMF::setProperty( int property_id, double value )
default:
return false;
}
case CV_CAP_PROP_FOURCC:
case CAP_PROP_FOURCC:
return configureVideoOutput(newFormat, (int)cvRound(value));
case CV_CAP_PROP_FORMAT:
case CAP_PROP_FORMAT:
return configureVideoOutput(newFormat, (int)cvRound(value));
case CV_CAP_PROP_CONVERT_RGB:
case CAP_PROP_CONVERT_RGB:
convertFormat = (value != 0);
return configureVideoOutput(newFormat, outputVideoFormat);
case CV_CAP_PROP_SAR_NUM:
case CAP_PROP_SAR_NUM:
if (value > 0)
{
newFormat.aspectRatioNum = (UINT32)cvRound(value);
return configureVideoOutput(newFormat, outputVideoFormat);
}
break;
case CV_CAP_PROP_SAR_DEN:
case CAP_PROP_SAR_DEN:
if (value > 0)
{
newFormat.aspectRatioDenom = (UINT32)cvRound(value);
return configureVideoOutput(newFormat, outputVideoFormat);
}
break;
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
if (value >= 0)
{
newFormat.width = (UINT32)cvRound(value);
return configureVideoOutput(newFormat, outputVideoFormat);
}
break;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
if (value >= 0)
{
newFormat.height = (UINT32)cvRound(value);
return configureVideoOutput(newFormat, outputVideoFormat);
}
break;
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
if (value >= 0)
{
newFormat.setFramerate(value);
return configureVideoOutput(newFormat, outputVideoFormat);
}
break;
case CV_CAP_PROP_FRAME_COUNT:
case CAP_PROP_FRAME_COUNT:
break;
case CV_CAP_PROP_POS_AVI_RATIO:
case CAP_PROP_POS_AVI_RATIO:
if (duration != 0)
return setTime(duration * value, true);
break;
case CV_CAP_PROP_POS_FRAMES:
case CAP_PROP_POS_FRAMES:
if (std::fabs(captureVideoFormat.getFramerate()) > 0)
return setTime((int)value);
break;
case CV_CAP_PROP_POS_MSEC:
case CAP_PROP_POS_MSEC:
return setTime(value * 1e4, false);
case CV_CAP_PROP_BRIGHTNESS:
case CAP_PROP_BRIGHTNESS:
return writeComplexProperty<IAMVideoProcAmp>(VideoProcAmp_Brightness, value, VideoProcAmp_Flags_Manual);
case CV_CAP_PROP_CONTRAST:
case CAP_PROP_CONTRAST:
return writeComplexProperty<IAMVideoProcAmp>(VideoProcAmp_Contrast, value, VideoProcAmp_Flags_Manual);
case CV_CAP_PROP_SATURATION:
case CAP_PROP_SATURATION:
return writeComplexProperty<IAMVideoProcAmp>(VideoProcAmp_Saturation, value, VideoProcAmp_Flags_Manual);
case CV_CAP_PROP_HUE:
case CAP_PROP_HUE:
return writeComplexProperty<IAMVideoProcAmp>(VideoProcAmp_Hue, value, VideoProcAmp_Flags_Manual);
case CV_CAP_PROP_GAIN:
case CAP_PROP_GAIN:
return writeComplexProperty<IAMVideoProcAmp>(VideoProcAmp_Gain, value, VideoProcAmp_Flags_Manual);
case CV_CAP_PROP_SHARPNESS:
case CAP_PROP_SHARPNESS:
return writeComplexProperty<IAMVideoProcAmp>(VideoProcAmp_Sharpness, value, VideoProcAmp_Flags_Manual);
case CV_CAP_PROP_GAMMA:
case CAP_PROP_GAMMA:
return writeComplexProperty<IAMVideoProcAmp>(VideoProcAmp_Gamma, value, VideoProcAmp_Flags_Manual);
case CV_CAP_PROP_BACKLIGHT:
case CAP_PROP_BACKLIGHT:
return writeComplexProperty<IAMVideoProcAmp>(VideoProcAmp_BacklightCompensation, value, VideoProcAmp_Flags_Manual);
case CV_CAP_PROP_MONOCHROME:
case CAP_PROP_MONOCHROME:
return writeComplexProperty<IAMVideoProcAmp>(VideoProcAmp_ColorEnable, value, VideoProcAmp_Flags_Manual);
case CV_CAP_PROP_TEMPERATURE:
case CAP_PROP_TEMPERATURE:
return writeComplexProperty<IAMVideoProcAmp>(VideoProcAmp_WhiteBalance, value, VideoProcAmp_Flags_Manual);
case CV_CAP_PROP_PAN:
case CAP_PROP_PAN:
return writeComplexProperty<IAMCameraControl>(CameraControl_Pan, value, CameraControl_Flags_Manual);
case CV_CAP_PROP_TILT:
case CAP_PROP_TILT:
return writeComplexProperty<IAMCameraControl>(CameraControl_Tilt, value, CameraControl_Flags_Manual);
case CV_CAP_PROP_ROLL:
case CAP_PROP_ROLL:
return writeComplexProperty<IAMCameraControl>(CameraControl_Roll, value, CameraControl_Flags_Manual);
case CV_CAP_PROP_IRIS:
case CAP_PROP_IRIS:
return writeComplexProperty<IAMCameraControl>(CameraControl_Iris, value, CameraControl_Flags_Manual);
case CV_CAP_PROP_EXPOSURE:
case CAP_PROP_EXPOSURE:
return writeComplexProperty<IAMCameraControl>(CameraControl_Exposure, value, CameraControl_Flags_Manual);
case CV_CAP_PROP_AUTO_EXPOSURE:
case CAP_PROP_AUTO_EXPOSURE:
return writeComplexProperty<IAMCameraControl>(CameraControl_Exposure, value, value != 0 ? VideoProcAmp_Flags_Auto : VideoProcAmp_Flags_Manual);
case CV_CAP_PROP_ZOOM:
case CAP_PROP_ZOOM:
return writeComplexProperty<IAMCameraControl>(CameraControl_Zoom, value, CameraControl_Flags_Manual);
case CV_CAP_PROP_FOCUS:
case CAP_PROP_FOCUS:
return writeComplexProperty<IAMCameraControl>(CameraControl_Focus, value, CameraControl_Flags_Manual);
case CV_CAP_PROP_AUTOFOCUS:
case CAP_PROP_AUTOFOCUS:
return writeComplexProperty<IAMCameraControl>(CameraControl_Focus, value, value != 0 ? CameraControl_Flags_Auto : CameraControl_Flags_Manual);
case CV_CAP_PROP_WHITE_BALANCE_BLUE_U:
case CV_CAP_PROP_WHITE_BALANCE_RED_V:
case CV_CAP_PROP_RECTIFICATION:
case CV_CAP_PROP_TRIGGER:
case CV_CAP_PROP_TRIGGER_DELAY:
case CV_CAP_PROP_GUID:
case CV_CAP_PROP_ISO_SPEED:
case CV_CAP_PROP_SETTINGS:
case CV_CAP_PROP_BUFFERSIZE:
case CAP_PROP_WHITE_BALANCE_BLUE_U:
case CAP_PROP_WHITE_BALANCE_RED_V:
case CAP_PROP_RECTIFICATION:
case CAP_PROP_TRIGGER:
case CAP_PROP_TRIGGER_DELAY:
case CAP_PROP_GUID:
case CAP_PROP_ISO_SPEED:
case CAP_PROP_SETTINGS:
case CAP_PROP_BUFFERSIZE:
default:
break;
}


@ -43,6 +43,8 @@
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
using namespace cv;
#ifdef HAVE_OPENNI2
#include <queue>
@ -100,7 +102,7 @@ private:
}
};
class CvCapture_OpenNI2 : public CvCapture
class CvCapture_OpenNI2 : public IVideoCapture
{
public:
enum { DEVICE_DEFAULT=0, DEVICE_MS_KINECT=0, DEVICE_ASUS_XTION=1, DEVICE_ORBBEC_ASTRA=2, DEVICE_MAX=2 };
@ -114,25 +116,17 @@ public:
CvCapture_OpenNI2(int index = 0);
CvCapture_OpenNI2(const char * filename);
virtual ~CvCapture_OpenNI2();
~CvCapture_OpenNI2();
virtual double getProperty(int propIdx) const CV_OVERRIDE;
virtual bool setProperty(int propIdx, double propVal) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual IplImage* retrieveFrame(int outputType) CV_OVERRIDE;
virtual int getCaptureDomain() CV_OVERRIDE { return cv::CAP_OPENNI2; }
double getProperty(int propIdx) const CV_OVERRIDE;
bool setProperty(int propIdx, double propVal) CV_OVERRIDE;
bool grabFrame() CV_OVERRIDE;
bool retrieveFrame(int outputType, OutputArray arr) CV_OVERRIDE;
int getCaptureDomain() CV_OVERRIDE { return cv::CAP_OPENNI2; }
bool isOpened() const;
bool isOpened() const CV_OVERRIDE;
protected:
struct OutputMap
{
public:
cv::Mat mat;
IplImage* getIplImagePtr();
private:
IplImage iplHeader;
};
static const int outputMapsTypesCount = 8;
@ -140,14 +134,14 @@ protected:
CvCapture_OpenNI2(int index, const char * filename);
IplImage* retrieveDepthMap();
IplImage* retrievePointCloudMap();
IplImage* retrieveDisparityMap();
IplImage* retrieveDisparityMap_32F();
IplImage* retrieveValidDepthMask();
IplImage* retrieveBGRImage();
IplImage* retrieveGrayImage();
IplImage* retrieveIrImage();
Mat retrieveDepthMap();
Mat retrievePointCloudMap();
Mat retrieveDisparityMap();
Mat retrieveDisparityMap_32F();
Mat retrieveValidDepthMask();
Mat retrieveBGRImage();
Mat retrieveGrayImage();
Mat retrieveIrImage();
void toggleStream(int stream, bool toggle);
bool readCamerasParams();
@ -186,18 +180,9 @@ protected:
// The value for pixels without a valid disparity measurement
int noSampleValue;
std::vector<OutputMap> outputMaps;
std::vector<cv::Mat> outputMaps;
};
IplImage* CvCapture_OpenNI2::OutputMap::getIplImagePtr()
{
if( mat.empty() )
return 0;
iplHeader = cvIplImage(mat);
return &iplHeader;
}
bool CvCapture_OpenNI2::isOpened() const
{
return isContextOpened;
@ -296,10 +281,10 @@ CvCapture_OpenNI2::CvCapture_OpenNI2(int index, const char * filename) :
if (needIR)
toggleStream(CV_IR_STREAM, true);
setProperty(CV_CAP_PROP_OPENNI_REGISTRATION, 1.0);
setProperty(CAP_PROP_OPENNI_REGISTRATION, 1.0);
// default for Kinect2 camera
setProperty(CV_CAP_PROP_OPENNI2_MIRROR, 0.0);
setProperty(CAP_PROP_OPENNI2_MIRROR, 0.0);
isContextOpened = true;
@ -440,17 +425,17 @@ double CvCapture_OpenNI2::getProperty( int propIdx ) const
if( isOpened() )
{
int purePropIdx = propIdx & ~CV_CAP_OPENNI_GENERATORS_MASK;
int purePropIdx = propIdx & ~CAP_OPENNI_GENERATORS_MASK;
if( (propIdx & CV_CAP_OPENNI_GENERATORS_MASK) == CV_CAP_OPENNI_IMAGE_GENERATOR )
if( (propIdx & CAP_OPENNI_GENERATORS_MASK) == CAP_OPENNI_IMAGE_GENERATOR )
{
propValue = getImageGeneratorProperty( purePropIdx );
}
else if( (propIdx & CV_CAP_OPENNI_GENERATORS_MASK) == CV_CAP_OPENNI_DEPTH_GENERATOR )
else if( (propIdx & CAP_OPENNI_GENERATORS_MASK) == CAP_OPENNI_DEPTH_GENERATOR )
{
propValue = getDepthGeneratorProperty( purePropIdx );
}
else if ((propIdx & CV_CAP_OPENNI_GENERATORS_MASK) == CV_CAP_OPENNI_IR_GENERATOR)
else if ((propIdx & CAP_OPENNI_GENERATORS_MASK) == CAP_OPENNI_IR_GENERATOR)
{
propValue = getIrGeneratorProperty(purePropIdx);
}
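
The generator mask makes property ids composable from user code: adding a generator flag onto a plain property id routes the query through the dispatch above. A sketch (device index is a placeholder):

#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture cap(0, cv::CAP_OPENNI2);
    if (!cap.isOpened())
        return 1;
    // Dispatches to getImageGeneratorProperty(CAP_PROP_FPS) via the mask logic above.
    double colorFps = cap.get(cv::CAP_OPENNI_IMAGE_GENERATOR + cv::CAP_PROP_FPS);
    return colorFps > 0 ? 0 : 1;
}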
@ -468,17 +453,17 @@ bool CvCapture_OpenNI2::setProperty( int propIdx, double propValue )
bool isSet = false;
if( isOpened() )
{
int purePropIdx = propIdx & ~CV_CAP_OPENNI_GENERATORS_MASK;
int purePropIdx = propIdx & ~CAP_OPENNI_GENERATORS_MASK;
if( (propIdx & CV_CAP_OPENNI_GENERATORS_MASK) == CV_CAP_OPENNI_IMAGE_GENERATOR )
if( (propIdx & CAP_OPENNI_GENERATORS_MASK) == CAP_OPENNI_IMAGE_GENERATOR )
{
isSet = setImageGeneratorProperty( purePropIdx, propValue );
}
else if( (propIdx & CV_CAP_OPENNI_GENERATORS_MASK) == CV_CAP_OPENNI_DEPTH_GENERATOR )
else if( (propIdx & CAP_OPENNI_GENERATORS_MASK) == CAP_OPENNI_DEPTH_GENERATOR )
{
isSet = setDepthGeneratorProperty( purePropIdx, propValue );
}
else if ((propIdx & CV_CAP_OPENNI_GENERATORS_MASK) == CV_CAP_OPENNI_IR_GENERATOR)
else if ((propIdx & CAP_OPENNI_GENERATORS_MASK) == CAP_OPENNI_IR_GENERATOR)
{
isSet = setIrGeneratorProperty(purePropIdx, propValue);
}
@ -497,19 +482,19 @@ double CvCapture_OpenNI2::getCommonProperty( int propIdx ) const
switch( propIdx )
{
case CV_CAP_PROP_FRAME_WIDTH :
case CV_CAP_PROP_FRAME_HEIGHT :
case CV_CAP_PROP_FPS :
case CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH :
case CV_CAP_PROP_OPENNI_BASELINE :
case CV_CAP_PROP_OPENNI_FOCAL_LENGTH :
case CV_CAP_PROP_OPENNI_REGISTRATION :
case CAP_PROP_FRAME_WIDTH :
case CAP_PROP_FRAME_HEIGHT :
case CAP_PROP_FPS :
case CAP_PROP_OPENNI_FRAME_MAX_DEPTH :
case CAP_PROP_OPENNI_BASELINE :
case CAP_PROP_OPENNI_FOCAL_LENGTH :
case CAP_PROP_OPENNI_REGISTRATION :
propValue = getDepthGeneratorProperty( propIdx );
break;
case CV_CAP_PROP_OPENNI2_SYNC :
case CAP_PROP_OPENNI2_SYNC :
propValue = const_cast<CvCapture_OpenNI2 *>(this)->device.getDepthColorSyncEnabled();
break;
case CV_CAP_PROP_OPENNI2_MIRROR:
case CAP_PROP_OPENNI2_MIRROR:
{
bool isMirroring = false;
for (int i = 0; i < CV_MAX_NUM_STREAMS; ++i)
@ -530,7 +515,7 @@ bool CvCapture_OpenNI2::setCommonProperty( int propIdx, double propValue )
switch( propIdx )
{
case CV_CAP_PROP_OPENNI2_MIRROR:
case CAP_PROP_OPENNI2_MIRROR:
{
bool mirror = propValue > 0.0 ? true : false;
for (int i = 0; i < CV_MAX_NUM_STREAMS; ++i)
@ -542,16 +527,16 @@ bool CvCapture_OpenNI2::setCommonProperty( int propIdx, double propValue )
break;
// There is a set of properties that correspond to the depth generator by default
// (i.e. when they are passed without a particular generator flag).
case CV_CAP_PROP_OPENNI_REGISTRATION:
case CAP_PROP_OPENNI_REGISTRATION:
isSet = setDepthGeneratorProperty(propIdx, propValue);
break;
case CV_CAP_PROP_OPENNI2_SYNC:
case CAP_PROP_OPENNI2_SYNC:
isSet = device.setDepthColorSyncEnabled(propValue > 0.0) == openni::STATUS_OK;
break;
case CV_CAP_PROP_FRAME_WIDTH:
case CV_CAP_PROP_FRAME_HEIGHT:
case CV_CAP_PROP_AUTOFOCUS:
case CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_AUTOFOCUS:
isSet = false;
break;
@ -572,41 +557,41 @@ double CvCapture_OpenNI2::getDepthGeneratorProperty( int propIdx ) const
switch( propIdx )
{
case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT:
case CAP_PROP_OPENNI_GENERATOR_PRESENT:
propValue = streams[CV_DEPTH_STREAM].isValid();
break;
case CV_CAP_PROP_FRAME_WIDTH :
case CAP_PROP_FRAME_WIDTH :
propValue = streams[CV_DEPTH_STREAM].getVideoMode().getResolutionX();
break;
case CV_CAP_PROP_FRAME_HEIGHT :
case CAP_PROP_FRAME_HEIGHT :
propValue = streams[CV_DEPTH_STREAM].getVideoMode().getResolutionY();
break;
case CV_CAP_PROP_FPS :
case CAP_PROP_FPS :
mode = streams[CV_DEPTH_STREAM].getVideoMode();
propValue = mode.getFps();
break;
case CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH :
case CAP_PROP_OPENNI_FRAME_MAX_DEPTH :
propValue = streams[CV_DEPTH_STREAM].getMaxPixelValue();
break;
case CV_CAP_PROP_OPENNI_BASELINE :
case CAP_PROP_OPENNI_BASELINE :
if(baseline <= 0)
if (!const_cast<CvCapture_OpenNI2*>(this)->readCamerasParams())
return 0;
propValue = baseline;
break;
case CV_CAP_PROP_OPENNI_FOCAL_LENGTH :
case CAP_PROP_OPENNI_FOCAL_LENGTH :
if(depthFocalLength_VGA <= 0)
if (!const_cast<CvCapture_OpenNI2*>(this)->readCamerasParams())
return 0;
propValue = (double)depthFocalLength_VGA;
break;
case CV_CAP_PROP_OPENNI_REGISTRATION :
case CAP_PROP_OPENNI_REGISTRATION :
propValue = device.getImageRegistrationMode();
break;
case CV_CAP_PROP_POS_MSEC :
case CAP_PROP_POS_MSEC :
propValue = (double)streamFrames[CV_DEPTH_STREAM].getTimestamp();
break;
case CV_CAP_PROP_POS_FRAMES :
case CAP_PROP_POS_FRAMES :
propValue = streamFrames[CV_DEPTH_STREAM].getFrameIndex();
break;
default :
@ -622,14 +607,14 @@ bool CvCapture_OpenNI2::setDepthGeneratorProperty( int propIdx, double propValue
switch( propIdx )
{
case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT:
case CAP_PROP_OPENNI_GENERATOR_PRESENT:
if (isContextOpened)
{
toggleStream(CV_DEPTH_STREAM, propValue > 0.0);
isSet = true;
}
break;
case CV_CAP_PROP_OPENNI_REGISTRATION:
case CAP_PROP_OPENNI_REGISTRATION:
{
CV_Assert(streams[CV_DEPTH_STREAM].isValid());
if( propValue != 0.0 ) // "on"
@ -690,22 +675,22 @@ double CvCapture_OpenNI2::getImageGeneratorProperty( int propIdx ) const
openni::VideoMode mode;
switch( propIdx )
{
case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT:
case CAP_PROP_OPENNI_GENERATOR_PRESENT:
propValue = streams[CV_COLOR_STREAM].isValid();
break;
case CV_CAP_PROP_FRAME_WIDTH :
case CAP_PROP_FRAME_WIDTH :
propValue = streams[CV_COLOR_STREAM].getVideoMode().getResolutionX();
break;
case CV_CAP_PROP_FRAME_HEIGHT :
case CAP_PROP_FRAME_HEIGHT :
propValue = streams[CV_COLOR_STREAM].getVideoMode().getResolutionY();
break;
case CV_CAP_PROP_FPS :
case CAP_PROP_FPS :
propValue = streams[CV_COLOR_STREAM].getVideoMode().getFps();
break;
case CV_CAP_PROP_POS_MSEC :
case CAP_PROP_POS_MSEC :
propValue = (double)streamFrames[CV_COLOR_STREAM].getTimestamp();
break;
case CV_CAP_PROP_POS_FRAMES :
case CAP_PROP_POS_FRAMES :
propValue = (double)streamFrames[CV_COLOR_STREAM].getFrameIndex();
break;
default :
@ -721,14 +706,14 @@ bool CvCapture_OpenNI2::setImageGeneratorProperty(int propIdx, double propValue)
switch( propIdx )
{
case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT:
case CAP_PROP_OPENNI_GENERATOR_PRESENT:
if (isContextOpened)
{
toggleStream(CV_COLOR_STREAM, propValue > 0.0);
isSet = true;
}
break;
case CV_CAP_PROP_OPENNI_OUTPUT_MODE :
case CAP_PROP_OPENNI_OUTPUT_MODE :
{
if (!streams[CV_COLOR_STREAM].isValid())
return isSet;
@ -736,23 +721,23 @@ bool CvCapture_OpenNI2::setImageGeneratorProperty(int propIdx, double propValue)
switch( cvRound(propValue) )
{
case CV_CAP_OPENNI_VGA_30HZ :
case CAP_OPENNI_VGA_30HZ :
mode.setResolution(640,480);
mode.setFps(30);
break;
case CV_CAP_OPENNI_SXGA_15HZ :
case CAP_OPENNI_SXGA_15HZ :
mode.setResolution(1280, 960);
mode.setFps(15);
break;
case CV_CAP_OPENNI_SXGA_30HZ :
case CAP_OPENNI_SXGA_30HZ :
mode.setResolution(1280, 960);
mode.setFps(30);
break;
case CV_CAP_OPENNI_QVGA_30HZ :
case CAP_OPENNI_QVGA_30HZ :
mode.setResolution(320, 240);
mode.setFps(30);
break;
case CV_CAP_OPENNI_QVGA_60HZ :
case CAP_OPENNI_QVGA_60HZ :
mode.setResolution(320, 240);
mode.setFps(60);
break;
@ -787,22 +772,22 @@ double CvCapture_OpenNI2::getIrGeneratorProperty(int propIdx) const
openni::VideoMode mode;
switch (propIdx)
{
case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT:
case CAP_PROP_OPENNI_GENERATOR_PRESENT:
propValue = streams[CV_IR_STREAM].isValid();
break;
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
propValue = streams[CV_IR_STREAM].getVideoMode().getResolutionX();
break;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
propValue = streams[CV_IR_STREAM].getVideoMode().getResolutionY();
break;
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
propValue = streams[CV_IR_STREAM].getVideoMode().getFps();
break;
case CV_CAP_PROP_POS_MSEC:
case CAP_PROP_POS_MSEC:
propValue = (double)streamFrames[CV_IR_STREAM].getTimestamp();
break;
case CV_CAP_PROP_POS_FRAMES:
case CAP_PROP_POS_FRAMES:
propValue = (double)streamFrames[CV_IR_STREAM].getFrameIndex();
break;
default:
@ -818,14 +803,14 @@ bool CvCapture_OpenNI2::setIrGeneratorProperty(int propIdx, double propValue)
switch (propIdx)
{
case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT:
case CAP_PROP_OPENNI_GENERATOR_PRESENT:
if (isContextOpened)
{
toggleStream(CV_IR_STREAM, propValue > 0.0);
isSet = true;
}
break;
case CV_CAP_PROP_OPENNI_OUTPUT_MODE:
case CAP_PROP_OPENNI_OUTPUT_MODE:
{
if (!streams[CV_IR_STREAM].isValid())
return isSet;
@ -833,23 +818,23 @@ bool CvCapture_OpenNI2::setIrGeneratorProperty(int propIdx, double propValue)
switch (cvRound(propValue))
{
case CV_CAP_OPENNI_VGA_30HZ:
case CAP_OPENNI_VGA_30HZ:
mode.setResolution(640, 480);
mode.setFps(30);
break;
case CV_CAP_OPENNI_SXGA_15HZ:
case CAP_OPENNI_SXGA_15HZ:
mode.setResolution(1280, 960);
mode.setFps(15);
break;
case CV_CAP_OPENNI_SXGA_30HZ:
case CAP_OPENNI_SXGA_30HZ:
mode.setResolution(1280, 960);
mode.setFps(30);
break;
case CV_CAP_OPENNI_QVGA_30HZ:
case CAP_OPENNI_QVGA_30HZ:
mode.setResolution(320, 240);
mode.setFps(30);
break;
case CV_CAP_OPENNI_QVGA_60HZ:
case CAP_OPENNI_QVGA_60HZ:
mode.setResolution(320, 240);
mode.setFps(60);
break;
@ -913,20 +898,20 @@ inline void getDepthMapFromMetaData(const openni::VideoFrameRef& depthMetaData,
depthMap.setTo( cv::Scalar::all( CvCapture_OpenNI2::INVALID_PIXEL_VAL ), badMask );
}
IplImage* CvCapture_OpenNI2::retrieveDepthMap()
Mat CvCapture_OpenNI2::retrieveDepthMap()
{
if( !streamFrames[CV_DEPTH_STREAM].isValid() )
return 0;
return Mat();
getDepthMapFromMetaData(streamFrames[CV_DEPTH_STREAM], outputMaps[CV_CAP_OPENNI_DEPTH_MAP].mat, noSampleValue, shadowValue );
getDepthMapFromMetaData(streamFrames[CV_DEPTH_STREAM], outputMaps[CAP_OPENNI_DEPTH_MAP], noSampleValue, shadowValue );
return outputMaps[CV_CAP_OPENNI_DEPTH_MAP].getIplImagePtr();
return outputMaps[CAP_OPENNI_DEPTH_MAP];
}
IplImage* CvCapture_OpenNI2::retrievePointCloudMap()
Mat CvCapture_OpenNI2::retrievePointCloudMap()
{
if( !streamFrames[CV_DEPTH_STREAM].isValid() )
return 0;
return Mat();
cv::Mat depthImg;
getDepthMapFromMetaData(streamFrames[CV_DEPTH_STREAM], depthImg, noSampleValue, shadowValue);
@ -952,9 +937,9 @@ IplImage* CvCapture_OpenNI2::retrievePointCloudMap()
}
}
outputMaps[CV_CAP_OPENNI_POINT_CLOUD_MAP].mat = pointCloud_XYZ;
outputMaps[CAP_OPENNI_POINT_CLOUD_MAP] = pointCloud_XYZ;
return outputMaps[CV_CAP_OPENNI_POINT_CLOUD_MAP].getIplImagePtr();
return outputMaps[CAP_OPENNI_POINT_CLOUD_MAP];
}
static void computeDisparity_32F( const openni::VideoFrameRef& depthMetaData, cv::Mat& disp, double baseline, int F, int noSampleValue, int shadowValue)
@ -980,46 +965,46 @@ static void computeDisparity_32F( const openni::VideoFrameRef& depthMetaData, cv
}
}
IplImage* CvCapture_OpenNI2::retrieveDisparityMap()
Mat CvCapture_OpenNI2::retrieveDisparityMap()
{
if (!streamFrames[CV_DEPTH_STREAM].isValid())
return nullptr;
return Mat();
if (!readCamerasParams())
return nullptr;
return Mat();
cv::Mat disp32;
computeDisparity_32F(streamFrames[CV_DEPTH_STREAM], disp32, baseline, depthFocalLength_VGA, noSampleValue, shadowValue);
disp32.convertTo(outputMaps[CV_CAP_OPENNI_DISPARITY_MAP].mat, CV_8UC1);
disp32.convertTo(outputMaps[CAP_OPENNI_DISPARITY_MAP], CV_8UC1);
return outputMaps[CV_CAP_OPENNI_DISPARITY_MAP].getIplImagePtr();
return outputMaps[CAP_OPENNI_DISPARITY_MAP];
}
IplImage* CvCapture_OpenNI2::retrieveDisparityMap_32F()
Mat CvCapture_OpenNI2::retrieveDisparityMap_32F()
{
if (!streamFrames[CV_DEPTH_STREAM].isValid())
return nullptr;
return Mat();
if (!readCamerasParams())
return nullptr;
return Mat();
computeDisparity_32F(streamFrames[CV_DEPTH_STREAM], outputMaps[CV_CAP_OPENNI_DISPARITY_MAP_32F].mat, baseline, depthFocalLength_VGA, noSampleValue, shadowValue);
computeDisparity_32F(streamFrames[CV_DEPTH_STREAM], outputMaps[CAP_OPENNI_DISPARITY_MAP_32F], baseline, depthFocalLength_VGA, noSampleValue, shadowValue);
return outputMaps[CV_CAP_OPENNI_DISPARITY_MAP_32F].getIplImagePtr();
return outputMaps[CAP_OPENNI_DISPARITY_MAP_32F];
}
IplImage* CvCapture_OpenNI2::retrieveValidDepthMask()
Mat CvCapture_OpenNI2::retrieveValidDepthMask()
{
if (!streamFrames[CV_DEPTH_STREAM].isValid())
return nullptr;
return Mat();
cv::Mat d;
getDepthMapFromMetaData(streamFrames[CV_DEPTH_STREAM], d, noSampleValue, shadowValue);
outputMaps[CV_CAP_OPENNI_VALID_DEPTH_MASK].mat = d != CvCapture_OpenNI2::INVALID_PIXEL_VAL;
outputMaps[CAP_OPENNI_VALID_DEPTH_MASK] = d != CvCapture_OpenNI2::INVALID_PIXEL_VAL;
return outputMaps[CV_CAP_OPENNI_VALID_DEPTH_MASK].getIplImagePtr();
return outputMaps[CAP_OPENNI_VALID_DEPTH_MASK];
}
inline void getBGRImageFromMetaData( const openni::VideoFrameRef& imageMetaData, cv::Mat& bgrImage )
@ -1053,100 +1038,102 @@ inline void getGrayImageFromMetaData(const openni::VideoFrameRef& imageMetaData,
}
}
IplImage* CvCapture_OpenNI2::retrieveBGRImage()
Mat CvCapture_OpenNI2::retrieveBGRImage()
{
if( !streamFrames[CV_COLOR_STREAM].isValid() )
return 0;
return Mat();
getBGRImageFromMetaData(streamFrames[CV_COLOR_STREAM], outputMaps[CV_CAP_OPENNI_BGR_IMAGE].mat );
getBGRImageFromMetaData(streamFrames[CV_COLOR_STREAM], outputMaps[CAP_OPENNI_BGR_IMAGE] );
return outputMaps[CV_CAP_OPENNI_BGR_IMAGE].getIplImagePtr();
return outputMaps[CAP_OPENNI_BGR_IMAGE];
}
IplImage* CvCapture_OpenNI2::retrieveGrayImage()
Mat CvCapture_OpenNI2::retrieveGrayImage()
{
if (!streamFrames[CV_COLOR_STREAM].isValid())
return 0;
return Mat();
CV_Assert(streamFrames[CV_COLOR_STREAM].getVideoMode().getPixelFormat() == openni::PIXEL_FORMAT_RGB888); // RGB
cv::Mat rgbImage;
getBGRImageFromMetaData(streamFrames[CV_COLOR_STREAM], rgbImage);
cv::cvtColor( rgbImage, outputMaps[CV_CAP_OPENNI_GRAY_IMAGE].mat, cv::COLOR_BGR2GRAY );
cv::cvtColor( rgbImage, outputMaps[CAP_OPENNI_GRAY_IMAGE], cv::COLOR_BGR2GRAY );
return outputMaps[CV_CAP_OPENNI_GRAY_IMAGE].getIplImagePtr();
return outputMaps[CAP_OPENNI_GRAY_IMAGE];
}
IplImage* CvCapture_OpenNI2::retrieveIrImage()
Mat CvCapture_OpenNI2::retrieveIrImage()
{
if (!streamFrames[CV_IR_STREAM].isValid())
return 0;
return Mat();
getGrayImageFromMetaData(streamFrames[CV_IR_STREAM], outputMaps[CV_CAP_OPENNI_IR_IMAGE].mat);
getGrayImageFromMetaData(streamFrames[CV_IR_STREAM], outputMaps[CAP_OPENNI_IR_IMAGE]);
return outputMaps[CV_CAP_OPENNI_IR_IMAGE].getIplImagePtr();
return outputMaps[CAP_OPENNI_IR_IMAGE];
}
IplImage* CvCapture_OpenNI2::retrieveFrame( int outputType )
bool CvCapture_OpenNI2::retrieveFrame( int outputType, OutputArray arr )
{
IplImage* image = 0;
Mat image;
CV_Assert( outputType < outputMapsTypesCount && outputType >= 0);
if( outputType == CV_CAP_OPENNI_DEPTH_MAP )
if( outputType == CAP_OPENNI_DEPTH_MAP )
{
image = retrieveDepthMap();
}
else if( outputType == CV_CAP_OPENNI_POINT_CLOUD_MAP )
else if( outputType == CAP_OPENNI_POINT_CLOUD_MAP )
{
image = retrievePointCloudMap();
}
else if( outputType == CV_CAP_OPENNI_DISPARITY_MAP )
else if( outputType == CAP_OPENNI_DISPARITY_MAP )
{
image = retrieveDisparityMap();
}
else if( outputType == CV_CAP_OPENNI_DISPARITY_MAP_32F )
else if( outputType == CAP_OPENNI_DISPARITY_MAP_32F )
{
image = retrieveDisparityMap_32F();
}
else if( outputType == CV_CAP_OPENNI_VALID_DEPTH_MASK )
else if( outputType == CAP_OPENNI_VALID_DEPTH_MASK )
{
image = retrieveValidDepthMask();
}
else if( outputType == CV_CAP_OPENNI_BGR_IMAGE )
else if( outputType == CAP_OPENNI_BGR_IMAGE )
{
image = retrieveBGRImage();
}
else if( outputType == CV_CAP_OPENNI_GRAY_IMAGE )
else if( outputType == CAP_OPENNI_GRAY_IMAGE )
{
image = retrieveGrayImage();
}
else if( outputType == CV_CAP_OPENNI_IR_IMAGE )
else if( outputType == CAP_OPENNI_IR_IMAGE )
{
image = retrieveIrImage();
}
return image;
else
{
return false;
}
if (image.empty())
return false;
image.copyTo(arr);
return true;
}
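With retrieveFrame now filling an OutputArray, callers obtain each map through the standard grab/retrieve pair. A minimal sketch, again assuming an OpenNI2 build (illustrative only, not part of this patch):

#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture cap(cv::CAP_OPENNI2);
    cv::Mat depth, bgr;
    if (cap.grab())
    {
        // retrieve() now returns false when the requested stream is invalid
        cap.retrieve(depth, cv::CAP_OPENNI_DEPTH_MAP);
        cap.retrieve(bgr, cv::CAP_OPENNI_BGR_IMAGE);
    }
    return 0;
}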
cv::Ptr<cv::IVideoCapture> cv::create_OpenNI2_capture_cam( int index )
{
CvCapture_OpenNI2* capture = new CvCapture_OpenNI2( index );
Ptr<CvCapture_OpenNI2> capture = makePtr<CvCapture_OpenNI2>( index );
if( capture->isOpened() )
return cv::makePtr<cv::LegacyCapture>(capture);
delete capture;
return capture;
return 0;
}
cv::Ptr<cv::IVideoCapture> cv::create_OpenNI2_capture_file( const std::string &filename )
{
CvCapture_OpenNI2* capture = new CvCapture_OpenNI2( filename.c_str() );
Ptr<CvCapture_OpenNI2> capture = makePtr<CvCapture_OpenNI2>( filename.c_str() );
if( capture->isOpened() )
return cv::makePtr<cv::LegacyCapture>(capture);
delete capture;
return capture;
return 0;
}

View File

@ -46,6 +46,8 @@
#include "precomp.hpp"
#include "cap_interface.hpp"
using namespace cv;
#ifdef HAVE_PVAPI
#if !defined _WIN32 && !defined _LINUX
#define _LINUX
@ -71,25 +73,26 @@
/********************* Capturing video from camera via PvAPI *********************/
class CvCaptureCAM_PvAPI : public CvCapture
class CvCaptureCAM_PvAPI : public IVideoCapture
{
public:
CvCaptureCAM_PvAPI();
virtual ~CvCaptureCAM_PvAPI()
~CvCaptureCAM_PvAPI()
{
close();
}
virtual bool open( int index );
virtual void close();
virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
virtual int getCaptureDomain() CV_OVERRIDE
bool open( int index );
void close();
double getProperty(int) const CV_OVERRIDE;
bool setProperty(int, double) CV_OVERRIDE;
bool grabFrame() CV_OVERRIDE;
bool retrieveFrame(int, OutputArray) CV_OVERRIDE;
int getCaptureDomain() CV_OVERRIDE
{
return CV_CAP_PVAPI;
return CAP_PVAPI;
}
bool isOpened() const CV_OVERRIDE { return Camera.Handle != 0; }
protected:
#ifndef _WIN32
@ -107,7 +110,7 @@ protected:
tPvFrame Frame;
} tCamera;
IplImage *frame;
Mat frame;
tCamera Camera;
tPvErr Errcode;
};
@ -115,7 +118,6 @@ protected:
CvCaptureCAM_PvAPI::CvCaptureCAM_PvAPI()
{
frame = NULL;
memset(&this->Camera, 0, sizeof(this->Camera));
}
@ -218,13 +220,14 @@ bool CvCaptureCAM_PvAPI::grabFrame()
}
IplImage* CvCaptureCAM_PvAPI::retrieveFrame(int)
bool CvCaptureCAM_PvAPI::retrieveFrame(int, OutputArray arr)
{
if (PvCaptureWaitForFrameDone(Camera.Handle, &(Camera.Frame), 1000) == ePvErrSuccess)
{
return frame;
frame.copyTo(arr);
return true;
}
else return NULL;
else return false;
}
double CvCaptureCAM_PvAPI::getProperty( int property_id ) const
@ -233,22 +236,23 @@ double CvCaptureCAM_PvAPI::getProperty( int property_id ) const
switch ( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
PvAttrUint32Get(Camera.Handle, "Width", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
PvAttrUint32Get(Camera.Handle, "Height", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_EXPOSURE:
case CAP_PROP_EXPOSURE:
PvAttrUint32Get(Camera.Handle,"ExposureValue",&nTemp);
return (double)nTemp;
case CV_CAP_PROP_FPS:
case CAP_PROP_FPS:
tPvFloat32 nfTemp;
PvAttrFloat32Get(Camera.Handle, "StatFrameRate", &nfTemp);
return (double)nfTemp;
case CV_CAP_PROP_PVAPI_MULTICASTIP:
char mEnable[2];
char mIp[11];
case CAP_PROP_PVAPI_MULTICASTIP:
{
char mEnable[4] {0};
char mIp[16] {0};
PvAttrEnumGet(Camera.Handle,"MulticastEnable",mEnable,sizeof(mEnable),NULL);
if (strcmp(mEnable, "Off") == 0)
{
@ -262,10 +266,13 @@ double CvCaptureCAM_PvAPI::getProperty( int property_id ) const
sscanf(mIp, "%d.%d.%d.%d", &a, &b, &c, &d); ip = ((a*256 + b)*256 + c)*256 + d;
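// e.g. "192.168.0.1" packs to ((192*256 + 168)*256 + 0)*256 + 1 = 3232235521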
return (double)ip;
}
case CV_CAP_PROP_GAIN:
}
case CAP_PROP_GAIN:
{
PvAttrUint32Get(Camera.Handle, "GainValue", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE:
}
case CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE:
char triggerMode[256];
PvAttrEnumGet(Camera.Handle, "FrameStartTriggerMode", triggerMode, 256, NULL);
if (strcmp(triggerMode, "Freerun")==0)
@ -280,19 +287,19 @@ double CvCaptureCAM_PvAPI::getProperty( int property_id ) const
return 4.0;
else
return -1.0;
case CV_CAP_PROP_PVAPI_DECIMATIONHORIZONTAL:
case CAP_PROP_PVAPI_DECIMATIONHORIZONTAL:
PvAttrUint32Get(Camera.Handle, "DecimationHorizontal", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL:
case CAP_PROP_PVAPI_DECIMATIONVERTICAL:
PvAttrUint32Get(Camera.Handle, "DecimationVertical", &nTemp);
return (double)nTemp;
case CV_CAP_PROP_PVAPI_BINNINGX:
case CAP_PROP_PVAPI_BINNINGX:
PvAttrUint32Get(Camera.Handle,"BinningX",&nTemp);
return (double)nTemp;
case CV_CAP_PROP_PVAPI_BINNINGY:
case CAP_PROP_PVAPI_BINNINGY:
PvAttrUint32Get(Camera.Handle,"BinningY",&nTemp);
return (double)nTemp;
case CV_CAP_PROP_PVAPI_PIXELFORMAT:
case CAP_PROP_PVAPI_PIXELFORMAT:
char pixelFormat[256];
PvAttrEnumGet(Camera.Handle, "PixelFormat", pixelFormat,256,NULL);
if (strcmp(pixelFormat, "Mono8")==0)
@ -321,7 +328,7 @@ bool CvCaptureCAM_PvAPI::setProperty( int property_id, double value )
switch ( property_id )
{
case CV_CAP_PROP_FRAME_WIDTH:
case CAP_PROP_FRAME_WIDTH:
{
tPvUint32 currHeight;
@ -339,7 +346,7 @@ bool CvCaptureCAM_PvAPI::setProperty( int property_id, double value )
break;
}
case CV_CAP_PROP_FRAME_HEIGHT:
case CAP_PROP_FRAME_HEIGHT:
{
tPvUint32 currWidth;
@ -358,12 +365,12 @@ bool CvCaptureCAM_PvAPI::setProperty( int property_id, double value )
break;
}
case CV_CAP_PROP_EXPOSURE:
case CAP_PROP_EXPOSURE:
if ((PvAttrUint32Set(Camera.Handle,"ExposureValue",(tPvUint32)value)==ePvErrSuccess))
break;
else
return false;
case CV_CAP_PROP_PVAPI_MULTICASTIP:
case CAP_PROP_PVAPI_MULTICASTIP:
if (value==-1)
{
if ((PvAttrEnumSet(Camera.Handle,"MulticastEnable", "Off")==ePvErrSuccess))
@ -380,13 +387,13 @@ bool CvCaptureCAM_PvAPI::setProperty( int property_id, double value )
else
return false;
}
case CV_CAP_PROP_GAIN:
case CAP_PROP_GAIN:
if (PvAttrUint32Set(Camera.Handle,"GainValue",(tPvUint32)value)!=ePvErrSuccess)
{
return false;
}
break;
case CV_CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE:
case CAP_PROP_PVAPI_FRAMESTARTTRIGGERMODE:
if (value==0)
error = PvAttrEnumSet(Camera.Handle, "FrameStartTriggerMode", "Freerun");
else if (value==1)
@ -403,7 +410,7 @@ bool CvCaptureCAM_PvAPI::setProperty( int property_id, double value )
break;
else
return false;
case CV_CAP_PROP_PVAPI_DECIMATIONHORIZONTAL:
case CAP_PROP_PVAPI_DECIMATIONHORIZONTAL:
if (value >= 1 && value <= 8)
error = PvAttrUint32Set(Camera.Handle, "DecimationHorizontal", value);
else
@ -412,7 +419,7 @@ bool CvCaptureCAM_PvAPI::setProperty( int property_id, double value )
break;
else
return false;
case CV_CAP_PROP_PVAPI_DECIMATIONVERTICAL:
case CAP_PROP_PVAPI_DECIMATIONVERTICAL:
if (value >= 1 && value <= 8)
error = PvAttrUint32Set(Camera.Handle, "DecimationVertical", value);
else
@ -421,19 +428,19 @@ bool CvCaptureCAM_PvAPI::setProperty( int property_id, double value )
break;
else
return false;
case CV_CAP_PROP_PVAPI_BINNINGX:
case CAP_PROP_PVAPI_BINNINGX:
error = PvAttrUint32Set(Camera.Handle, "BinningX", value);
if(error==ePvErrSuccess)
break;
else
return false;
case CV_CAP_PROP_PVAPI_BINNINGY:
case CAP_PROP_PVAPI_BINNINGY:
error = PvAttrUint32Set(Camera.Handle, "BinningY", value);
if(error==ePvErrSuccess)
break;
else
return false;
case CV_CAP_PROP_PVAPI_PIXELFORMAT:
case CAP_PROP_PVAPI_PIXELFORMAT:
{
cv::String pixelFormat;
@ -524,10 +531,9 @@ bool CvCaptureCAM_PvAPI::resizeCaptureFrame (int frameWidth, int frameHeight)
tPvUint32 sensorHeight;
tPvUint32 sensorWidth;
if (frame)
if (!frame.empty())
{
cvReleaseImage(&frame);
frame = NULL;
frame.release();
}
if (PvAttrUint32Get(Camera.Handle, "SensorWidth", &sensorWidth) != ePvErrSuccess)
@ -566,33 +572,30 @@ bool CvCaptureCAM_PvAPI::resizeCaptureFrame (int frameWidth, int frameHeight)
PvAttrUint32Get(Camera.Handle, "TotalBytesPerFrame", &frameSize);
const cv::Size sz((int)frameWidth, (int)frameHeight);
if ( (strcmp(pixelFormat, "Mono8")==0) || (strcmp(pixelFormat, "Bayer8")==0) )
{
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 1);
frame->widthStep = (int)frameWidth;
Camera.Frame.ImageBufferSize = frameSize;
Camera.Frame.ImageBuffer = frame->imageData;
frame.create(sz, CV_8UC1);
Camera.Frame.ImageBufferSize = frame.total() * frame.elemSize();
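// e.g. a 640x480 Mono8 frame: frame.total() = 307200, elemSize() = 1, so 307200 bytes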
Camera.Frame.ImageBuffer = frame.data;
}
else if ( (strcmp(pixelFormat, "Mono16")==0) || (strcmp(pixelFormat, "Bayer16")==0) )
{
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_16U, 1);
frame->widthStep = (int)frameWidth*2;
Camera.Frame.ImageBufferSize = frameSize;
Camera.Frame.ImageBuffer = frame->imageData;
frame.create(sz, CV_16UC1);
Camera.Frame.ImageBufferSize = frame.total() * frame.elemSize();
Camera.Frame.ImageBuffer = frame.data;
}
else if ( (strcmp(pixelFormat, "Rgb24")==0) || (strcmp(pixelFormat, "Bgr24")==0) )
{
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 3);
frame->widthStep = (int)frameWidth*3;
Camera.Frame.ImageBufferSize = frameSize;
Camera.Frame.ImageBuffer = frame->imageData;
frame.create(sz, CV_8UC3);
Camera.Frame.ImageBufferSize = frame.total() * frame.elemSize();
Camera.Frame.ImageBuffer = frame.data;
}
else if ( (strcmp(pixelFormat, "Rgba32")==0) || (strcmp(pixelFormat, "Bgra32")==0) )
{
frame = cvCreateImage(cvSize((int)frameWidth, (int)frameHeight), IPL_DEPTH_8U, 4);
frame->widthStep = (int)frameWidth*4;
Camera.Frame.ImageBufferSize = frameSize;
Camera.Frame.ImageBuffer = frame->imageData;
frame.create(sz, CV_8UC4);
Camera.Frame.ImageBufferSize = frame.total() * frame.elemSize();
Camera.Frame.ImageBuffer = frame.data;
}
else
return false;
@ -602,12 +605,10 @@ bool CvCaptureCAM_PvAPI::resizeCaptureFrame (int frameWidth, int frameHeight)
cv::Ptr<cv::IVideoCapture> cv::create_PvAPI_capture( int index )
{
CvCaptureCAM_PvAPI* capture = new CvCaptureCAM_PvAPI;
Ptr<CvCaptureCAM_PvAPI> capture = makePtr<CvCaptureCAM_PvAPI>();
if ( capture->open( index ))
return cv::makePtr<cv::LegacyCapture>(capture);
delete capture;
return capture;
return NULL;
}
#endif

View File

@ -154,12 +154,12 @@ the symptoms were damaged image and 'Corrupt JPEG data: premature end of data se
prevents bad images in the first place
11th patch: April 2, 2013, Forrest Reiling forrest.reiling@gmail.com
Added v4l2 support for getting capture property CV_CAP_PROP_POS_MSEC.
Added v4l2 support for getting capture property CAP_PROP_POS_MSEC.
Returns the millisecond timestamp of the last frame grabbed or 0 if no frames have been grabbed
Used to successfully synchronize 2 Logitech C310 USB webcams to within 16 ms of one another
12th patch: March 9, 2018, Taylor Lanclos <tlanclos@live.com>
added support for CV_CAP_PROP_BUFFERSIZE
added support for CAP_PROP_BUFFERSIZE
make & enjoy!
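The two properties named in these patch notes are driven through the usual VideoCapture interface. A minimal sketch, assuming a V4L2 device at index 0 (illustrative only, not part of this patch):

#include <opencv2/videoio.hpp>
#include <iostream>

int main()
{
    cv::VideoCapture cap(0, cv::CAP_V4L2);
    if (!cap.isOpened())
        return 1;
    cap.set(cv::CAP_PROP_BUFFERSIZE, 2); // shrink the driver queue to reduce latency
    cv::Mat frame;
    if (cap.read(frame))
        std::cout << "timestamp (ms): " << cap.get(cv::CAP_PROP_POS_MSEC) << std::endl;
    return 0;
}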
@ -361,7 +361,7 @@ struct Buffer
}
};
struct CvCaptureCAM_V4L CV_FINAL : public CvCapture
struct CvCaptureCAM_V4L CV_FINAL : public IVideoCapture
{
int getCaptureDomain() /*const*/ CV_OVERRIDE { return cv::CAP_V4L; }
@ -373,7 +373,7 @@ struct CvCaptureCAM_V4L CV_FINAL : public CvCapture
bool FirstCapture;
String deviceName;
IplImage frame;
Mat frame;
__u32 palette;
int width, height;
@ -381,7 +381,6 @@ struct CvCaptureCAM_V4L CV_FINAL : public CvCapture
int bufferSize;
__u32 fps;
bool convert_rgb;
bool frame_allocated;
bool returnFrame;
// To select a video input set cv::CAP_PROP_CHANNEL to channel number.
// If the new channel number is less than 0, then the video input will not change
@ -407,15 +406,15 @@ struct CvCaptureCAM_V4L CV_FINAL : public CvCapture
timeval timestamp;
bool open(int _index);
bool open(const char* deviceName);
bool isOpened() const;
bool open(const std::string & filename);
bool isOpened() const CV_OVERRIDE;
void closeDevice();
virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE;
virtual bool grabFrame() CV_OVERRIDE;
virtual IplImage* retrieveFrame(int) CV_OVERRIDE;
virtual bool retrieveFrame(int, OutputArray) CV_OVERRIDE;
CvCaptureCAM_V4L();
virtual ~CvCaptureCAM_V4L();
@ -436,11 +435,9 @@ struct CvCaptureCAM_V4L CV_FINAL : public CvCapture
bool try_palette_v4l2();
bool try_init_v4l2();
bool autosetup_capture_mode_v4l2();
void v4l2_create_frame();
bool read_frame_v4l2();
bool convertableToRgb() const;
void convertToRgb(const Buffer &currentBuffer);
void releaseFrame();
bool havePendingFrame; // true if the next .grab() should be a no-op; .retrieve() resets this flag
};
@ -456,13 +453,12 @@ CvCaptureCAM_V4L::CvCaptureCAM_V4L() :
palette(0),
width(0), height(0), width_set(0), height_set(0),
bufferSize(DEFAULT_V4L_BUFFERS),
fps(0), convert_rgb(0), frame_allocated(false), returnFrame(false),
fps(0), convert_rgb(0), returnFrame(false),
channelNumber(-1), normalizePropRange(false),
type(V4L2_BUF_TYPE_VIDEO_CAPTURE),
num_planes(0),
havePendingFrame(false)
{
frame = cvIplImage();
memset(&timestamp, 0, sizeof(timestamp));
}
@ -696,86 +692,6 @@ bool CvCaptureCAM_V4L::convertableToRgb() const
return false;
}
void CvCaptureCAM_V4L::v4l2_create_frame()
{
CvSize size;
int channels = 3;
int depth = IPL_DEPTH_8U;
if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
CV_Assert(form.fmt.pix_mp.width <= (uint)std::numeric_limits<int>::max());
CV_Assert(form.fmt.pix_mp.height <= (uint)std::numeric_limits<int>::max());
size = {(int)form.fmt.pix_mp.width, (int)form.fmt.pix_mp.height};
} else {
CV_Assert(form.fmt.pix.width <= (uint)std::numeric_limits<int>::max());
CV_Assert(form.fmt.pix.height <= (uint)std::numeric_limits<int>::max());
size = {(int)form.fmt.pix.width, (int)form.fmt.pix.height};
}
if (!convert_rgb) {
switch (palette) {
case V4L2_PIX_FMT_BGR24:
case V4L2_PIX_FMT_RGB24:
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_ABGR32:
break;
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
channels = 2;
break;
case V4L2_PIX_FMT_YVU420:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_NV12:
case V4L2_PIX_FMT_NV21:
channels = 1;
size.height = size.height * 3 / 2; // "1.5" channels
break;
case V4L2_PIX_FMT_Y16:
case V4L2_PIX_FMT_Y16_BE:
case V4L2_PIX_FMT_Y12:
case V4L2_PIX_FMT_Y10:
depth = IPL_DEPTH_16U;
/* fallthru */
case V4L2_PIX_FMT_GREY:
channels = 1;
break;
case V4L2_PIX_FMT_MJPEG:
case V4L2_PIX_FMT_JPEG:
default:
channels = 1;
if(bufferIndex < 0)
size = cvSize(buffers[MAX_V4L_BUFFERS].memories[MEMORY_ORIG].length, 1);
else {
__u32 bytesused = 0;
if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
__u32 data_offset;
for (unsigned char n_planes = 0; n_planes < num_planes; n_planes++) {
data_offset = buffers[bufferIndex].planes[n_planes].data_offset;
bytesused += buffers[bufferIndex].planes[n_planes].bytesused - data_offset;
}
} else
bytesused = buffers[bufferIndex].buffer.bytesused;
size = cvSize(bytesused, 1);
}
break;
}
}
/* Set up Image data */
cvInitImageHeader(&frame, size, depth, channels);
/* Allocate space for the pixel format we convert to.
* If we do not convert, frame just points to the buffer
*/
releaseFrame();
// we need memory iff convert_rgb is true
if (convert_rgb) {
frame.imageData = (char *)cvAlloc(frame.imageSize);
frame_allocated = true;
}
}
bool CvCaptureCAM_V4L::initCapture()
{
if (!isOpened())
@ -840,8 +756,6 @@ bool CvCaptureCAM_V4L::initCapture()
return false;
}
v4l2_create_frame();
// reinitialize buffers
FirstCapture = true;
@ -993,7 +907,7 @@ bool CvCaptureCAM_V4L::open(int _index)
name = cv::format("/dev/video%d", _index);
}
bool res = open(name.c_str());
bool res = open(name);
if (!res)
{
CV_LOG_WARNING(NULL, "VIDEOIO(V4L2:" << deviceName << "): can't open camera by index");
@ -1001,9 +915,8 @@ bool CvCaptureCAM_V4L::open(int _index)
return res;
}
bool CvCaptureCAM_V4L::open(const char* _deviceName)
bool CvCaptureCAM_V4L::open(const std::string & _deviceName)
{
CV_Assert(_deviceName);
CV_LOG_DEBUG(NULL, "VIDEOIO(V4L2:" << _deviceName << "): opening...");
FirstCapture = true;
width = DEFAULT_V4L_WIDTH;
@ -1012,7 +925,6 @@ bool CvCaptureCAM_V4L::open(const char* _deviceName)
bufferSize = DEFAULT_V4L_BUFFERS;
fps = DEFAULT_V4L_FPS;
convert_rgb = true;
frame_allocated = false;
deviceName = _deviceName;
returnFrame = true;
normalizePropRange = utils::getConfigurationParameterBool("OPENCV_VIDEOIO_V4L_RANGE_NORMALIZED", false);
@ -1519,46 +1431,45 @@ void CvCaptureCAM_V4L::convertToRgb(const Buffer &currentBuffer)
switch (palette)
{
case V4L2_PIX_FMT_YUV411P:
yuv411p_to_rgb24(imageSize.width, imageSize.height,
start, (unsigned char*)frame.imageData);
frame.create(imageSize, CV_8UC3);
yuv411p_to_rgb24(imageSize.width, imageSize.height, start, frame.data);
return;
default:
break;
}
// Converted by cvtColor or imdecode
cv::Mat destination(imageSize, CV_8UC3, frame.imageData);
switch (palette) {
case V4L2_PIX_FMT_YVU420:
cv::cvtColor(cv::Mat(imageSize.height * 3 / 2, imageSize.width, CV_8U, start), destination,
cv::cvtColor(cv::Mat(imageSize.height * 3 / 2, imageSize.width, CV_8U, start), frame,
COLOR_YUV2BGR_YV12);
return;
case V4L2_PIX_FMT_YUV420:
cv::cvtColor(cv::Mat(imageSize.height * 3 / 2, imageSize.width, CV_8U, start), destination,
cv::cvtColor(cv::Mat(imageSize.height * 3 / 2, imageSize.width, CV_8U, start), frame,
COLOR_YUV2BGR_IYUV);
return;
case V4L2_PIX_FMT_NV12:
cv::cvtColor(cv::Mat(imageSize.height * 3 / 2, imageSize.width, CV_8U, start), destination,
cv::cvtColor(cv::Mat(imageSize.height * 3 / 2, imageSize.width, CV_8U, start), frame,
COLOR_YUV2BGR_NV12);
return;
case V4L2_PIX_FMT_NV21:
cv::cvtColor(cv::Mat(imageSize.height * 3 / 2, imageSize.width, CV_8U, start), destination,
cv::cvtColor(cv::Mat(imageSize.height * 3 / 2, imageSize.width, CV_8U, start), frame,
COLOR_YUV2BGR_NV21);
return;
#ifdef HAVE_JPEG
case V4L2_PIX_FMT_MJPEG:
case V4L2_PIX_FMT_JPEG:
CV_LOG_DEBUG(NULL, "VIDEOIO(V4L2:" << deviceName << "): decoding JPEG frame: size=" << currentBuffer.bytesused);
cv::imdecode(Mat(1, currentBuffer.bytesused, CV_8U, start), IMREAD_COLOR, &destination);
cv::imdecode(Mat(1, currentBuffer.bytesused, CV_8U, start), IMREAD_COLOR, &frame);
return;
#endif
case V4L2_PIX_FMT_YUYV:
cv::cvtColor(cv::Mat(imageSize, CV_8UC2, start), destination, COLOR_YUV2BGR_YUYV);
cv::cvtColor(cv::Mat(imageSize, CV_8UC2, start), frame, COLOR_YUV2BGR_YUYV);
return;
case V4L2_PIX_FMT_UYVY:
cv::cvtColor(cv::Mat(imageSize, CV_8UC2, start), destination, COLOR_YUV2BGR_UYVY);
cv::cvtColor(cv::Mat(imageSize, CV_8UC2, start), frame, COLOR_YUV2BGR_UYVY);
return;
case V4L2_PIX_FMT_RGB24:
cv::cvtColor(cv::Mat(imageSize, CV_8UC3, start), destination, COLOR_RGB2BGR);
cv::cvtColor(cv::Mat(imageSize, CV_8UC3, start), frame, COLOR_RGB2BGR);
return;
case V4L2_PIX_FMT_Y16:
{
@ -1567,7 +1478,7 @@ void CvCaptureCAM_V4L::convertToRgb(const Buffer &currentBuffer)
// Note: 10-bits precision is not supported
cv::Mat temp(imageSize, CV_8UC1, buffers[MAX_V4L_BUFFERS].memories[MEMORY_RGB].start);
cv::extractChannel(cv::Mat(imageSize, CV_8UC2, start), temp, 1); // 1 - second channel
cv::cvtColor(temp, destination, COLOR_GRAY2BGR);
cv::cvtColor(temp, frame, COLOR_GRAY2BGR);
return;
}
case V4L2_PIX_FMT_Y16_BE:
@ -1577,21 +1488,21 @@ void CvCaptureCAM_V4L::convertToRgb(const Buffer &currentBuffer)
// Note: 10-bits precision is not supported
cv::Mat temp(imageSize, CV_8UC1, buffers[MAX_V4L_BUFFERS].memories[MEMORY_RGB].start);
cv::extractChannel(cv::Mat(imageSize, CV_8UC2, start), temp, 0); // 0 - first channel
cv::cvtColor(temp, destination, COLOR_GRAY2BGR);
cv::cvtColor(temp, frame, COLOR_GRAY2BGR);
return;
}
case V4L2_PIX_FMT_Y12:
{
cv::Mat temp(imageSize, CV_8UC1, buffers[MAX_V4L_BUFFERS].memories[MEMORY_RGB].start);
cv::Mat(imageSize, CV_16UC1, start).convertTo(temp, CV_8U, 1.0 / 16);
cv::cvtColor(temp, destination, COLOR_GRAY2BGR);
cv::cvtColor(temp, frame, COLOR_GRAY2BGR);
return;
}
case V4L2_PIX_FMT_Y10:
{
cv::Mat temp(imageSize, CV_8UC1, buffers[MAX_V4L_BUFFERS].memories[MEMORY_RGB].start);
cv::Mat(imageSize, CV_16UC1, start).convertTo(temp, CV_8U, 1.0 / 4);
cv::cvtColor(temp, destination, COLOR_GRAY2BGR);
cv::cvtColor(temp, frame, COLOR_GRAY2BGR);
return;
}
case V4L2_PIX_FMT_SN9C10X:
@ -1601,40 +1512,39 @@ void CvCaptureCAM_V4L::convertToRgb(const Buffer &currentBuffer)
start, (unsigned char*)buffers[MAX_V4L_BUFFERS].memories[MEMORY_RGB].start);
cv::Mat cv_buf(imageSize, CV_8UC1, buffers[MAX_V4L_BUFFERS].memories[MEMORY_RGB].start);
cv::cvtColor(cv_buf, destination, COLOR_BayerRG2BGR);
cv::cvtColor(cv_buf, frame, COLOR_BayerRG2BGR);
return;
}
case V4L2_PIX_FMT_SRGGB8:
{
cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), destination, COLOR_BayerBG2BGR);
cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), frame, COLOR_BayerBG2BGR);
return;
}
case V4L2_PIX_FMT_SBGGR8:
{
cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), destination, COLOR_BayerRG2BGR);
cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), frame, COLOR_BayerRG2BGR);
return;
}
case V4L2_PIX_FMT_SGBRG8:
{
cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), destination, COLOR_BayerGR2BGR);
cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), frame, COLOR_BayerGR2BGR);
return;
}
case V4L2_PIX_FMT_SGRBG8:
{
cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), destination, COLOR_BayerGB2BGR);
cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), frame, COLOR_BayerGB2BGR);
return;
}
case V4L2_PIX_FMT_GREY:
cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), destination, COLOR_GRAY2BGR);
cv::cvtColor(cv::Mat(imageSize, CV_8UC1, start), frame, COLOR_GRAY2BGR);
break;
case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_ABGR32:
cv::cvtColor(cv::Mat(imageSize, CV_8UC4, start), destination, COLOR_BGRA2BGR);
cv::cvtColor(cv::Mat(imageSize, CV_8UC4, start), frame, COLOR_BGRA2BGR);
break;
case V4L2_PIX_FMT_BGR24:
default:
memcpy((char *)frame.imageData, start,
std::min(frame.imageSize, (int)currentBuffer.bytesused));
Mat(1, currentBuffer.bytesused, CV_8U, start).copyTo(frame);
break;
}
}
@ -1921,7 +1831,7 @@ double CvCaptureCAM_V4L::getProperty(int property_id) const
case cv::CAP_PROP_FOURCC:
return palette;
case cv::CAP_PROP_FORMAT:
return CV_MAKETYPE(IPL2CV_DEPTH(frame.depth), frame.nChannels);
return frame.type();
case cv::CAP_PROP_MODE:
if (normalizePropRange)
return palette;
@ -2000,8 +1910,6 @@ bool CvCaptureCAM_V4L::setProperty( int property_id, double _value )
return convert_rgb;
}else{
convert_rgb = false;
releaseFrame();
v4l2_create_frame();
return true;
}
case cv::CAP_PROP_FOURCC:
@ -2063,18 +1971,8 @@ bool CvCaptureCAM_V4L::setProperty( int property_id, double _value )
return false;
}
void CvCaptureCAM_V4L::releaseFrame()
{
if (frame_allocated && frame.imageData) {
cvFree(&frame.imageData);
frame_allocated = false;
}
}
void CvCaptureCAM_V4L::releaseBuffers()
{
releaseFrame();
if (buffers[MAX_V4L_BUFFERS].memories[MEMORY_ORIG].start) {
free(buffers[MAX_V4L_BUFFERS].memories[MEMORY_ORIG].start);
buffers[MAX_V4L_BUFFERS].memories[MEMORY_ORIG].start = 0;
@ -2135,42 +2033,43 @@ bool CvCaptureCAM_V4L::streaming(bool startStream)
return startStream;
}
IplImage *CvCaptureCAM_V4L::retrieveFrame(int)
bool CvCaptureCAM_V4L::retrieveFrame(int, OutputArray ret)
{
havePendingFrame = false; // unlock .grab()
if (bufferIndex < 0)
return &frame;
{
frame.copyTo(ret);
return !frame.empty();
}
/* Now convert what has already been captured into a Mat and return it */
const Buffer &currentBuffer = buffers[bufferIndex];
if (convert_rgb) {
if (!frame_allocated)
v4l2_create_frame();
convertToRgb(currentBuffer);
} else {
// for mjpeg streams the size might change in between, so we have to change the header
// We didn't allocate memory when not convert_rgb, but we have to recreate the header
CV_LOG_DEBUG(NULL, "VIDEOIO(V4L2:" << deviceName << "): buffer input size=" << currentBuffer.bytesused);
if (frame.imageSize != (int)currentBuffer.bytesused)
v4l2_create_frame();
frame.imageData = (char *)buffers[MAX_V4L_BUFFERS].memories[MEMORY_ORIG].start;
if (V4L2_TYPE_IS_MULTIPLANAR(type)) {
// calculate total size
__u32 bytestotal = 0;
for (unsigned char n_planes = 0; n_planes < num_planes; n_planes++) {
const v4l2_plane & cur_plane = currentBuffer.planes[n_planes];
bytestotal += cur_plane.bytesused - cur_plane.data_offset;
}
// allocate frame data
frame.create(Size(bytestotal, 1), CV_8U);
// copy each plane to the frame
__u32 offset = 0;
for (unsigned char n_planes = 0; n_planes < num_planes; n_planes++) {
__u32 data_offset, bytesused;
data_offset = currentBuffer.planes[n_planes].data_offset;
bytesused = currentBuffer.planes[n_planes].bytesused - data_offset;
memcpy((unsigned char*)buffers[MAX_V4L_BUFFERS].memories[MEMORY_ORIG].start + offset,
(char *)currentBuffer.memories[n_planes].start + data_offset,
std::min(currentBuffer.memories[n_planes].length, (size_t)bytesused));
offset += bytesused;
const v4l2_plane & cur_plane = currentBuffer.planes[n_planes];
const Memory & cur_mem = currentBuffer.memories[n_planes];
memcpy(frame.data + offset,
(char*)cur_mem.start + cur_plane.data_offset,
std::min(currentBuffer.memories[n_planes].length, (size_t)cur_plane.bytesused));
offset += cur_plane.bytesused - cur_plane.data_offset; // advance past the plane just copied
}
} else {
memcpy(buffers[MAX_V4L_BUFFERS].memories[MEMORY_ORIG].start, currentBuffer.memories[MEMORY_ORIG].start,
std::min(buffers[MAX_V4L_BUFFERS].memories[MEMORY_ORIG].length, (size_t)currentBuffer.buffer.bytesused));
const Size sz(std::min(buffers[MAX_V4L_BUFFERS].memories[MEMORY_ORIG].length, (size_t)currentBuffer.buffer.bytesused), 1);
frame = Mat(sz, CV_8U, currentBuffer.memories[MEMORY_ORIG].start);
}
}
//Revert buffer to the queue
@ -2180,28 +2079,23 @@ IplImage *CvCaptureCAM_V4L::retrieveFrame(int)
}
bufferIndex = -1;
return &frame;
frame.copyTo(ret);
return true;
}
Ptr<IVideoCapture> create_V4L_capture_cam(int index)
{
cv::CvCaptureCAM_V4L* capture = new cv::CvCaptureCAM_V4L();
if (capture->open(index))
return makePtr<LegacyCapture>(capture);
delete capture;
Ptr<CvCaptureCAM_V4L> ret = makePtr<CvCaptureCAM_V4L>();
if (ret->open(index))
return ret;
return NULL;
}
Ptr<IVideoCapture> create_V4L_capture_file(const std::string &filename)
{
cv::CvCaptureCAM_V4L* capture = new cv::CvCaptureCAM_V4L();
if (capture->open(filename.c_str()))
return makePtr<LegacyCapture>(capture);
delete capture;
auto ret = makePtr<CvCaptureCAM_V4L>();
if (ret->open(filename))
return ret;
return NULL;
}
@ -2272,12 +2166,7 @@ bool VideoCapture_V4L_waitAny(const std::vector<VideoCapture>& streams, CV_OUT s
for (size_t i = 0; i < N; ++i)
{
IVideoCapture* iCap = internal::VideoCapturePrivateAccessor::getIVideoCapture(streams[i]);
LegacyCapture* legacyCapture = dynamic_cast<LegacyCapture*>(iCap);
CV_Assert(legacyCapture);
CvCapture* cvCap = legacyCapture->getCvCapture();
CV_Assert(cvCap);
CvCaptureCAM_V4L *ptr_CvCaptureCAM_V4L = dynamic_cast<CvCaptureCAM_V4L*>(cvCap);
CvCaptureCAM_V4L *ptr_CvCaptureCAM_V4L = dynamic_cast<CvCaptureCAM_V4L*>(iCap);
CV_Assert(ptr_CvCaptureCAM_V4L);
capPtr[i] = ptr_CvCaptureCAM_V4L;
}
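This code path backs the public VideoCapture::waitAny API. A minimal sketch, assuming two V4L2 cameras at indices 0 and 1 (illustrative only, not part of this patch):

#include <opencv2/videoio.hpp>
#include <vector>

int main()
{
    std::vector<cv::VideoCapture> streams;
    streams.emplace_back(0, cv::CAP_V4L2);
    streams.emplace_back(1, cv::CAP_V4L2);
    std::vector<int> ready;
    // blocks until at least one camera has a frame, or the 1 s timeout expires
    if (cv::VideoCapture::waitAny(streams, ready, 1000000000 /* ns */))
    {
        for (int idx : ready)
        {
            cv::Mat frame;
            streams[idx].retrieve(frame);
        }
    }
    return 0;
}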

File diff suppressed because it is too large

View File

@ -213,13 +213,13 @@ class XINECapture : public IVideoCapture
switch (property_id)
{
case CV_CAP_PROP_POS_MSEC: return res ? pos_t : 0;
case CV_CAP_PROP_POS_FRAMES: return frame_number;
case CV_CAP_PROP_POS_AVI_RATIO: return length && res ? pos_l / 65535.0 : 0.0;
case CV_CAP_PROP_FRAME_WIDTH: return size.width;
case CV_CAP_PROP_FRAME_HEIGHT: return size.height;
case CV_CAP_PROP_FPS: return frame_rate;
case CV_CAP_PROP_FOURCC: return (double)xine_get_stream_info(stream, XINE_STREAM_INFO_VIDEO_FOURCC);
case CAP_PROP_POS_MSEC: return res ? pos_t : 0;
case CAP_PROP_POS_FRAMES: return frame_number;
case CAP_PROP_POS_AVI_RATIO: return length && res ? pos_l / 65535.0 : 0.0;
case CAP_PROP_FRAME_WIDTH: return size.width;
case CAP_PROP_FRAME_HEIGHT: return size.height;
case CAP_PROP_FPS: return frame_rate;
case CAP_PROP_FOURCC: return (double)xine_get_stream_info(stream, XINE_STREAM_INFO_VIDEO_FOURCC);
}
return 0;
}
@ -230,9 +230,9 @@ class XINECapture : public IVideoCapture
CV_Assert(vo_port);
switch (property_id)
{
case CV_CAP_PROP_POS_MSEC: return seekTime((int)value);
case CV_CAP_PROP_POS_FRAMES: return seekFrame((int)value);
case CV_CAP_PROP_POS_AVI_RATIO: return seekRatio(value);
case CAP_PROP_POS_MSEC: return seekTime((int)value);
case CAP_PROP_POS_FRAMES: return seekFrame((int)value);
case CAP_PROP_POS_AVI_RATIO: return seekRatio(value);
default: return false;
}
}
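From the caller's side, seeking goes through the same property constants. A minimal sketch, assuming OpenCV was built with the XINE backend and that video.avi exists (illustrative only, not part of this patch):

#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture cap("video.avi", cv::CAP_XINE);
    if (!cap.isOpened())
        return 1;
    cap.set(cv::CAP_PROP_POS_MSEC, 5000.0); // jump to the 5 s mark
    cv::Mat frame;
    cap.read(frame);
    return 0;
}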

View File

@ -2,8 +2,8 @@
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "precomp.hpp"
#include "opencv2/videoio/container_avi.private.hpp"
#include <opencv2/core/utils/logger.hpp>
#include <fstream>
#include <limits>
#include <typeinfo>
@ -23,7 +23,8 @@ inline D safe_int_cast(S val, const char * msg = 0)
if (!in_range_r || !in_range_l)
{
if (!msg)
CV_Error_(Error::StsOutOfRange, ("Can not convert integer values (%s -> %s), value 0x%jx is out of range", typeid(S).name(), typeid(D).name(), (uintmax_t)val));
CV_Error(Error::StsOutOfRange,
cv::format("Can not convert integer values (%s -> %s), value 0x%jx is out of range", typeid(S).name(), typeid(D).name(), (uintmax_t)val));
else
CV_Error(Error::StsOutOfRange, msg);
}
@ -269,15 +270,15 @@ VideoInputStream::~VideoInputStream()
AVIReadContainer::AVIReadContainer(): m_stream_id(0), m_movi_start(0), m_movi_end(0), m_width(0), m_height(0), m_fps(0), m_is_indx_present(false)
{
m_file_stream = makePtr<VideoInputStream>();
m_file_stream = std::make_shared<VideoInputStream>();
}
void AVIReadContainer::initStream(const String &filename)
{
m_file_stream = makePtr<VideoInputStream>(filename);
m_file_stream = std::make_shared<VideoInputStream>(filename);
}
void AVIReadContainer::initStream(Ptr<VideoInputStream> m_file_stream_)
void AVIReadContainer::initStream(std::shared_ptr<VideoInputStream> m_file_stream_)
{
m_file_stream = m_file_stream_;
}

View File

@ -53,9 +53,8 @@
#include <unistd.h> // -D_FORTIFY_SOURCE=2 workaround: https://github.com/opencv/opencv/issues/15020
#endif
#include "opencv2/core/cvdef.h"
#include "opencv2/videoio.hpp"
#include "opencv2/videoio/legacy/constants_c.h"
#include "opencv2/core/utility.hpp"
#ifdef __OPENCV_BUILD
@ -75,7 +74,6 @@
#include "opencv2/imgproc.hpp"
#include "opencv2/imgproc/imgproc_c.h"
#include "opencv2/videoio/videoio_c.h"
#include <stdlib.h>
#include <stdio.h>

View File

@ -1,193 +0,0 @@
/*
* filestorage_sample demonstrates the usage of the opencv serialization functionality
*/
#include "opencv2/core.hpp"
#include <iostream>
#include <string>
using std::string;
using std::cout;
using std::endl;
using std::cerr;
using std::ostream;
using namespace cv;
static void help(char** av)
{
cout << "\nfilestorage_sample demonstrates the usage of the opencv serialization functionality.\n"
<< "usage:\n"
<< av[0] << " outputfile.yml.gz\n"
<< "\n outputfile above can have many different extensions, see below."
<< "\nThis program demonstrates the use of FileStorage for serialization, that is in use << and >> in OpenCV\n"
<< "For example, how to create a class and have it serialize, but also how to use it to read and write matrices.\n"
<< "FileStorage allows you to serialize to various formats specified by the file end type."
<< "\nYou should try using different file extensions.(e.g. yaml yml xml xml.gz yaml.gz etc...)\n" << endl;
}
struct MyData
{
MyData() :
A(0), X(0), id()
{
}
explicit MyData(int) :
A(97), X(CV_PI), id("mydata1234")
{
}
int A;
double X;
string id;
void write(FileStorage& fs) const //Write serialization for this class
{
fs << "{" << "A" << A << "X" << X << "id" << id << "}";
}
void read(const FileNode& node) //Read serialization for this class
{
A = (int)node["A"];
X = (double)node["X"];
id = (string)node["id"];
}
};
//These write and read functions must exist as per the inline functions in operations.hpp
static void write(FileStorage& fs, const std::string&, const MyData& x){
x.write(fs);
}
static void read(const FileNode& node, MyData& x, const MyData& default_value = MyData()){
if(node.empty())
x = default_value;
else
x.read(node);
}
static ostream& operator<<(ostream& out, const MyData& m){
out << "{ id = " << m.id << ", ";
out << "X = " << m.X << ", ";
out << "A = " << m.A << "}";
return out;
}
int main(int ac, char** av)
{
cv::CommandLineParser parser(ac, av,
"{@input||}{help h ||}"
);
if (parser.has("help"))
{
help(av);
return 0;
}
string filename = parser.get<string>("@input");
if (filename.empty())
{
help(av);
return 1;
}
//write
{
FileStorage fs(filename, FileStorage::WRITE);
cout << "writing images\n";
fs << "images" << "[";
fs << "image1.jpg" << "myfi.png" << "baboon.jpg";
cout << "image1.jpg" << " myfi.png" << " baboon.jpg" << endl;
fs << "]";
cout << "writing mats\n";
Mat R =Mat_<double>::eye(3, 3),T = Mat_<double>::zeros(3, 1);
cout << "R = " << R << "\n";
cout << "T = " << T << "\n";
fs << "R" << R;
fs << "T" << T;
cout << "writing MyData struct\n";
MyData m(1);
fs << "mdata" << m;
cout << m << endl;
}
//read
{
FileStorage fs(filename, FileStorage::READ);
if (!fs.isOpened())
{
cerr << "failed to open " << filename << endl;
help(av);
return 1;
}
FileNode n = fs["images"];
if (n.type() != FileNode::SEQ)
{
cerr << "images is not a sequence! FAIL" << endl;
return 1;
}
cout << "reading images\n";
FileNodeIterator it = n.begin(), it_end = n.end();
for (; it != it_end; ++it)
{
cout << (string)*it << "\n";
}
Mat R, T;
cout << "reading R and T" << endl;
fs["R"] >> R;
fs["T"] >> T;
cout << "R = " << R << "\n";
cout << "T = " << T << endl;
MyData m;
fs["mdata"] >> m;
cout << "read mdata\n";
cout << m << endl;
cout << "attempting to read mdata_b\n"; //Show default behavior for empty matrix
fs["mdata_b"] >> m;
cout << "read mdata_b\n";
cout << m << endl;
}
cout << "Try opening " << filename << " to see the serialized data." << endl << endl;
//read from string
{
cout << "Read data from string\n";
string dataString =
"%YAML:1.0\n"
"mdata:\n"
" A: 97\n"
" X: 3.1415926535897931e+00\n"
" id: mydata1234\n";
MyData m;
FileStorage fs(dataString, FileStorage::READ | FileStorage::MEMORY);
cout << "attempting to read mdata_b from string\n"; //Show default behavior for empty matrix
fs["mdata"] >> m;
cout << "read mdata\n";
cout << m << endl;
}
//write to string
{
cout << "Write data to string\n";
FileStorage fs(filename, FileStorage::WRITE | FileStorage::MEMORY | FileStorage::FORMAT_YAML);
cout << "writing MyData struct\n";
MyData m(1);
fs << "mdata" << m;
cout << m << endl;
string createdString = fs.releaseAndGetString();
cout << "Created string:\n" << createdString << "\n";
}
return 0;
}

View File

@ -1,19 +0,0 @@
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs/legacy/constants_c.h>
#include <opencv2/videoio/legacy/constants_c.h>
#include <opencv2/photo/legacy/constants_c.h>
#include <opencv2/video/legacy/constants_c.h>
using namespace cv;
int main(int /*argc*/, const char** /*argv*/)
{
std::cout
<< (int)CV_LOAD_IMAGE_GRAYSCALE
<< (int)CV_CAP_FFMPEG
<< std::endl;
return 0;
}

View File

@ -8,14 +8,14 @@ using namespace std;
static void help(char** av)
{
cout << endl
<< av[0] << " shows the usage of the OpenCV serialization functionality." << endl
<< av[0] << " shows the usage of the OpenCV serialization functionality." << endl << endl
<< "usage: " << endl
<< av[0] << " outputfile.yml.gz" << endl
<< "The output file may be either XML (xml) or YAML (yml/yaml). You can even compress it by "
<< "specifying this in its extension like xml.gz yaml.gz etc... " << endl
<< av[0] << " [output file name] (default outputfile.yml.gz)" << endl << endl
<< "The output file may be XML (xml), YAML (yml/yaml), or JSON (json)." << endl
<< "You can even compress it by specifying this in its extension like xml.gz yaml.gz etc... " << endl
<< "With FileStorage you can serialize objects in OpenCV by using the << and >> operators" << endl
<< "For example: - create a class and have it serialized" << endl
<< " - use it to read and write matrices." << endl;
<< " - use it to read and write matrices." << endl << endl;
}
class MyData
@ -68,13 +68,16 @@ static ostream& operator<<(ostream& out, const MyData& m)
int main(int ac, char** av)
{
string filename;
if (ac != 2)
{
help(av);
return 1;
filename = "outputfile.yml.gz";
}
else
filename = av[1];
string filename = av[1];
{ //write
//! [iomati]
Mat R = Mat_<uchar>::eye(3, 3),
@ -118,7 +121,7 @@ int main(int ac, char** av)
//! [close]
fs.release(); // explicit close
//! [close]
cout << "Write Done." << endl;
cout << "Write operation to file:" << filename << " completed successfully." << endl;
}
{//read

View File

@ -78,6 +78,7 @@ int main()
tm.start();
// do something ...
tm.stop();
cout << "Last iteration: " << tm.getLastTimeSec() << endl;
}
cout << "Average time per iteration in seconds: " << tm.getAvgTimeSec() << endl;
cout << "Average FPS: " << tm.getFPS() << endl;

View File

@ -9,10 +9,10 @@ def help(filename):
'''
{0} shows the usage of the OpenCV serialization functionality. \n\n
usage:\n
python3 {0} outputfile.yml.gz\n\n
The output file may be either in XML, YAML or JSON. You can even compress it\n
by specifying this in its extension like xml.gz yaml.gz etc... With\n
FileStorage you can serialize objects in OpenCV.\n\n
python3 {0} [output file name] (default outputfile.yml.gz)\n\n
The output file may be XML (xml), YAML (yml/yaml), or JSON (json).\n
You can even compress it by specifying this in its extension like xml.gz yaml.gz etc...\n
With FileStorage you can serialize objects in OpenCV.\n\n
For example: - create a class and have it serialized\n
- use it to read and write matrices.\n
'''.format(filename)
@ -49,7 +49,9 @@ class MyData:
def main(argv):
if len(argv) != 2:
help(argv[0])
exit(1)
filename = 'outputfile.yml.gz'
else :
filename = argv[1]
# write
## [iomati]
@ -60,8 +62,6 @@ def main(argv):
m = MyData()
## [customIOi]
filename = argv[1]
## [open]
s = cv.FileStorage(filename, cv.FileStorage_WRITE)
# or:
@ -98,7 +98,7 @@ def main(argv):
## [close]
s.release()
## [close]
print ('Write Done.')
print ('Write operation to file:', filename, 'completed successfully.')
# read
print ('\nReading: ')