Merge branch 'opencv:4.x' into 4.x

This commit is contained in:
sujal 2024-12-30 16:09:35 +05:30 committed by GitHub
commit eaacd3dc20
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
52 changed files with 3074 additions and 510 deletions

View File

@ -24,8 +24,6 @@ if(HAVE_FASTCV)
ocv_install_target(fastcv_hal EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev)
endif()
ocv_install_3rdparty_licenses(FastCV "${OpenCV_BINARY_DIR}/3rdparty/fastcv/LICENSE")
if(ENABLE_SOLUTION_FOLDERS)
set_target_properties(fastcv_hal PROPERTIES FOLDER "3rdparty")
endif()

View File

@ -1,8 +1,8 @@
# Binaries branch name: ffmpeg/4.x_20240522
# Binaries were created for OpenCV: 8393885a39dac1e650bf5d0aaff84c04ad8bcdd3
ocv_update(FFMPEG_BINARIES_COMMIT "394dca6ceb3085c979415e6385996b6570e94153")
ocv_update(FFMPEG_FILE_HASH_BIN32 "bdfbd1efb295f3e54c07d2cb7a843bf9")
ocv_update(FFMPEG_FILE_HASH_BIN64 "bfef029900f788480a363d6dc05c4f0e")
# Binaries branch name: ffmpeg/4.x_20241226
# Binaries were created for OpenCV: 09892c9d1706f40342bda0bc404580f63492d9f8
ocv_update(FFMPEG_BINARIES_COMMIT "d63d7c154c57242bf2283be61166be2bd30ec47e")
ocv_update(FFMPEG_FILE_HASH_BIN32 "642b94d032a8292b07550126934173f6")
ocv_update(FFMPEG_FILE_HASH_BIN64 "a8c3560c8f20e1ae465bef81580fa92c")
ocv_update(FFMPEG_FILE_HASH_CMAKE "8862c87496e2e8c375965e1277dee1c7")
function(download_win_ffmpeg script_var)

View File

@ -119,8 +119,8 @@ inline int meanStdDev_8UC4(const uchar* src_data, size_t src_step, int width, in
vec_sqsum = __riscv_vwmaccu_vv_u64m8_tumu(vmask, vec_sqsum, vec_pixel, vec_pixel, vl);
nz += __riscv_vcpop_m_b8(vmask, vl);
}
nz /= 4;
}
nz /= 4;
} else {
for (int i = 0; i < height; i++) {
const uchar* src_row = src_data + i * src_step;

View File

@ -20,9 +20,9 @@ namespace cv { namespace cv_hal_rvv {
#if defined __GNUC__
__attribute__((optimize("no-tree-vectorize")))
#endif
static int merge8u(const uchar** src, uchar* dst, int len, int cn ) {
inline int merge8u(const uchar** src, uchar* dst, int len, int cn ) {
int k = cn % 4 ? cn % 4 : 4;
int i = 0, j;
int i = 0;
int vl = __riscv_vsetvlmax_e8m1();
if( k == 1 )
{
@ -30,7 +30,7 @@ static int merge8u(const uchar** src, uchar* dst, int len, int cn ) {
for( ; i <= len - vl; i += vl)
{
auto a = __riscv_vle8_v_u8m1(src0 + i, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*2, a, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*cn, a, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
@ -45,8 +45,8 @@ static int merge8u(const uchar** src, uchar* dst, int len, int cn ) {
{
auto a = __riscv_vle8_v_u8m1(src0 + i, vl);
auto b = __riscv_vle8_v_u8m1(src1 + i, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*2, a, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 1, sizeof(uchar)*2, b, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*cn, a, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 1, sizeof(uchar)*cn, b, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
@ -65,9 +65,9 @@ static int merge8u(const uchar** src, uchar* dst, int len, int cn ) {
auto a = __riscv_vle8_v_u8m1(src0 + i, vl);
auto b = __riscv_vle8_v_u8m1(src1 + i, vl);
auto c = __riscv_vle8_v_u8m1(src2 + i, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*3, a, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 1, sizeof(uchar)*3, b, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 2, sizeof(uchar)*3, c, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*cn, a, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 1, sizeof(uchar)*cn, b, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 2, sizeof(uchar)*cn, c, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
@ -88,10 +88,10 @@ static int merge8u(const uchar** src, uchar* dst, int len, int cn ) {
auto b = __riscv_vle8_v_u8m1(src1 + i, vl);
auto c = __riscv_vle8_v_u8m1(src2 + i, vl);
auto d = __riscv_vle8_v_u8m1(src3 + i, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*4, a, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 1, sizeof(uchar)*4, b, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 2, sizeof(uchar)*4, c, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 3, sizeof(uchar)*4, d, vl);
__riscv_vsse8_v_u8m1(dst + i*cn, sizeof(uchar)*cn, a, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 1, sizeof(uchar)*cn, b, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 2, sizeof(uchar)*cn, c, vl);
__riscv_vsse8_v_u8m1(dst + i*cn + 3, sizeof(uchar)*cn, d, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
@ -110,10 +110,27 @@ static int merge8u(const uchar** src, uchar* dst, int len, int cn ) {
for( ; k < cn; k += 4 )
{
const uchar *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
for( i = 0, j = k; i < len; i++, j += cn )
i = 0;
for( ; i <= len - vl; i += vl)
{
dst[j] = src0[i]; dst[j+1] = src1[i];
dst[j+2] = src2[i]; dst[j+3] = src3[i];
auto a = __riscv_vle8_v_u8m1(src0 + i, vl);
auto b = __riscv_vle8_v_u8m1(src1 + i, vl);
auto c = __riscv_vle8_v_u8m1(src2 + i, vl);
auto d = __riscv_vle8_v_u8m1(src3 + i, vl);
__riscv_vsse8_v_u8m1(dst + k+i*cn, sizeof(uchar)*cn, a, vl);
__riscv_vsse8_v_u8m1(dst + k+i*cn + 1, sizeof(uchar)*cn, b, vl);
__riscv_vsse8_v_u8m1(dst + k+i*cn + 2, sizeof(uchar)*cn, c, vl);
__riscv_vsse8_v_u8m1(dst + k+i*cn + 3, sizeof(uchar)*cn, d, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
#endif
for( ; i < len; i++ )
{
dst[k+i*cn] = src0[i];
dst[k+i*cn+1] = src1[i];
dst[k+i*cn+2] = src2[i];
dst[k+i*cn+3] = src3[i];
}
}
return CV_HAL_ERROR_OK;
@ -122,9 +139,9 @@ static int merge8u(const uchar** src, uchar* dst, int len, int cn ) {
#if defined __GNUC__
__attribute__((optimize("no-tree-vectorize")))
#endif
static int merge16u(const ushort** src, ushort* dst, int len, int cn ) {
inline int merge16u(const ushort** src, ushort* dst, int len, int cn ) {
int k = cn % 4 ? cn % 4 : 4;
int i = 0, j;
int i = 0;
int vl = __riscv_vsetvlmax_e16m1();
if( k == 1 )
{
@ -132,7 +149,7 @@ static int merge16u(const ushort** src, ushort* dst, int len, int cn ) {
for( ; i <= len - vl; i += vl)
{
auto a = __riscv_vle16_v_u16m1(src0 + i, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*2, a, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*cn, a, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
@ -147,8 +164,8 @@ static int merge16u(const ushort** src, ushort* dst, int len, int cn ) {
{
auto a = __riscv_vle16_v_u16m1(src0 + i, vl);
auto b = __riscv_vle16_v_u16m1(src1 + i, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*2, a, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 1, sizeof(ushort)*2, b, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*cn, a, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 1, sizeof(ushort)*cn, b, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
@ -167,9 +184,9 @@ static int merge16u(const ushort** src, ushort* dst, int len, int cn ) {
auto a = __riscv_vle16_v_u16m1(src0 + i, vl);
auto b = __riscv_vle16_v_u16m1(src1 + i, vl);
auto c = __riscv_vle16_v_u16m1(src2 + i, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*3, a, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 1, sizeof(ushort)*3, b, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 2, sizeof(ushort)*3, c, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*cn, a, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 1, sizeof(ushort)*cn, b, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 2, sizeof(ushort)*cn, c, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
@ -190,10 +207,10 @@ static int merge16u(const ushort** src, ushort* dst, int len, int cn ) {
auto b = __riscv_vle16_v_u16m1(src1 + i, vl);
auto c = __riscv_vle16_v_u16m1(src2 + i, vl);
auto d = __riscv_vle16_v_u16m1(src3 + i, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*4, a, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 1, sizeof(ushort)*4, b, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 2, sizeof(ushort)*4, c, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 3, sizeof(ushort)*4, d, vl);
__riscv_vsse16_v_u16m1(dst + i*cn, sizeof(ushort)*cn, a, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 1, sizeof(ushort)*cn, b, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 2, sizeof(ushort)*cn, c, vl);
__riscv_vsse16_v_u16m1(dst + i*cn + 3, sizeof(ushort)*cn, d, vl);
}
#if defined(__clang__)
#pragma clang loop vectorize(disable)
@ -212,10 +229,24 @@ static int merge16u(const ushort** src, ushort* dst, int len, int cn ) {
for( ; k < cn; k += 4 )
{
const uint16_t *src0 = src[k], *src1 = src[k+1], *src2 = src[k+2], *src3 = src[k+3];
for( i = 0, j = k; i < len; i++, j += cn )
i = 0;
for( ; i <= len - vl; i += vl)
{
dst[j] = src0[i]; dst[j+1] = src1[i];
dst[j+2] = src2[i]; dst[j+3] = src3[i];
auto a = __riscv_vle16_v_u16m1(src0 + i, vl);
auto b = __riscv_vle16_v_u16m1(src1 + i, vl);
auto c = __riscv_vle16_v_u16m1(src2 + i, vl);
auto d = __riscv_vle16_v_u16m1(src3 + i, vl);
__riscv_vsse16_v_u16m1(dst + k+i*cn, sizeof(ushort)*cn, a, vl);
__riscv_vsse16_v_u16m1(dst + k+i*cn + 1, sizeof(ushort)*cn, b, vl);
__riscv_vsse16_v_u16m1(dst + k+i*cn + 2, sizeof(ushort)*cn, c, vl);
__riscv_vsse16_v_u16m1(dst + k+i*cn + 3, sizeof(ushort)*cn, d, vl);
}
for( ; i < len; i++ )
{
dst[k+i*cn] = src0[i];
dst[k+i*cn+1] = src1[i];
dst[k+i*cn+2] = src2[i];
dst[k+i*cn+3] = src3[i];
}
}
return CV_HAL_ERROR_OK;
@ -224,7 +255,7 @@ static int merge16u(const ushort** src, ushort* dst, int len, int cn ) {
#if defined __GNUC__
__attribute__((optimize("no-tree-vectorize")))
#endif
static int merge32s(const int** src, int* dst, int len, int cn ) {
inline int merge32s(const int** src, int* dst, int len, int cn ) {
int k = cn % 4 ? cn % 4 : 4;
int i, j;
if( k == 1 )
@ -294,7 +325,7 @@ static int merge32s(const int** src, int* dst, int len, int cn ) {
#if defined __GNUC__
__attribute__((optimize("no-tree-vectorize")))
#endif
static int merge64s(const int64** src, int64* dst, int len, int cn ) {
inline int merge64s(const int64** src, int64* dst, int len, int cn ) {
int k = cn % 4 ? cn % 4 : 4;
int i, j;
if( k == 1 )

View File

@ -191,11 +191,6 @@ if(CV_GCC OR CV_CLANG OR CV_ICX)
endif()
add_extra_compiler_option(-fdiagnostics-show-option)
# The -Wno-long-long is required in 64bit systems when including system headers.
if(X86_64)
add_extra_compiler_option(-Wno-long-long)
endif()
# We need pthread's, unless we have explicitly disabled multi-thread execution.
if(NOT OPENCV_DISABLE_THREAD_SUPPORT
AND (

View File

@ -180,19 +180,23 @@ endif(WITH_KLEIDICV)
# --- FastCV ---
if(WITH_FASTCV)
if((EXISTS ${FastCV_INCLUDE_PATH}) AND (EXISTS ${FastCV_LIB_PATH}))
message(STATUS "Use external FastCV ${FastCV_INCLUDE_PATH}, ${FastCV_LIB_PATH}")
set(HAVE_FASTCV TRUE CACHE BOOL "FastCV status")
else()
include("${OpenCV_SOURCE_DIR}/3rdparty/fastcv/fastcv.cmake")
set(FCV_ROOT_DIR "${OpenCV_BINARY_DIR}/3rdparty/fastcv")
download_fastcv(${FCV_ROOT_DIR})
if (HAVE_FASTCV)
if(HAVE_FASTCV)
set(FastCV_INCLUDE_PATH "${FCV_ROOT_DIR}/inc" CACHE PATH "FastCV includes directory")
set(FastCV_LIB_PATH "${FCV_ROOT_DIR}/libs" CACHE PATH "FastCV library directory")
ocv_install_3rdparty_licenses(FastCV "${OpenCV_BINARY_DIR}/3rdparty/fastcv/LICENSE")
install(FILES "${FastCV_LIB_PATH}/libfastcvopt.so"
DESTINATION "${OPENCV_LIB_INSTALL_PATH}" COMPONENT "bin")
else()
set(HAVE_FASTCV FALSE CACHE BOOL "FastCV status")
endif()
if (HAVE_FASTCV)
set(FASTCV_LIBRARY "${FastCV_LIB_PATH}/libfastcvopt.so" CACHE PATH "FastCV library")
endif()
endif()
endif()
if(HAVE_FASTCV)
set(FASTCV_LIBRARY "${FastCV_LIB_PATH}/libfastcvopt.so" CACHE PATH "FastCV library")
endif()
endif(WITH_FASTCV)

View File

@ -46,14 +46,14 @@ Open your Doxyfile using your favorite text editor and search for the key
`TAGFILES`. Change it as follows:
@code
TAGFILES = ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/4.10.0
TAGFILES = ./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/4.11.0
@endcode
If you had other definitions already, you can append the line using a `\`:
@code
TAGFILES = ./docs/doxygen-tags/libstdc++.tag=https://gcc.gnu.org/onlinedocs/libstdc++/latest-doxygen \
./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/4.10.0
./docs/doxygen-tags/opencv.tag=http://docs.opencv.org/4.11.0
@endcode
Doxygen can now use the information from the tag file to link to the OpenCV

View File

@ -6,9 +6,9 @@
#define OPENCV_VERSION_HPP
#define CV_VERSION_MAJOR 4
#define CV_VERSION_MINOR 10
#define CV_VERSION_MINOR 11
#define CV_VERSION_REVISION 0
#define CV_VERSION_STATUS "-dev"
#define CV_VERSION_STATUS "-pre"
#define CVAUX_STR_EXP(__A) #__A
#define CVAUX_STR(__A) CVAUX_STR_EXP(__A)

View File

@ -2536,8 +2536,7 @@ double dotProd_16s(const short* src1, const short* src2, int len)
double dotProd_32s(const int* src1, const int* src2, int len)
{
#if CV_SIMD_64F // TODO: enable for CV_SIMD_SCALABLE_64F
// Test failed on RVV(QEMU): Too big difference (=1.20209e-08 > 1.11022e-12)
#if CV_SIMD_64F || CV_SIMD_SCALABLE_64F
double r = .0;
int i = 0;
const int step = VTraits<v_int32>::vlanes();

View File

@ -1749,18 +1749,22 @@ TEST(Core_Mat_array, copyTo_roi_row)
EXPECT_EQ(5, (int)dst2[4]);
}
TEST(Core_Mat_array, SplitMerge)
typedef testing::TestWithParam< tuple<int, perf::MatType> > Core_Mat_arrays;
TEST_P(Core_Mat_arrays, SplitMerge)
{
std::array<cv::Mat, 3> src;
int cn = get<0>(GetParam());
int type = get<1>(GetParam());
std::vector<cv::Mat> src(cn);
for (size_t i = 0; i < src.size(); ++i)
{
src[i] = Mat(10, 10, CV_8U, Scalar((double)(16 * (i + 1))));
src[i] = Mat(10, 10, type, Scalar((double)(16 * (i + 1))));
}
Mat merged;
merge(src, merged);
std::array<cv::Mat, 3> dst;
std::vector<cv::Mat> dst(cn);
split(merged, dst);
for (size_t i = 0; i < dst.size(); ++i)
@ -1769,6 +1773,17 @@ TEST(Core_Mat_array, SplitMerge)
}
}
INSTANTIATE_TEST_CASE_P(/*nothing*/, Core_Mat_arrays, testing::Combine(
testing::Range(1, 9),
testing::Values(
perf::MatType(CV_8U),
perf::MatType(CV_16U),
perf::MatType(CV_32S),
perf::MatType(CV_64F)
)
)
);
TEST(Mat, regression_8680)
{
Mat_<Point2i> mat(3,1);

View File

@ -477,8 +477,9 @@ class Core_DotProductTest : public Core_MatrixTest
public:
Core_DotProductTest();
protected:
void run_func();
void prepare_to_validation( int test_case_idx );
void run_func() CV_OVERRIDE;
void prepare_to_validation( int test_case_idx ) CV_OVERRIDE;
double get_success_error_level( int test_case_idx, int i, int j ) CV_OVERRIDE;
};
@ -498,6 +499,15 @@ void Core_DotProductTest::prepare_to_validation( int )
test_mat[REF_OUTPUT][0].at<Scalar>(0,0) = Scalar(cvtest::crossCorr( test_mat[INPUT][0], test_mat[INPUT][1] ));
}
double Core_DotProductTest::get_success_error_level( int test_case_idx, int i, int j )
{
#ifdef __riscv
const int depth = test_mat[i][j].depth();
if (depth == CV_64F)
return 1.7e-5;
#endif
return Core_MatrixTest::get_success_error_level( test_case_idx, i, j );
}
///////// crossproduct //////////

View File

@ -6,7 +6,7 @@
#define OPENCV_DNN_VERSION_HPP
/// Use with major OpenCV version only.
#define OPENCV_DNN_API_VERSION 20240521
#define OPENCV_DNN_API_VERSION 20241223
#if !defined CV_DOXYGEN && !defined CV_STATIC_ANALYSIS && !defined CV_DNN_DONT_ADD_INLINE_NS
#define CV__DNN_INLINE_NS __CV_CAT(dnn4_v, OPENCV_DNN_API_VERSION)

View File

@ -107,3 +107,18 @@ void Net::Impl::initCUDABackend(const std::vector<LayerPin>& blobsToKeep_)
CV__DNN_INLINE_NS_END
}} // namespace cv::dnn
#endif // HAVE_CUDA
namespace cv { namespace dnn {
bool haveCUDA()
{
#ifdef HAVE_CUDA
int dev = 0;
static bool ret = (cudaGetDevice(&dev) == cudaSuccess);
return ret;
#else
return false;
#endif
}
}} // namespace cv::dnn

View File

@ -29,13 +29,7 @@ namespace cv { namespace dnn {
return id == DNN_TARGET_CUDA_FP16 || id == DNN_TARGET_CUDA;
}
constexpr bool haveCUDA() {
#ifdef HAVE_CUDA
return true;
#else
return false;
#endif
}
bool haveCUDA();
#ifdef HAVE_CUDA
namespace cuda4dnn { namespace csl {

View File

@ -103,25 +103,6 @@ static const std::map<std::string, OpenVINOModelTestCaseInfo>& getOpenVINOTestMo
"intel/age-gender-recognition-retail-0013/FP16/age-gender-recognition-retail-0013",
"intel/age-gender-recognition-retail-0013/FP32/age-gender-recognition-retail-0013"
}},
#endif
#if INF_ENGINE_RELEASE >= 2021020000
// OMZ: 2020.2
{ "face-detection-0105", {
"intel/face-detection-0105/FP32/face-detection-0105",
"intel/face-detection-0105/FP16/face-detection-0105"
}},
{ "face-detection-0106", {
"intel/face-detection-0106/FP32/face-detection-0106",
"intel/face-detection-0106/FP16/face-detection-0106"
}},
#endif
#if INF_ENGINE_RELEASE >= 2021040000
// OMZ: 2021.4
{ "person-vehicle-bike-detection-2004", {
"intel/person-vehicle-bike-detection-2004/FP32/person-vehicle-bike-detection-2004",
"intel/person-vehicle-bike-detection-2004/FP16/person-vehicle-bike-detection-2004"
//"intel/person-vehicle-bike-detection-2004/FP16-INT8/person-vehicle-bike-detection-2004"
}},
#endif
};
@ -228,7 +209,12 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
for (auto&& it : model->inputs())
{
auto type = it.get_element_type();
auto shape = it.get_shape();
auto shape_ = it.get_partial_shape();
if (shape_.is_dynamic())
{
FAIL() << "Model should not have dynamic shapes (" << it.get_any_name() << " => " << shape_ << ")";
}
auto shape = shape_.to_shape();
auto& m = inputsMap[it.get_any_name()];
auto tensor = ov::Tensor(type, shape);
@ -265,10 +251,10 @@ void runIE(Target target, const std::string& xmlPath, const std::string& binPath
for (const auto& it : model->outputs())
{
auto type = it.get_element_type();
auto shape = it.get_shape();
auto& m = outputsMap[it.get_any_name()];
auto tensor = infRequest.get_tensor(it);
auto shape = tensor.get_shape();
if (type == ov::element::f32)
{
m.create(std::vector<int>(shape.begin(), shape.end()), CV_32F);
@ -341,22 +327,9 @@ TEST_P(DNNTestOpenVINO, models)
if (targetId == DNN_TARGET_MYRIAD && (false
|| modelName == "person-detection-retail-0013" // ncDeviceOpen:1013 Failed to find booted device after boot
|| modelName == "age-gender-recognition-retail-0013" // ncDeviceOpen:1013 Failed to find booted device after boot
|| modelName == "face-detection-0105" // get_element_type() must be called on a node with exactly one output
|| modelName == "face-detection-0106" // get_element_type() must be called on a node with exactly one output
|| modelName == "person-vehicle-bike-detection-2004" // 2021.4+: ncDeviceOpen:1013 Failed to find booted device after boot
)
)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_MYRIAD, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
if (targetId == DNN_TARGET_OPENCL && (false
|| modelName == "face-detection-0106" // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
)
)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
if (targetId == DNN_TARGET_OPENCL_FP16 && (false
|| modelName == "face-detection-0106" // Operation: 2278 of type ExperimentalDetectronPriorGridGenerator(op::v6) is not supported
)
)
applyTestTag(CV_TEST_TAG_DNN_SKIP_IE_OPENCL_FP16, CV_TEST_TAG_DNN_SKIP_IE_NGRAPH, CV_TEST_TAG_DNN_SKIP_IE_VERSION);
#endif
#if INF_ENGINE_VER_MAJOR_GE(2020020000)
@ -399,14 +372,6 @@ TEST_P(DNNTestOpenVINO, models)
if (targetId == DNN_TARGET_CPU && checkHardwareSupport(CV_CPU_AVX_512F))
eps = 1e-5;
#endif
#if INF_ENGINE_VER_MAJOR_GE(2021030000)
if (targetId == DNN_TARGET_CPU && modelName == "face-detection-0105")
eps = 2e-4;
#endif
#if INF_ENGINE_VER_MAJOR_GE(2021040000)
if (targetId == DNN_TARGET_CPU && modelName == "person-vehicle-bike-detection-2004")
eps = 1e-6;
#endif
EXPECT_EQ(ieOutputsMap.size(), cvOutputsMap.size());
for (auto& srcIt : ieOutputsMap)

View File

@ -17,6 +17,8 @@
#include <opencv2/imgproc.hpp>
#include <chrono>
#ifdef HAVE_GSTREAMER
#include <gst/app/gstappsink.h>
#include <gst/gstbuffer.h>

View File

@ -243,7 +243,7 @@ bool AvifDecoder::readData(Mat &img) {
return false;
}
m_animation.durations.push_back(decoder_->imageTiming.durationInTimescales);
m_animation.durations.push_back(decoder_->imageTiming.duration * 1000);
if (decoder_->image->exif.size > 0) {
m_exif.parseExif(decoder_->image->exif.data, decoder_->image->exif.size);

File diff suppressed because it is too large Load Diff

View File

@ -47,26 +47,98 @@
#include "grfmt_base.hpp"
#include "bitstrm.hpp"
#include <png.h>
#include <zlib.h>
namespace cv
{
struct Chunk { unsigned char* p; uint32_t size; };
struct OP { unsigned char* p; uint32_t size; int x, y, w, h, valid, filters; };
typedef struct {
unsigned char r, g, b;
} rgb;
class APNGFrame {
public:
APNGFrame();
// Destructor
~APNGFrame();
bool setMat(const cv::Mat& src, unsigned delayNum = 1, unsigned delayDen = 1000);
// Getters and Setters
unsigned char* getPixels() const { return _pixels; }
void setPixels(unsigned char* pixels);
unsigned int getWidth() const { return _width; }
void setWidth(unsigned int width);
unsigned int getHeight() const { return _height; }
void setHeight(unsigned int height);
unsigned char getColorType() const { return _colorType; }
void setColorType(unsigned char colorType);
rgb* getPalette() { return _palette; }
void setPalette(const rgb* palette);
unsigned char* getTransparency() { return _transparency; }
void setTransparency(const unsigned char* transparency);
int getPaletteSize() const { return _paletteSize; }
void setPaletteSize(int paletteSize);
int getTransparencySize() const { return _transparencySize; }
void setTransparencySize(int transparencySize);
unsigned int getDelayNum() const { return _delayNum; }
void setDelayNum(unsigned int delayNum);
unsigned int getDelayDen() const { return _delayDen; }
void setDelayDen(unsigned int delayDen);
unsigned char** getRows() const { return _rows; }
void setRows(unsigned char** rows);
private:
unsigned char* _pixels;
unsigned int _width;
unsigned int _height;
unsigned char _colorType;
rgb _palette[256];
unsigned char _transparency[256];
int _paletteSize;
int _transparencySize;
unsigned int _delayNum;
unsigned int _delayDen;
unsigned char** _rows;
};
class PngDecoder CV_FINAL : public BaseImageDecoder
{
public:
PngDecoder();
virtual ~PngDecoder();
bool readData( Mat& img ) CV_OVERRIDE;
bool readHeader() CV_OVERRIDE;
void close();
bool nextPage() CV_OVERRIDE;
ImageDecoder newDecoder() const CV_OVERRIDE;
protected:
static void readDataFromBuf(void* png_ptr, uchar* dst, size_t size);
static void info_fn(png_structp png_ptr, png_infop info_ptr);
static void row_fn(png_structp png_ptr, png_bytep new_row, png_uint_32 row_num, int pass);
bool processing_start(void* frame_ptr, const Mat& img);
bool processing_finish();
void compose_frame(unsigned char** rows_dst, unsigned char** rows_src, unsigned char bop, uint32_t x, uint32_t y, uint32_t w, uint32_t h, int channels);
size_t read_from_io(void* _Buffer, size_t _ElementSize, size_t _ElementCount);
uint32_t read_chunk(Chunk& chunk);
int m_bit_depth;
void* m_png_ptr; // pointer to decompression structure
@ -74,7 +146,25 @@ protected:
void* m_end_info; // pointer to one more image information structure
FILE* m_f;
int m_color_type;
Chunk m_chunkIHDR;
int m_frame_no;
size_t m_buf_pos;
std::vector<Chunk> m_chunksInfo;
APNGFrame frameRaw;
APNGFrame frameNext;
APNGFrame frameCur;
Mat m_mat_raw;
Mat m_mat_next;
uint32_t w0;
uint32_t h0;
uint32_t x0;
uint32_t y0;
uint32_t delay_num;
uint32_t delay_den;
uint32_t dop;
uint32_t bop;
bool m_is_fcTL_loaded;
bool m_is_IDAT_loaded;
};
@ -84,14 +174,40 @@ public:
PngEncoder();
virtual ~PngEncoder();
bool isFormatSupported( int depth ) const CV_OVERRIDE;
bool write( const Mat& img, const std::vector<int>& params ) CV_OVERRIDE;
bool isFormatSupported( int depth ) const CV_OVERRIDE;
bool write( const Mat& img, const std::vector<int>& params ) CV_OVERRIDE;
bool writemulti(const std::vector<Mat>& img_vec, const std::vector<int>& params) CV_OVERRIDE;
bool writeanimation(const Animation& animinfo, const std::vector<int>& params) CV_OVERRIDE;
ImageEncoder newEncoder() const CV_OVERRIDE;
protected:
static void writeDataToBuf(void* png_ptr, uchar* src, size_t size);
static void writeDataToBuf(void* png_ptr, unsigned char* src, size_t size);
static void flushBuf(void* png_ptr);
size_t write_to_io(void const* _Buffer, size_t _ElementSize, size_t _ElementCount, FILE* _Stream);
private:
void writeChunk(FILE* f, const char* name, unsigned char* data, uint32_t length);
void writeIDATs(FILE* f, int frame, unsigned char* data, uint32_t length, uint32_t idat_size);
void processRect(unsigned char* row, int rowbytes, int bpp, int stride, int h, unsigned char* rows);
void deflateRectFin(unsigned char* zbuf, uint32_t* zsize, int bpp, int stride, unsigned char* rows, int zbuf_size, int n);
void deflateRectOp(unsigned char* pdata, int x, int y, int w, int h, int bpp, int stride, int zbuf_size, int n);
bool getRect(uint32_t w, uint32_t h, unsigned char* pimage1, unsigned char* pimage2, unsigned char* ptemp, uint32_t bpp, uint32_t stride, int zbuf_size, uint32_t has_tcolor, uint32_t tcolor, int n);
AutoBuffer<unsigned char> op_zbuf1;
AutoBuffer<unsigned char> op_zbuf2;
AutoBuffer<unsigned char> row_buf;
AutoBuffer<unsigned char> sub_row;
AutoBuffer<unsigned char> up_row;
AutoBuffer<unsigned char> avg_row;
AutoBuffer<unsigned char> paeth_row;
z_stream op_zstream1;
z_stream op_zstream2;
OP op[6];
rgb palette[256];
unsigned char trns[256];
uint32_t palsize, trnssize;
uint32_t next_seq_num;
};
}

View File

@ -776,7 +776,8 @@ imreadanimation_(const String& filename, int flags, int start, int count, Animat
if (current >= start)
{
animation.durations.push_back(decoder->animation().durations[decoder->animation().durations.size() - 1]);
int duration = decoder->animation().durations.size() > 0 ? decoder->animation().durations.back() : 1000;
animation.durations.push_back(duration);
animation.frames.push_back(mat);
}

View File

@ -0,0 +1,436 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
static void readFileBytes(const std::string& fname, std::vector<unsigned char>& buf)
{
FILE * wfile = fopen(fname.c_str(), "rb");
if (wfile != NULL)
{
fseek(wfile, 0, SEEK_END);
size_t wfile_size = ftell(wfile);
fseek(wfile, 0, SEEK_SET);
buf.resize(wfile_size);
size_t data_size = fread(&buf[0], 1, wfile_size, wfile);
if(wfile)
{
fclose(wfile);
}
EXPECT_EQ(data_size, wfile_size);
}
}
static bool fillFrames(Animation& animation, bool hasAlpha, int n = 14)
{
// Set the path to the test image directory and filename for loading.
const string root = cvtest::TS::ptr()->get_data_path();
const string filename = root + "pngsuite/tp1n3p08.png";
EXPECT_TRUE(imreadanimation(filename, animation));
EXPECT_EQ(1000, animation.durations.back());
if (!hasAlpha)
cvtColor(animation.frames[0], animation.frames[0], COLOR_BGRA2BGR);
animation.loop_count = 0xffff; // 0xffff is the maximum value to set.
// Add the first frame with a duration value of 400 milliseconds.
int duration = 80;
animation.durations[0] = duration * 5;
Mat image = animation.frames[0].clone();
putText(animation.frames[0], "0", Point(5, 28), FONT_HERSHEY_SIMPLEX, .5, Scalar(100, 255, 0, 255), 2);
// Define a region of interest (ROI)
Rect roi(2, 16, 26, 16);
// Modify the ROI in n iterations to simulate slight changes in animation frames.
for (int i = 1; i < n; i++)
{
roi.x++;
roi.width -= 2;
RNG rng = theRNG();
for (int x = roi.x; x < roi.x + roi.width; x++)
for (int y = roi.y; y < roi.y + roi.height; y++)
{
if (hasAlpha)
{
Vec4b& pixel = image.at<Vec4b>(y, x);
if (pixel[3] > 0)
{
if (pixel[0] > 10) pixel[0] -= (uchar)rng.uniform(2, 5);
if (pixel[1] > 10) pixel[1] -= (uchar)rng.uniform(2, 5);
if (pixel[2] > 10) pixel[2] -= (uchar)rng.uniform(2, 5);
pixel[3] -= (uchar)rng.uniform(2, 5);
}
}
else
{
Vec3b& pixel = image.at<Vec3b>(y, x);
if (pixel[0] > 50) pixel[0] -= (uchar)rng.uniform(2, 5);
if (pixel[1] > 50) pixel[1] -= (uchar)rng.uniform(2, 5);
if (pixel[2] > 50) pixel[2] -= (uchar)rng.uniform(2, 5);
}
}
// Update the duration and add the modified frame to the animation.
duration += rng.uniform(2, 10); // Increase duration with random value (to be sure different duration values saved correctly).
animation.frames.push_back(image.clone());
putText(animation.frames[i], format("%d", i), Point(5, 28), FONT_HERSHEY_SIMPLEX, .5, Scalar(100, 255, 0, 255), 2);
animation.durations.push_back(duration);
}
// Add two identical frames with the same duration.
if (animation.frames.size() > 1 && animation.frames.size() < 20)
{
animation.durations.push_back(++duration);
animation.frames.push_back(animation.frames.back());
animation.durations.push_back(++duration);
animation.frames.push_back(animation.frames.back());
}
return true;
}
#ifdef HAVE_WEBP
TEST(Imgcodecs_WebP, imwriteanimation_rgba)
{
Animation s_animation, l_animation;
EXPECT_TRUE(fillFrames(s_animation, true));
s_animation.bgcolor = Scalar(50, 100, 150, 128); // different values for test purpose.
// Create a temporary output filename for saving the animation.
string output = cv::tempfile(".webp");
// Write the animation to a .webp file and verify success.
EXPECT_TRUE(imwriteanimation(output, s_animation));
// Read the animation back and compare with the original.
EXPECT_TRUE(imreadanimation(output, l_animation));
// Since the last frames are identical, WebP optimizes by storing only one of them,
// and the duration value for the last frame is handled by libwebp.
size_t expected_frame_count = s_animation.frames.size() - 2;
// Verify that the number of frames matches the expected count.
EXPECT_EQ(expected_frame_count, imcount(output));
EXPECT_EQ(expected_frame_count, l_animation.frames.size());
// Check that the background color and loop count match between saved and loaded animations.
EXPECT_EQ(l_animation.bgcolor, s_animation.bgcolor); // written as BGRA order
EXPECT_EQ(l_animation.loop_count, s_animation.loop_count);
// Verify that the durations of frames match.
for (size_t i = 0; i < l_animation.frames.size() - 1; i++)
EXPECT_EQ(s_animation.durations[i], l_animation.durations[i]);
EXPECT_TRUE(imreadanimation(output, l_animation, 5, 3));
EXPECT_EQ(expected_frame_count + 3, l_animation.frames.size());
EXPECT_EQ(l_animation.frames.size(), l_animation.durations.size());
EXPECT_EQ(0, cvtest::norm(l_animation.frames[5], l_animation.frames[14], NORM_INF));
EXPECT_EQ(0, cvtest::norm(l_animation.frames[6], l_animation.frames[15], NORM_INF));
EXPECT_EQ(0, cvtest::norm(l_animation.frames[7], l_animation.frames[16], NORM_INF));
// Verify whether the imread function successfully loads the first frame
Mat frame = imread(output, IMREAD_UNCHANGED);
EXPECT_EQ(0, cvtest::norm(l_animation.frames[0], frame, NORM_INF));
std::vector<uchar> buf;
readFileBytes(output, buf);
vector<Mat> webp_frames;
EXPECT_TRUE(imdecodemulti(buf, IMREAD_UNCHANGED, webp_frames));
EXPECT_EQ(expected_frame_count, webp_frames.size());
// Clean up by removing the temporary file.
EXPECT_EQ(0, remove(output.c_str()));
}
// Round-trips an RGB (3-channel) animation through WebP and verifies frame
// count, durations, ranged re-reading, single-frame imread and in-memory decode.
TEST(Imgcodecs_WebP, imwriteanimation_rgb)
{
    Animation s_animation, l_animation;
    EXPECT_TRUE(fillFrames(s_animation, false));
    // Create a temporary output filename for saving the animation.
    string output = cv::tempfile(".webp");
    // Write the animation to a .webp file and verify success.
    EXPECT_TRUE(imwriteanimation(output, s_animation));
    // Read the animation back and compare with the original.
    EXPECT_TRUE(imreadanimation(output, l_animation));
    // Since the last frames are identical, WebP optimizes by storing only one of them,
    // and the duration value for the last frame is handled by libwebp.
    size_t expected_frame_count = s_animation.frames.size() - 2;
    // Verify that the number of frames matches the expected count.
    EXPECT_EQ(expected_frame_count, imcount(output));
    EXPECT_EQ(expected_frame_count, l_animation.frames.size());
    // Verify that the durations of frames match.
    // (i + 1 < size) avoids size_t underflow of `size() - 1` if nothing was loaded.
    for (size_t i = 0; i + 1 < l_animation.frames.size(); i++)
        EXPECT_EQ(s_animation.durations[i], l_animation.durations[i]);
    EXPECT_TRUE(imreadanimation(output, l_animation, 5, 3));
    EXPECT_EQ(expected_frame_count + 3, l_animation.frames.size());
    EXPECT_EQ(l_animation.frames.size(), l_animation.durations.size());
    // EXPECT_EQ(0, norm) matches the sibling RGBA test and, unlike
    // EXPECT_TRUE(norm == 0), prints the actual norm on failure.
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[5], l_animation.frames[14], NORM_INF));
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[6], l_animation.frames[15], NORM_INF));
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[7], l_animation.frames[16], NORM_INF));
    // Verify whether the imread function successfully loads the first frame
    Mat frame = imread(output, IMREAD_COLOR);
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[0], frame, NORM_INF));
    std::vector<uchar> buf;
    readFileBytes(output, buf);
    vector<Mat> webp_frames;
    EXPECT_TRUE(imdecodemulti(buf, IMREAD_UNCHANGED, webp_frames));
    EXPECT_EQ(expected_frame_count, webp_frames.size());
    // Clean up by removing the temporary file.
    EXPECT_EQ(0, remove(output.c_str()));
}
// Writes RGBA frames as a multi-page WebP file and loads them back unchanged.
TEST(Imgcodecs_WebP, imwritemulti_rgba)
{
    Animation anim;
    EXPECT_TRUE(fillFrames(anim, true));
    string filename = cv::tempfile(".webp");
    ASSERT_TRUE(imwrite(filename, anim.frames));
    vector<Mat> loaded;
    ASSERT_TRUE(imreadmulti(filename, loaded, IMREAD_UNCHANGED));
    // The two duplicated trailing frames are stored only once by WebP.
    EXPECT_EQ(anim.frames.size() - 2, loaded.size());
    // Source frames must carry an alpha channel.
    EXPECT_EQ(4, anim.frames[0].channels());
    EXPECT_EQ(0, remove(filename.c_str()));
}
// Writes RGB frames as a multi-page WebP file and loads them back.
TEST(Imgcodecs_WebP, imwritemulti_rgb)
{
    Animation anim;
    EXPECT_TRUE(fillFrames(anim, false));
    string filename = cv::tempfile(".webp");
    ASSERT_TRUE(imwrite(filename, anim.frames));
    vector<Mat> loaded;
    ASSERT_TRUE(imreadmulti(filename, loaded));
    // The two duplicated trailing frames are stored only once by WebP.
    EXPECT_EQ(anim.frames.size() - 2, loaded.size());
    EXPECT_EQ(0, remove(filename.c_str()));
}
// Encodes RGBA frames to a WebP byte buffer and decodes them again,
// entirely in memory (no filesystem involved).
TEST(Imgcodecs_WebP, imencode_rgba)
{
    Animation anim;
    EXPECT_TRUE(fillFrames(anim, true, 3));
    std::vector<uchar> encoded;
    EXPECT_TRUE(imencode(".webp", anim.frames, encoded));
    vector<Mat> decoded;
    EXPECT_TRUE(imdecodemulti(encoded, IMREAD_UNCHANGED, decoded));
    // The two duplicated trailing frames collapse into a single stored frame.
    EXPECT_EQ(anim.frames.size() - 2, decoded.size());
}
#endif // HAVE_WEBP
#ifdef HAVE_PNG
// Full APNG round trip for an RGBA animation: write, re-read, compare frame
// counts / durations / pixels, exercise ranged reading, single-frame imread,
// in-memory decoding, and multi-page still-image writing.
TEST(Imgcodecs_APNG, imwriteanimation_rgba)
{
Animation s_animation, l_animation;
EXPECT_TRUE(fillFrames(s_animation, true));
// Create a temporary output filename for saving the animation.
string output = cv::tempfile(".png");
// Write the animation to a .png file and verify success.
EXPECT_TRUE(imwriteanimation(output, s_animation));
// Read the animation back and compare with the original.
EXPECT_TRUE(imreadanimation(output, l_animation));
// The two duplicated trailing frames are stored only once.
size_t expected_frame_count = s_animation.frames.size() - 2;
// Verify that the number of frames matches the expected count.
EXPECT_EQ(expected_frame_count, imcount(output));
EXPECT_EQ(expected_frame_count, l_animation.frames.size());
// Durations and pixel data of all but the last loaded frame must match.
for (size_t i = 0; i < l_animation.frames.size() - 1; i++)
{
EXPECT_EQ(s_animation.durations[i], l_animation.durations[i]);
EXPECT_EQ(0, cvtest::norm(s_animation.frames[i], l_animation.frames[i], NORM_INF));
}
// Ranged read: append frames 5..7 again; totals must grow by 3.
EXPECT_TRUE(imreadanimation(output, l_animation, 5, 3));
EXPECT_EQ(expected_frame_count + 3, l_animation.frames.size());
EXPECT_EQ(l_animation.frames.size(), l_animation.durations.size());
// The appended frames must equal the originally loaded ones.
EXPECT_EQ(0, cvtest::norm(l_animation.frames[5], l_animation.frames[14], NORM_INF));
EXPECT_EQ(0, cvtest::norm(l_animation.frames[6], l_animation.frames[15], NORM_INF));
EXPECT_EQ(0, cvtest::norm(l_animation.frames[7], l_animation.frames[16], NORM_INF));
// Verify whether the imread function successfully loads the first frame
Mat frame = imread(output, IMREAD_UNCHANGED);
EXPECT_EQ(0, cvtest::norm(l_animation.frames[0], frame, NORM_INF));
std::vector<uchar> buf;
readFileBytes(output, buf);
vector<Mat> apng_frames;
// Decoding the raw file bytes must yield the same frame count.
EXPECT_TRUE(imdecodemulti(buf, IMREAD_UNCHANGED, apng_frames));
EXPECT_EQ(expected_frame_count, apng_frames.size());
apng_frames.clear();
// Test saving the animation frames as individual still images.
EXPECT_TRUE(imwrite(output, s_animation.frames));
// Read back the still images into a vector of Mats.
EXPECT_TRUE(imreadmulti(output, apng_frames));
// Expect all frames written as multi-page image
EXPECT_EQ(expected_frame_count, apng_frames.size());
// Clean up by removing the temporary file.
EXPECT_EQ(0, remove(output.c_str()));
}
// Saves an RGB animation as APNG and verifies it loads back bit-exact.
TEST(Imgcodecs_APNG, imwriteanimation_rgb)
{
    Animation src, dst;
    EXPECT_TRUE(fillFrames(src, false));
    string tmpname = cv::tempfile(".png");
    EXPECT_TRUE(imwriteanimation(tmpname, src));
    EXPECT_TRUE(imreadanimation(tmpname, dst));
    // The two duplicated trailing frames collapse into one stored frame.
    EXPECT_EQ(dst.frames.size(), src.frames.size() - 2);
    // All but the last loaded frame must match the source pixel-for-pixel.
    for (size_t i = 0; i < dst.frames.size() - 1; i++)
    {
        EXPECT_EQ(0, cvtest::norm(src.frames[i], dst.frames[i], NORM_INF));
    }
    EXPECT_EQ(0, remove(tmpname.c_str()));
}
// Writes RGBA frames as a multi-page APNG and loads them back unchanged.
TEST(Imgcodecs_APNG, imwritemulti_rgba)
{
    Animation s_animation;
    EXPECT_TRUE(fillFrames(s_animation, true));
    string output = cv::tempfile(".png");
    // EXPECT_TRUE/ASSERT_TRUE instead of EXPECT_EQ(true, ...): consistent with
    // the sibling tests and produces clearer failure messages.
    EXPECT_TRUE(imwrite(output, s_animation.frames));
    vector<Mat> read_frames;
    // ASSERT: if the read fails, the comparisons below would be meaningless.
    ASSERT_TRUE(imreadmulti(output, read_frames, IMREAD_UNCHANGED));
    // The two duplicated trailing frames are stored only once.
    EXPECT_EQ(s_animation.frames.size() - 2, read_frames.size());
    EXPECT_EQ(read_frames.size(), imcount(output));
    EXPECT_EQ(0, remove(output.c_str()));
}
// Writes RGB frames as a multi-page APNG, reloads them and compares pixels.
TEST(Imgcodecs_APNG, imwritemulti_rgb)
{
    Animation anim;
    EXPECT_TRUE(fillFrames(anim, false));
    string tmpfile = cv::tempfile(".png");
    ASSERT_TRUE(imwrite(tmpfile, anim.frames));
    vector<Mat> loaded;
    ASSERT_TRUE(imreadmulti(tmpfile, loaded));
    // The two duplicated trailing frames are stored only once.
    EXPECT_EQ(loaded.size(), anim.frames.size() - 2);
    // The file is no longer needed once the frames are in memory.
    EXPECT_EQ(0, remove(tmpfile.c_str()));
    // Every loaded frame must match its source exactly.
    for (size_t i = 0; i < loaded.size(); i++)
    {
        EXPECT_EQ(0, cvtest::norm(anim.frames[i], loaded[i], NORM_INF));
    }
}
// Writes grayscale frames as a multi-page APNG and verifies the channel count
// produced by each imread flag (default, UNCHANGED, COLOR, GRAYSCALE).
TEST(Imgcodecs_APNG, imwritemulti_gray)
{
    Animation s_animation;
    EXPECT_TRUE(fillFrames(s_animation, false));
    // Convert every frame to single-channel gray before writing.
    for (size_t i = 0; i < s_animation.frames.size(); i++)
    {
        cvtColor(s_animation.frames[i], s_animation.frames[i], COLOR_BGR2GRAY);
    }
    string output = cv::tempfile(".png");
    EXPECT_TRUE(imwrite(output, s_animation.frames));
    vector<Mat> read_frames;
    // Default flags keep the stored single-channel format.
    EXPECT_TRUE(imreadmulti(output, read_frames));
    // Guard the [0] accesses: indexing an empty vector is undefined behavior,
    // so abort this test instead of crashing the whole run if a read fails.
    ASSERT_FALSE(read_frames.empty());
    EXPECT_EQ(1, read_frames[0].channels());
    read_frames.clear();
    EXPECT_TRUE(imreadmulti(output, read_frames, IMREAD_UNCHANGED));
    ASSERT_FALSE(read_frames.empty());
    EXPECT_EQ(1, read_frames[0].channels());
    read_frames.clear();
    // IMREAD_COLOR must expand the gray data to 3 channels.
    EXPECT_TRUE(imreadmulti(output, read_frames, IMREAD_COLOR));
    ASSERT_FALSE(read_frames.empty());
    EXPECT_EQ(3, read_frames[0].channels());
    read_frames.clear();
    EXPECT_TRUE(imreadmulti(output, read_frames, IMREAD_GRAYSCALE));
    EXPECT_EQ(0, remove(output.c_str()));
    // Grayscale read must match the converted source frames exactly.
    for (size_t i = 0; i < read_frames.size(); i++)
    {
        EXPECT_EQ(0, cvtest::norm(s_animation.frames[i], read_frames[i], NORM_INF));
    }
}
// Verifies that the Animation background color survives an APNG write/read
// round trip, both for a non-trivial color and for the all-zero default.
TEST(Imgcodecs_APNG, imwriteanimation_bgcolor)
{
Animation s_animation, l_animation;
EXPECT_TRUE(fillFrames(s_animation, true, 2));
s_animation.bgcolor = Scalar(50, 100, 150, 128); // different values for test purpose.
// Create a temporary output filename for saving the animation.
string output = cv::tempfile(".png");
// Write the animation to a .png file and verify success.
EXPECT_TRUE(imwriteanimation(output, s_animation));
// Read the animation back and compare with the original.
EXPECT_TRUE(imreadanimation(output, l_animation));
// Check that the background color match between saved and loaded animations.
EXPECT_EQ(l_animation.bgcolor, s_animation.bgcolor);
EXPECT_EQ(0, remove(output.c_str()));
// Second pass: repeat the round trip with the default (all-zero) bgcolor.
// NOTE(review): fillFrames is called again on the same Animation here —
// presumably it resets frames/durations rather than appending; confirm.
EXPECT_TRUE(fillFrames(s_animation, true, 2));
s_animation.bgcolor = Scalar();
output = cv::tempfile(".png");
EXPECT_TRUE(imwriteanimation(output, s_animation));
EXPECT_TRUE(imreadanimation(output, l_animation));
EXPECT_EQ(l_animation.bgcolor, s_animation.bgcolor);
EXPECT_EQ(0, remove(output.c_str()));
}
// Encodes RGBA frames to a PNG byte buffer and decodes them again,
// entirely in memory (no filesystem involved).
TEST(Imgcodecs_APNG, imencode_rgba)
{
    Animation anim;
    EXPECT_TRUE(fillFrames(anim, true, 3));
    std::vector<uchar> encoded;
    EXPECT_TRUE(imencode(".png", anim.frames, encoded));
    vector<Mat> decoded;
    EXPECT_TRUE(imdecodemulti(encoded, IMREAD_UNCHANGED, decoded));
    // The two duplicated trailing frames collapse into a single stored frame.
    EXPECT_EQ(decoded.size(), anim.frames.size() - 2);
}
#endif // HAVE_PNG
}} // namespace

View File

@ -1,6 +1,7 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
namespace opencv_test { namespace {
@ -113,234 +114,6 @@ TEST(Imgcodecs_WebP, encode_decode_with_alpha_webp)
EXPECT_EQ(512, img_webp_bgr.rows);
}
// Round-trips an RGBA animation through WebP: builds frames procedurally,
// writes them with imwriteanimation, then verifies frame counts, durations,
// metadata and pixel data after reading them back in several ways.
TEST(Imgcodecs_WebP, load_save_animation_rgba)
{
    RNG rng = theRNG();
    // Set the path to the test image directory and filename for loading.
    const string root = cvtest::TS::ptr()->get_data_path();
    const string filename = root + "pngsuite/tp1n3p08.png";
    // Create an Animation object using the default constructor.
    // This initializes the loop count to 0 (infinite looping), background color to 0 (transparent)
    Animation l_animation;
    // Create an Animation object with custom parameters.
    int loop_count = 0xffff; // 0xffff is the maximum value to set.
    Scalar bgcolor(125, 126, 127, 128); // different values for test purpose.
    Animation s_animation(loop_count, bgcolor);
    // Load the image file with alpha channel (IMREAD_UNCHANGED).
    Mat image = imread(filename, IMREAD_UNCHANGED);
    ASSERT_FALSE(image.empty()) << "Failed to load image: " << filename;
    // Add the first frame with a duration value of 500 milliseconds.
    int duration = 100;
    s_animation.durations.push_back(duration * 5);
    s_animation.frames.push_back(image.clone()); // Store the first frame.
    putText(s_animation.frames[0], "0", Point(5, 28), FONT_HERSHEY_SIMPLEX, .5, Scalar(100, 255, 0, 255), 2);
    // Define a region of interest (ROI) in the loaded image for manipulation.
    Mat roi = image(Rect(0, 16, 32, 16)); // Select a subregion of the image.
    // Modify the ROI in 13 iterations to simulate slight changes in animation frames.
    for (int i = 1; i < 14; i++)
    {
        for (int x = 0; x < roi.rows; x++)
            for (int y = 0; y < roi.cols; y++)
            {
                // Apply random changes to pixel values to create animation variations.
                Vec4b& pixel = roi.at<Vec4b>(x, y);
                if (pixel[3] > 0)
                {
                    if (pixel[0] > 10) pixel[0] -= (uchar)rng.uniform(3, 10); // Reduce blue channel.
                    if (pixel[1] > 10) pixel[1] -= (uchar)rng.uniform(3, 10); // Reduce green channel.
                    if (pixel[2] > 10) pixel[2] -= (uchar)rng.uniform(3, 10); // Reduce red channel.
                    pixel[3] -= (uchar)rng.uniform(2, 5); // Reduce alpha channel.
                }
            }
        // Update the duration and add the modified frame to the animation.
        duration += rng.uniform(2, 10); // Increase duration with random value (to be sure different duration values saved correctly).
        s_animation.frames.push_back(image.clone());
        putText(s_animation.frames[i], format("%d", i), Point(5, 28), FONT_HERSHEY_SIMPLEX, .5, Scalar(100, 255, 0, 255), 2);
        s_animation.durations.push_back(duration);
    }
    // Add two identical frames with the same duration.
    s_animation.durations.push_back(duration);
    s_animation.frames.push_back(s_animation.frames[13].clone());
    s_animation.durations.push_back(duration);
    s_animation.frames.push_back(s_animation.frames[13].clone());
    // Create a temporary output filename for saving the animation.
    string output = cv::tempfile(".webp");
    // Write the animation to a .webp file and verify success.
    // (A stray debug call that also wrote "output.webp" into the current
    // working directory — and never cleaned it up — has been removed.)
    EXPECT_TRUE(imwriteanimation(output, s_animation));
    // Read the animation back and compare with the original.
    EXPECT_TRUE(imreadanimation(output, l_animation));
    // Since the last frames are identical, WebP optimizes by storing only one of them,
    // and the duration value for the last frame is handled by libwebp.
    size_t expected_frame_count = s_animation.frames.size() - 2;
    // Verify that the number of frames matches the expected count.
    EXPECT_EQ(imcount(output), expected_frame_count);
    EXPECT_EQ(l_animation.frames.size(), expected_frame_count);
    // Check that the background color and loop count match between saved and loaded animations.
    EXPECT_EQ(l_animation.bgcolor, s_animation.bgcolor); // written as BGRA order
    EXPECT_EQ(l_animation.loop_count, s_animation.loop_count);
    // Verify that the durations of frames match.
    for (size_t i = 0; i < l_animation.frames.size() - 1; i++)
        EXPECT_EQ(s_animation.durations[i], l_animation.durations[i]);
    EXPECT_TRUE(imreadanimation(output, l_animation, 5, 3));
    EXPECT_EQ(l_animation.frames.size(), expected_frame_count + 3);
    EXPECT_EQ(l_animation.frames.size(), l_animation.durations.size());
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[5], l_animation.frames[14], NORM_INF));
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[6], l_animation.frames[15], NORM_INF));
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[7], l_animation.frames[16], NORM_INF));
    // Verify whether the imread function successfully loads the first frame
    Mat frame = imread(output, IMREAD_UNCHANGED);
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[0], frame, NORM_INF));
    std::vector<uchar> buf;
    readFileBytes(output, buf);
    vector<Mat> webp_frames;
    EXPECT_TRUE(imdecodemulti(buf, IMREAD_UNCHANGED, webp_frames));
    EXPECT_EQ(expected_frame_count, webp_frames.size());
    webp_frames.clear();
    // Test saving the animation frames as individual still images.
    EXPECT_TRUE(imwrite(output, s_animation.frames));
    // Read back the still images into a vector of Mats.
    EXPECT_TRUE(imreadmulti(output, webp_frames));
    // Expect all frames written as multi-page image
    expected_frame_count = 14;
    EXPECT_EQ(expected_frame_count, webp_frames.size());
    // Test encoding and decoding the images in memory (without saving to disk).
    webp_frames.clear();
    EXPECT_TRUE(imencode(".webp", s_animation.frames, buf));
    EXPECT_TRUE(imdecodemulti(buf, IMREAD_UNCHANGED, webp_frames));
    EXPECT_EQ(expected_frame_count, webp_frames.size());
    // Clean up by removing the temporary file.
    EXPECT_EQ(0, remove(output.c_str()));
}
// RGB (no alpha) counterpart of load_save_animation_rgba.
// Assertions rewritten to the idiomatic forms used by the RGBA sibling:
// EXPECT_TRUE(x) instead of EXPECT_EQ(true, x), and EXPECT_EQ(0, norm)
// instead of EXPECT_TRUE(norm == 0), so failures report actual values.
TEST(Imgcodecs_WebP, load_save_animation_rgb)
{
    RNG rng = theRNG();
    // Set the path to the test image directory and filename for loading.
    const string root = cvtest::TS::ptr()->get_data_path();
    const string filename = root + "pngsuite/tp1n3p08.png";
    // Create an Animation object using the default constructor.
    // This initializes the loop count to 0 (infinite looping), background color to 0 (transparent)
    Animation l_animation;
    // Create an Animation object with custom parameters.
    int loop_count = 0xffff; // 0xffff is the maximum value to set.
    Scalar bgcolor(125, 126, 127, 128); // different values for test purpose.
    Animation s_animation(loop_count, bgcolor);
    // Load the image file without alpha channel
    Mat image = imread(filename);
    ASSERT_FALSE(image.empty()) << "Failed to load image: " << filename;
    // Add the first frame with a duration value of 500 milliseconds.
    int duration = 100;
    s_animation.durations.push_back(duration * 5);
    s_animation.frames.push_back(image.clone()); // Store the first frame.
    putText(s_animation.frames[0], "0", Point(5, 28), FONT_HERSHEY_SIMPLEX, .5, Scalar(100, 255, 0, 255), 2);
    // Define a region of interest (ROI) in the loaded image for manipulation.
    Mat roi = image(Rect(0, 16, 32, 16)); // Select a subregion of the image.
    // Modify the ROI in 13 iterations to simulate slight changes in animation frames.
    for (int i = 1; i < 14; i++)
    {
        for (int x = 0; x < roi.rows; x++)
            for (int y = 0; y < roi.cols; y++)
            {
                // Apply random changes to pixel values to create animation variations.
                Vec3b& pixel = roi.at<Vec3b>(x, y);
                if (pixel[0] > 50) pixel[0] -= (uchar)rng.uniform(3, 10); // Reduce blue channel.
                if (pixel[1] > 50) pixel[1] -= (uchar)rng.uniform(3, 10); // Reduce green channel.
                if (pixel[2] > 50) pixel[2] -= (uchar)rng.uniform(3, 10); // Reduce red channel.
            }
        // Update the duration and add the modified frame to the animation.
        duration += rng.uniform(2, 10); // Increase duration with random value (to be sure different duration values saved correctly).
        s_animation.frames.push_back(image.clone());
        putText(s_animation.frames[i], format("%d", i), Point(5, 28), FONT_HERSHEY_SIMPLEX, .5, Scalar(100, 255, 0, 255), 2);
        s_animation.durations.push_back(duration);
    }
    // Add two identical frames with the same duration.
    s_animation.durations.push_back(duration);
    s_animation.frames.push_back(s_animation.frames[13].clone());
    s_animation.durations.push_back(duration);
    s_animation.frames.push_back(s_animation.frames[13].clone());
    // Create a temporary output filename for saving the animation.
    string output = cv::tempfile(".webp");
    // Write the animation to a .webp file and verify success.
    EXPECT_TRUE(imwriteanimation(output, s_animation));
    // Read the animation back and compare with the original.
    EXPECT_TRUE(imreadanimation(output, l_animation));
    // Since the last frames are identical, WebP optimizes by storing only one of them,
    // and the duration value for the last frame is handled by libwebp.
    size_t expected_frame_count = s_animation.frames.size() - 2;
    // Verify that the number of frames matches the expected count.
    EXPECT_EQ(imcount(output), expected_frame_count);
    EXPECT_EQ(l_animation.frames.size(), expected_frame_count);
    // Check that the background color and loop count match between saved and loaded animations.
    EXPECT_EQ(l_animation.bgcolor, s_animation.bgcolor); // written as BGRA order
    EXPECT_EQ(l_animation.loop_count, s_animation.loop_count);
    // Verify that the durations of frames match.
    for (size_t i = 0; i < l_animation.frames.size() - 1; i++)
        EXPECT_EQ(s_animation.durations[i], l_animation.durations[i]);
    EXPECT_TRUE(imreadanimation(output, l_animation, 5, 3));
    EXPECT_EQ(l_animation.frames.size(), expected_frame_count + 3);
    EXPECT_EQ(l_animation.frames.size(), l_animation.durations.size());
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[5], l_animation.frames[14], NORM_INF));
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[6], l_animation.frames[15], NORM_INF));
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[7], l_animation.frames[16], NORM_INF));
    // Verify whether the imread function successfully loads the first frame
    Mat frame = imread(output, IMREAD_COLOR);
    EXPECT_EQ(0, cvtest::norm(l_animation.frames[0], frame, NORM_INF));
    std::vector<uchar> buf;
    readFileBytes(output, buf);
    vector<Mat> webp_frames;
    EXPECT_TRUE(imdecodemulti(buf, IMREAD_UNCHANGED, webp_frames));
    EXPECT_EQ(webp_frames.size(), expected_frame_count);
    // Clean up by removing the temporary file.
    EXPECT_EQ(0, remove(output.c_str()));
}
#endif // HAVE_WEBP
}} // namespace

View File

@ -12,6 +12,10 @@ ocv_add_dispatched_file(smooth SSE2 SSE4_1 AVX2)
ocv_add_dispatched_file(sumpixels SSE2 AVX2 AVX512_SKX)
ocv_define_module(imgproc opencv_core WRAP java objc python js)
if(OPENCV_CORE_EXCLUDE_C_API)
ocv_target_compile_definitions(${the_module} PRIVATE "OPENCV_EXCLUDE_C_API=1")
endif()
if(HAVE_IPP)
# OPENCV_IPP_ENABLE_ALL is defined in modules/core/CMakeList.txt
OCV_OPTION(OPENCV_IPP_GAUSSIAN_BLUR "Enable IPP optimizations for GaussianBlur (+8Mb in binary size)" OPENCV_IPP_ENABLE_ALL)

View File

@ -387,6 +387,7 @@ void cvtColor( InputArray _src, OutputArray _dst, int code, int dcn, AlgorithmHi
}
} //namespace cv
#ifndef OPENCV_EXCLUDE_C_API
CV_IMPL void
cvCvtColor( const CvArr* srcarr, CvArr* dstarr, int code )
@ -397,3 +398,5 @@ cvCvtColor( const CvArr* srcarr, CvArr* dstarr, int code )
cv::cvtColor(src, dst, code, dst.channels());
CV_Assert( dst.data == dst0.data );
}
#endif

View File

@ -4245,6 +4245,7 @@ void cv::resize( InputArray _src, OutputArray _dst, Size dsize,
hal::resize(src.type(), src.data, src.step, src.cols, src.rows, dst.data, dst.step, dst.cols, dst.rows, inv_scale_x, inv_scale_y, interpolation);
}
#ifndef OPENCV_EXCLUDE_C_API
CV_IMPL void
cvResize( const CvArr* srcarr, CvArr* dstarr, int method )
@ -4255,4 +4256,5 @@ cvResize( const CvArr* srcarr, CvArr* dstarr, int method )
(double)dst.rows/src.rows, method );
}
#endif
/* End of file. */

View File

@ -1078,6 +1078,7 @@ int CV_CompareHistTest::validate_test_results( int /*test_case_idx*/ )
i == CV_COMP_BHATTACHARYYA ? "Bhattacharyya" :
i == CV_COMP_KL_DIV ? "Kullback-Leibler" : "Unknown";
const auto thresh = FLT_EPSILON*14*MAX(fabs(v0),0.17);
if( cvIsNaN(v) || cvIsInf(v) )
{
ts->printf( cvtest::TS::LOG, "The comparison result using the method #%d (%s) is invalid (=%g)\n",
@ -1085,7 +1086,7 @@ int CV_CompareHistTest::validate_test_results( int /*test_case_idx*/ )
code = cvtest::TS::FAIL_INVALID_OUTPUT;
break;
}
else if( fabs(v0 - v) > FLT_EPSILON*14*MAX(fabs(v0),0.1) )
else if( fabs(v0 - v) > thresh )
{
ts->printf( cvtest::TS::LOG, "The comparison result using the method #%d (%s)\n\tis inaccurate (=%g, should be =%g)\n",
i, method_name, v, v0 );

View File

@ -2,6 +2,7 @@ package org.opencv.android;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.core.MatOfInt;
import org.opencv.imgproc.Imgproc;
@ -123,8 +124,11 @@ public class NativeCameraView extends CameraBridgeViewBase {
return false;
}
MatOfInt params = new MatOfInt(Videoio.CAP_PROP_FRAME_WIDTH, width,
Videoio.CAP_PROP_FRAME_HEIGHT, height);
Log.d(TAG, "Try to open camera with index " + localCameraIndex);
mCamera = new VideoCapture(localCameraIndex, Videoio.CAP_ANDROID);
mCamera = new VideoCapture(localCameraIndex, Videoio.CAP_ANDROID, params);
if (mCamera == null)
return false;
@ -139,9 +143,6 @@ public class NativeCameraView extends CameraBridgeViewBase {
mFrame = new RotatedCameraFrame(new NativeCameraFrame(mCamera), frameRotation);
mCamera.set(Videoio.CAP_PROP_FRAME_WIDTH, width);
mCamera.set(Videoio.CAP_PROP_FRAME_HEIGHT, height);
if (frameRotation % 180 == 0) {
mFrameWidth = (int) mCamera.get(Videoio.CAP_PROP_FRAME_WIDTH);
mFrameHeight = (int) mCamera.get(Videoio.CAP_PROP_FRAME_HEIGHT);
@ -181,10 +182,9 @@ public class NativeCameraView extends CameraBridgeViewBase {
@Override
public Mat rgba() {
mCapture.set(Videoio.CAP_PROP_FOURCC, VideoWriter.fourcc('R','G','B','3'));
mCapture.retrieve(mBgr);
Log.d(TAG, "Retrived frame with size " + mBgr.cols() + "x" + mBgr.rows() + " and channels: " + mBgr.channels());
Imgproc.cvtColor(mBgr, mRgba, Imgproc.COLOR_RGB2RGBA);
mCapture.set(Videoio.CAP_PROP_FOURCC, VideoWriter.fourcc('R','G','B','4'));
mCapture.retrieve(mRgba);
Log.d(TAG, "Retrieved frame with size " + mRgba.cols() + "x" + mRgba.rows() + " and channels: " + mRgba.channels());
return mRgba;
}
@ -192,7 +192,7 @@ public class NativeCameraView extends CameraBridgeViewBase {
public Mat gray() {
mCapture.set(Videoio.CAP_PROP_FOURCC, VideoWriter.fourcc('G','R','E','Y'));
mCapture.retrieve(mGray);
Log.d(TAG, "Retrived frame with size " + mGray.cols() + "x" + mGray.rows() + " and channels: " + mGray.channels());
Log.d(TAG, "Retrieved frame with size " + mGray.cols() + "x" + mGray.rows() + " and channels: " + mGray.channels());
return mGray;
}
@ -200,20 +200,17 @@ public class NativeCameraView extends CameraBridgeViewBase {
mCapture = capture;
mGray = new Mat();
mRgba = new Mat();
mBgr = new Mat();
}
@Override
public void release() {
if (mGray != null) mGray.release();
if (mRgba != null) mRgba.release();
if (mBgr != null) mBgr.release();
}
private VideoCapture mCapture;
private Mat mRgba;
private Mat mGray;
private Mat mBgr;
};
private class CameraWorker implements Runnable {

View File

@ -19,7 +19,7 @@ def main():
os.chdir(SCRIPT_DIR)
package_name = 'opencv'
package_version = os.environ.get('OPENCV_VERSION', '4.10.0') # TODO
package_version = os.environ.get('OPENCV_VERSION', '4.11.0') # TODO
long_description = 'Open Source Computer Vision Library Python bindings' # TODO
@ -66,6 +66,7 @@ def main():
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: C++",
"Programming Language :: Python :: Implementation :: CPython",
"Topic :: Scientific/Engineering",

View File

@ -719,6 +719,25 @@ enum VideoCaptureOBSensorProperties{
//! @} videoio_flags_others
/** @brief Read data stream interface
*/
class CV_EXPORTS_W IStreamReader
{
public:
virtual ~IStreamReader();
/** @brief Read bytes from stream */
virtual long long read(char* buffer, long long size) = 0;
/** @brief Sets the stream position
*
* @param offset Seek offset
* @param origin SEEK_SET / SEEK_END / SEEK_CUR
*
* @see fseek
*/
virtual long long seek(long long offset, int origin) = 0;
};
class IVideoCapture;
//! @cond IGNORED
@ -798,6 +817,14 @@ public:
*/
CV_WRAP explicit VideoCapture(int index, int apiPreference, const std::vector<int>& params);
/** @overload
@brief Opens a video using data stream.
The `params` parameter allows to specify extra parameters encoded as pairs `(paramId_1, paramValue_1, paramId_2, paramValue_2, ...)`.
See cv::VideoCaptureProperties
*/
CV_WRAP VideoCapture(const Ptr<IStreamReader>& source, int apiPreference, const std::vector<int>& params);
/** @brief Default destructor
The method first calls VideoCapture::release to close the already opened file or camera.
@ -852,6 +879,19 @@ public:
*/
CV_WRAP virtual bool open(int index, int apiPreference, const std::vector<int>& params);
/** @brief Opens a video using data stream.
@overload
The `params` parameter allows to specify extra parameters encoded as pairs `(paramId_1, paramValue_1, paramId_2, paramValue_2, ...)`.
See cv::VideoCaptureProperties
@return `true` if the file has been successfully opened
The method first calls VideoCapture::release to close the already opened file or camera.
*/
CV_WRAP virtual bool open(const Ptr<IStreamReader>& source, int apiPreference, const std::vector<int>& params);
/** @brief Returns true if video capturing has been initialized already.
If the previous call to VideoCapture constructor or VideoCapture::open() succeeded, the method returns

View File

@ -35,6 +35,9 @@ CV_EXPORTS_W std::vector<VideoCaptureAPIs> getCameraBackends();
/** @brief Returns list of available backends which works via `cv::VideoCapture(filename)` */
CV_EXPORTS_W std::vector<VideoCaptureAPIs> getStreamBackends();
/** @brief Returns list of available backends which works via `cv::VideoCapture(buffer)` */
CV_EXPORTS_W std::vector<VideoCaptureAPIs> getStreamBufferedBackends();
/** @brief Returns list of available backends which works via `cv::VideoWriter()` */
CV_EXPORTS_W std::vector<VideoCaptureAPIs> getWriterBackends();
@ -58,6 +61,13 @@ CV_EXPORTS_W std::string getStreamBackendPluginVersion(
CV_OUT int& version_API
);
/** @brief Returns description and ABI/API version of videoio plugin's buffer capture interface */
CV_EXPORTS_W std::string getStreamBufferedBackendPluginVersion(
VideoCaptureAPIs api,
CV_OUT int& version_ABI,
CV_OUT int& version_API
);
/** @brief Returns description and ABI/API version of videoio plugin's writer interface */
CV_EXPORTS_W std::string getWriterBackendPluginVersion(
VideoCaptureAPIs api,

View File

@ -10,6 +10,37 @@
namespace cv {
CV_EXPORTS std::string icvExtractPattern(const std::string& filename, unsigned *offset);
// Adapter that exposes plugin-provided C callbacks (an opaque handle plus
// read/seek function pointers) through the cv::IStreamReader interface.
class PluginStreamReader : public IStreamReader
{
public:
// Stores the opaque handle and the two callbacks. Ownership of _opaque
// stays with the caller (the plugin side); this class does not free it.
PluginStreamReader(void* _opaque,
long long (*_read)(void* opaque, char* buffer, long long size),
long long (*_seek)(void* opaque, long long offset, int way))
{
opaque = _opaque;
readCallback = _read;
seekCallback = _seek;
}
virtual ~PluginStreamReader() {}
// Forwards to the plugin's read callback; returns its result
// (number of bytes read, per the IStreamReader contract).
long long read(char* buffer, long long size) override
{
return readCallback(opaque, buffer, size);
}
// Forwards to the plugin's seek callback.
// 'way' follows fseek semantics: SEEK_SET / SEEK_CUR / SEEK_END.
long long seek(long long offset, int way) override
{
return seekCallback(opaque, offset, way);
}
private:
void* opaque;                                                       // plugin-side handle passed back to every callback
long long (*readCallback)(void* opaque, char* buffer, long long size);
long long (*seekCallback)(void* opaque, long long offset, int way);
};
}
#endif // OPENCV_VIDEOIO_UTILS_PRIVATE_HPP

View File

@ -14,7 +14,8 @@
"func_arg_fix" : {
"VideoCapture" : {
"(BOOL)open:(int)index apiPreference:(int)apiPreference" : { "open" : {"name" : "openWithIndex"} },
"(BOOL)open:(int)index apiPreference:(int)apiPreference params:(IntVector*)params" : { "open" : {"name" : "openWithIndexAndParameters"} }
"(BOOL)open:(int)index apiPreference:(int)apiPreference params:(IntVector*)params" : { "open" : {"name" : "openWithIndexAndParameters"} },
"(BOOL)open:(IStreamReader*)source apiPreference:(int)apiPreference params:(IntVector*)params" : { "open" : {"name" : "openWithStreamReader"} }
}
}
}

View File

@ -31,4 +31,114 @@ template<> bool pyopencv_to(PyObject* obj, cv::VideoCapture& stream, const ArgIn
return true;
}
// Bridges a Python file-like object (io.BufferedIOBase) to cv::IStreamReader.
// Holds a strong reference to the Python object and calls its .read()/.seek()
// methods through the CPython API, acquiring the GIL around every call.
class PythonStreamReader : public cv::IStreamReader
{
public:
// Takes a borrowed PyObject* and promotes it to a strong reference.
PythonStreamReader(PyObject* _obj = nullptr) : obj(_obj)
{
if (obj)
Py_INCREF(obj);
}
// Releases the strong reference taken in the constructor.
~PythonStreamReader()
{
if (obj)
Py_DECREF(obj);
}
// Calls obj.read(size) and copies the returned bytes into 'buffer'.
// Returns the number of bytes actually read (<= size).
long long read(char* buffer, long long size) CV_OVERRIDE
{
if (!obj)
return 0;
PyObject* ioBase = reinterpret_cast<PyObject*>(obj);
// All CPython calls below require the GIL.
PyGILState_STATE gstate;
gstate = PyGILState_Ensure();
PyObject* py_size = pyopencv_from(static_cast<int>(size));
// NOTE(review): the PyString_FromString("read") object appears to be
// created per call and never DECREF'ed — possible reference leak; confirm.
PyObject* res = PyObject_CallMethodObjArgs(ioBase, PyString_FromString("read"), py_size, NULL);
// Error flags are captured while the GIL is held and raised as C++
// exceptions only after it is released.
bool hasPyReadError = PyErr_Occurred() != nullptr;
char* src = PyBytes_AsString(res);
size_t len = static_cast<size_t>(PyBytes_Size(res));
bool hasPyBytesError = PyErr_Occurred() != nullptr;
// Copy only if the buffer view is valid and fits the caller's buffer.
if (src && len <= static_cast<size_t>(size))
{
std::memcpy(buffer, src, len);
}
Py_DECREF(res);
Py_DECREF(py_size);
PyGILState_Release(gstate);
if (hasPyReadError)
CV_Error(cv::Error::StsError, "Python .read() call error");
if (hasPyBytesError)
CV_Error(cv::Error::StsError, "Python buffer access error");
CV_CheckLE(len, static_cast<size_t>(size), "Stream chunk size should be less or equal than requested size");
return len;
}
// Calls obj.seek(offset, way) and returns the resulting stream position.
long long seek(long long offset, int way) CV_OVERRIDE
{
if (!obj)
return 0;
PyObject* ioBase = reinterpret_cast<PyObject*>(obj);
PyGILState_STATE gstate;
gstate = PyGILState_Ensure();
// NOTE(review): offset is narrowed to int here before conversion —
// large (>2GB) offsets would be truncated; confirm intended.
PyObject* py_offset = pyopencv_from(static_cast<int>(offset));
PyObject* py_whence = pyopencv_from(way);
PyObject* res = PyObject_CallMethodObjArgs(ioBase, PyString_FromString("seek"), py_offset, py_whence, NULL);
bool hasPySeekError = PyErr_Occurred() != nullptr;
long long pos = PyLong_AsLongLong(res);
bool hasPyConvertError = PyErr_Occurred() != nullptr;
Py_DECREF(res);
Py_DECREF(py_offset);
Py_DECREF(py_whence);
PyGILState_Release(gstate);
if (hasPySeekError)
CV_Error(cv::Error::StsError, "Python .seek() call error");
if (hasPyConvertError)
CV_Error(cv::Error::StsError, "Python .seek() result => long long conversion error");
return pos;
}
private:
PyObject* obj;  // strong reference to the Python stream object (may be null)
};
// Converter used by the generated Python bindings: accepts any object derived
// from io.BufferedIOBase and wraps it into a cv::IStreamReader.
// Returns false (with a Python TypeError set) for any other object.
template<>
bool pyopencv_to(PyObject* obj, Ptr<cv::IStreamReader>& p, const ArgInfo&)
{
if (!obj)
return false;
// Look up io.BufferedIOBase to validate the argument's type.
PyObject* ioModule = PyImport_ImportModule("io");
PyObject* type = PyObject_GetAttrString(ioModule, "BufferedIOBase");
Py_DECREF(ioModule);
bool isValidPyType = PyObject_IsInstance(obj, type) == 1;
Py_DECREF(type);
if (!isValidPyType)
{
PyErr_SetString(PyExc_TypeError, "Input stream should be derived from io.BufferedIOBase");
return false;
}
// Only wrap the object if no Python error is pending from the checks above.
if (!PyErr_Occurred()) {
p = makePtr<PythonStreamReader>(obj);
return true;
}
return false;
}
#endif // HAVE_OPENCV_VIDEOIO

View File

@ -3,6 +3,8 @@ from __future__ import print_function
import numpy as np
import cv2 as cv
import io
import sys
from tests_common import NewOpenCVTests
@ -21,5 +23,69 @@ class Bindings(NewOpenCVTests):
for backend in backends:
self.check_name(cv.videoio_registry.getBackendName(backend))
def test_capture_stream_file(self):
if sys.version_info[0] < 3:
raise self.skipTest('Python 3.x required')
api_pref = None
for backend in cv.videoio_registry.getStreamBufferedBackends():
if not cv.videoio_registry.hasBackend(backend):
continue
if not cv.videoio_registry.isBackendBuiltIn(backend):
_, abi, api = cv.videoio_registry.getStreamBufferedBackendPluginVersion(backend)
if (abi < 1 or (abi == 1 and api < 2)):
continue
api_pref = backend
break
if not api_pref:
raise self.skipTest("No available backends")
with open(self.find_file("cv/video/768x576.avi"), "rb") as f:
cap = cv.VideoCapture(f, api_pref, [])
self.assertTrue(cap.isOpened())
hasFrame, frame = cap.read()
self.assertTrue(hasFrame)
self.assertEqual(frame.shape, (576, 768, 3))
def test_capture_stream_buffer(self):
    # Verify that VideoCapture accepts a user-defined io.BufferedIOBase
    # subclass (read/seek adapter over a file) and decodes a frame.
    if sys.version_info[0] < 3:
        raise self.skipTest('Python 3.x required')
    chosen = None
    for candidate in cv.videoio_registry.getStreamBufferedBackends():
        if not cv.videoio_registry.hasBackend(candidate):
            continue
        if not cv.videoio_registry.isBackendBuiltIn(candidate):
            # Plugin backends must expose stream-capture support:
            # require ABI >= 1 and, within ABI 1, API >= 2.
            _, abi, api = cv.videoio_registry.getStreamBufferedBackendPluginVersion(candidate)
            if abi < 1 or (abi == 1 and api < 2):
                continue
        chosen = candidate
        break
    if not chosen:
        raise self.skipTest("No available backends")

    class FileBackedStream(io.BufferedIOBase):
        # Minimal read/seek adapter over a plain binary file.
        def __init__(self, filepath):
            self.f = open(filepath, "rb")

        def read(self, size=-1):
            return self.f.read(size)

        def seek(self, offset, whence):
            return self.f.seek(offset, whence)

        def __del__(self):
            self.f.close()

    reader = FileBackedStream(self.find_file("cv/video/768x576.avi"))
    capture = cv.VideoCapture(reader, chosen, [])
    self.assertTrue(capture.isOpened())
    ok, frame = capture.read()
    self.assertTrue(ok)
    self.assertEqual(frame.shape, (576, 768, 3))
if __name__ == '__main__':
NewOpenCVTests.bootstrap()

View File

@ -18,6 +18,7 @@ public:
virtual ~IBackend() {}
virtual Ptr<IVideoCapture> createCapture(int camera, const VideoCaptureParameters& params) const = 0;
virtual Ptr<IVideoCapture> createCapture(const std::string &filename, const VideoCaptureParameters& params) const = 0;
virtual Ptr<IVideoCapture> createCapture(const Ptr<IStreamReader>&stream, const VideoCaptureParameters& params) const = 0;
virtual Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps, const cv::Size& sz,
const VideoWriterParameters& params) const = 0;
};
@ -34,15 +35,19 @@ public:
typedef Ptr<IVideoCapture> (*FN_createCaptureFile)(const std::string & filename);
typedef Ptr<IVideoCapture> (*FN_createCaptureCamera)(int camera);
typedef Ptr<IVideoCapture> (*FN_createCaptureStream)(const Ptr<IStreamReader>& stream);
typedef Ptr<IVideoCapture> (*FN_createCaptureFileWithParams)(const std::string & filename, const VideoCaptureParameters& params);
typedef Ptr<IVideoCapture> (*FN_createCaptureCameraWithParams)(int camera, const VideoCaptureParameters& params);
typedef Ptr<IVideoCapture> (*FN_createCaptureStreamWithParams)(const Ptr<IStreamReader>& stream, const VideoCaptureParameters& params);
typedef Ptr<IVideoWriter> (*FN_createWriter)(const std::string& filename, int fourcc, double fps, const Size& sz,
const VideoWriterParameters& params);
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFile createCaptureFile,
FN_createCaptureCamera createCaptureCamera,
FN_createCaptureStream createCaptureStream,
FN_createWriter createWriter);
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFileWithParams createCaptureFile,
FN_createCaptureCameraWithParams createCaptureCamera,
FN_createCaptureStreamWithParams createCaptureStream,
FN_createWriter createWriter);
Ptr<IBackendFactory> createPluginBackendFactory(VideoCaptureAPIs id, const char* baseName);

View File

@ -208,6 +208,7 @@ public:
Ptr<IVideoCapture> createCapture(int camera, const VideoCaptureParameters& params) const CV_OVERRIDE;
Ptr<IVideoCapture> createCapture(const std::string &filename) const;
Ptr<IVideoCapture> createCapture(const std::string &filename, const VideoCaptureParameters& params) const CV_OVERRIDE;
Ptr<IVideoCapture> createCapture(const Ptr<IStreamReader>& stream, const VideoCaptureParameters& params) const CV_OVERRIDE;
Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps,
const cv::Size& sz, const VideoWriterParameters& params) const CV_OVERRIDE;
@ -447,16 +448,52 @@ class PluginCapture : public cv::IVideoCapture
{
const OpenCV_VideoIO_Capture_Plugin_API* plugin_api_;
CvPluginCapture capture_;
Ptr<IStreamReader> readStream_;
public:
static
Ptr<PluginCapture> create(const OpenCV_VideoIO_Capture_Plugin_API* plugin_api,
const std::string &filename, int camera, const VideoCaptureParameters& params)
const std::string &filename, const Ptr<IStreamReader>& stream, int camera, const VideoCaptureParameters& params)
{
CV_Assert(plugin_api);
CV_Assert(plugin_api->v0.Capture_release);
CvPluginCapture capture = NULL;
if (stream && plugin_api->api_header.api_version >= 2 && plugin_api->v2.Capture_open_stream)
{
std::vector<int> vint_params = params.getIntVector();
int* c_params = vint_params.data();
unsigned n_params = (unsigned)(vint_params.size() / 2);
if (CV_ERROR_OK == plugin_api->v2.Capture_open_stream(
stream.get(),
[](void* opaque, char* buffer, long long size) -> long long {
CV_LOG_VERBOSE(NULL, 0, "IStreamReader::read(" << size << ")...");
auto is = reinterpret_cast<IStreamReader*>(opaque);
try {
return is->read(buffer, size);
} catch (...) {
CV_LOG_WARNING(NULL, "IStreamReader::read(" << size << ") failed");
return 0;
}
},
[](void* opaque, long long offset, int way) -> long long {
CV_LOG_VERBOSE(NULL, 0, "IStreamReader::seek(" << offset << ", way=" << way << ")...");
auto is = reinterpret_cast<IStreamReader*>(opaque);
try {
return is->seek(offset, way);
} catch (...) {
CV_LOG_WARNING(NULL, "IStreamReader::seek(" << offset << ", way=" << way << ") failed");
return -1;
}
}, c_params, n_params, &capture))
{
CV_Assert(capture);
return makePtr<PluginCapture>(plugin_api, capture, stream);
}
}
else if (stream)
return Ptr<PluginCapture>();
if (plugin_api->api_header.api_version >= 1 && plugin_api->v1.Capture_open_with_params)
{
@ -488,8 +525,8 @@ public:
return Ptr<PluginCapture>();
}
PluginCapture(const OpenCV_VideoIO_Capture_Plugin_API* plugin_api, CvPluginCapture capture)
: plugin_api_(plugin_api), capture_(capture)
PluginCapture(const OpenCV_VideoIO_Capture_Plugin_API* plugin_api, CvPluginCapture capture, const Ptr<IStreamReader>& readStream = Ptr<IStreamReader>())
: plugin_api_(plugin_api), capture_(capture), readStream_(readStream)
{
CV_Assert(plugin_api_); CV_Assert(capture_);
}
@ -661,7 +698,7 @@ Ptr<IVideoCapture> PluginBackend::createCapture(int camera, const VideoCapturePa
try
{
if (capture_api_)
return PluginCapture::create(capture_api_, std::string(), camera, params); //.staticCast<IVideoCapture>();
return PluginCapture::create(capture_api_, std::string(), nullptr, camera, params); //.staticCast<IVideoCapture>();
if (plugin_api_)
{
Ptr<IVideoCapture> cap = legacy::PluginCapture::create(plugin_api_, std::string(), camera); //.staticCast<IVideoCapture>();
@ -685,7 +722,7 @@ Ptr<IVideoCapture> PluginBackend::createCapture(const std::string &filename, con
try
{
if (capture_api_)
return PluginCapture::create(capture_api_, filename, 0, params); //.staticCast<IVideoCapture>();
return PluginCapture::create(capture_api_, filename, nullptr, 0, params); //.staticCast<IVideoCapture>();
if (plugin_api_)
{
Ptr<IVideoCapture> cap = legacy::PluginCapture::create(plugin_api_, filename, 0); //.staticCast<IVideoCapture>();
@ -704,6 +741,25 @@ Ptr<IVideoCapture> PluginBackend::createCapture(const std::string &filename, con
return Ptr<IVideoCapture>();
}
// Open a capture over a user-supplied read-data stream via the plugin capture API.
// Passes an empty filename and camera index 0 to PluginCapture::create, which
// selects the stream path. The legacy (pre-capture-API) plugin interface has no
// stream entry point, so it reports StsNotImplemented instead.
// Returns an empty Ptr when no capture could be created; rethrows any exception
// after logging, so the caller decides how to handle failure.
Ptr<IVideoCapture> PluginBackend::createCapture(const Ptr<IStreamReader>& stream, const VideoCaptureParameters& params) const
{
try
{
if (capture_api_)
return PluginCapture::create(capture_api_, std::string(), stream, 0, params); //.staticCast<IVideoCapture>();
if (plugin_api_)
{
// Legacy plugin API cannot accept a stream source.
CV_Error(Error::StsNotImplemented, "Legacy plugin API for stream capture");
}
}
catch (...)
{
CV_LOG_DEBUG(NULL, "Video I/O: can't open stream capture");
throw;
}
return Ptr<IVideoCapture>();
}
Ptr<IVideoWriter> PluginBackend::createWriter(const std::string& filename, int fourcc, double fps,
const cv::Size& sz, const VideoWriterParameters& params) const
{

View File

@ -36,10 +36,11 @@ class StaticBackend: public IBackend
public:
FN_createCaptureFile fn_createCaptureFile_;
FN_createCaptureCamera fn_createCaptureCamera_;
FN_createCaptureStream fn_createCaptureStream_;
FN_createWriter fn_createWriter_;
StaticBackend(FN_createCaptureFile fn_createCaptureFile, FN_createCaptureCamera fn_createCaptureCamera, FN_createWriter fn_createWriter)
: fn_createCaptureFile_(fn_createCaptureFile), fn_createCaptureCamera_(fn_createCaptureCamera), fn_createWriter_(fn_createWriter)
StaticBackend(FN_createCaptureFile fn_createCaptureFile, FN_createCaptureCamera fn_createCaptureCamera, FN_createCaptureStream fn_createCaptureStream, FN_createWriter fn_createWriter)
: fn_createCaptureFile_(fn_createCaptureFile), fn_createCaptureCamera_(fn_createCaptureCamera), fn_createCaptureStream_(fn_createCaptureStream), fn_createWriter_(fn_createWriter)
{
// nothing
}
@ -72,6 +73,19 @@ public:
}
return Ptr<IVideoCapture>();
}
// Create a capture from a read-data stream using the statically linked backend.
// The parameter-less factory function cannot receive 'params', so any supplied
// parameters are applied afterwards via applyParametersFallback().
// Returns an empty Ptr when the backend registered no stream factory.
Ptr<IVideoCapture> createCapture(const Ptr<IStreamReader>& stream, const VideoCaptureParameters& params) const CV_OVERRIDE
{
if (fn_createCaptureStream_)
{
Ptr<IVideoCapture> cap = fn_createCaptureStream_(stream);
if (cap && !params.empty())
{
// Factory signature has no params argument; apply them post-creation.
applyParametersFallback(cap, params);
}
return cap;
}
return Ptr<IVideoCapture>();
}
Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps,
const cv::Size& sz, const VideoWriterParameters& params) const CV_OVERRIDE
{
@ -87,8 +101,8 @@ protected:
Ptr<StaticBackend> backend;
public:
StaticBackendFactory(FN_createCaptureFile createCaptureFile, FN_createCaptureCamera createCaptureCamera, FN_createWriter createWriter)
: backend(makePtr<StaticBackend>(createCaptureFile, createCaptureCamera, createWriter))
StaticBackendFactory(FN_createCaptureFile createCaptureFile, FN_createCaptureCamera createCaptureCamera, FN_createCaptureStream createCaptureStream, FN_createWriter createWriter)
: backend(makePtr<StaticBackend>(createCaptureFile, createCaptureCamera, createCaptureStream, createWriter))
{
// nothing
}
@ -106,9 +120,10 @@ public:
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFile createCaptureFile,
FN_createCaptureCamera createCaptureCamera,
FN_createCaptureStream createCaptureStream,
FN_createWriter createWriter)
{
return makePtr<StaticBackendFactory>(createCaptureFile, createCaptureCamera, createWriter).staticCast<IBackendFactory>();
return makePtr<StaticBackendFactory>(createCaptureFile, createCaptureCamera, createCaptureStream, createWriter).staticCast<IBackendFactory>();
}
@ -118,10 +133,11 @@ class StaticBackendWithParams: public IBackend
public:
FN_createCaptureFileWithParams fn_createCaptureFile_;
FN_createCaptureCameraWithParams fn_createCaptureCamera_;
FN_createCaptureStreamWithParams fn_createCaptureStream_;
FN_createWriter fn_createWriter_;
StaticBackendWithParams(FN_createCaptureFileWithParams fn_createCaptureFile, FN_createCaptureCameraWithParams fn_createCaptureCamera, FN_createWriter fn_createWriter)
: fn_createCaptureFile_(fn_createCaptureFile), fn_createCaptureCamera_(fn_createCaptureCamera), fn_createWriter_(fn_createWriter)
StaticBackendWithParams(FN_createCaptureFileWithParams fn_createCaptureFile, FN_createCaptureCameraWithParams fn_createCaptureCamera, FN_createCaptureStreamWithParams fn_createCaptureStream, FN_createWriter fn_createWriter)
: fn_createCaptureFile_(fn_createCaptureFile), fn_createCaptureCamera_(fn_createCaptureCamera), fn_createCaptureStream_(fn_createCaptureStream), fn_createWriter_(fn_createWriter)
{
// nothing
}
@ -140,6 +156,12 @@ public:
return fn_createCaptureFile_(filename, params);
return Ptr<IVideoCapture>();
}
// Create a capture from a read-data stream; this factory variant accepts the
// parameters directly, so no post-creation fallback is needed.
// Returns an empty Ptr when the backend registered no stream factory.
Ptr<IVideoCapture> createCapture(const Ptr<IStreamReader>& stream, const VideoCaptureParameters& params) const CV_OVERRIDE
{
if (fn_createCaptureStream_)
return fn_createCaptureStream_(stream, params);
return Ptr<IVideoCapture>();
}
Ptr<IVideoWriter> createWriter(const std::string& filename, int fourcc, double fps,
const cv::Size& sz, const VideoWriterParameters& params) const CV_OVERRIDE
{
@ -155,8 +177,8 @@ protected:
Ptr<StaticBackendWithParams> backend;
public:
StaticBackendWithParamsFactory(FN_createCaptureFileWithParams createCaptureFile, FN_createCaptureCameraWithParams createCaptureCamera, FN_createWriter createWriter)
: backend(makePtr<StaticBackendWithParams>(createCaptureFile, createCaptureCamera, createWriter))
StaticBackendWithParamsFactory(FN_createCaptureFileWithParams createCaptureFile, FN_createCaptureCameraWithParams createCaptureCamera, FN_createCaptureStreamWithParams createCaptureStream, FN_createWriter createWriter)
: backend(makePtr<StaticBackendWithParams>(createCaptureFile, createCaptureCamera, createCaptureStream, createWriter))
{
// nothing
}
@ -174,9 +196,10 @@ public:
Ptr<IBackendFactory> createBackendFactory(FN_createCaptureFileWithParams createCaptureFile,
FN_createCaptureCameraWithParams createCaptureCamera,
FN_createCaptureStreamWithParams createCaptureStream,
FN_createWriter createWriter)
{
return makePtr<StaticBackendWithParamsFactory>(createCaptureFile, createCaptureCamera, createWriter).staticCast<IBackendFactory>();
return makePtr<StaticBackendWithParamsFactory>(createCaptureFile, createCaptureCamera, createCaptureStream, createWriter).staticCast<IBackendFactory>();
}

View File

@ -65,6 +65,10 @@ static bool param_VIDEOWRITER_DEBUG = utils::getConfigurationParameterBool("OPEN
void DefaultDeleter<CvCapture>::operator ()(CvCapture* obj) const { cvReleaseCapture(&obj); }
void DefaultDeleter<CvVideoWriter>::operator ()(CvVideoWriter* obj) const { cvReleaseVideoWriter(&obj); }
// Out-of-line virtual destructor definition; anchors the vtable in this
// translation unit. Intentionally empty.
IStreamReader::~IStreamReader()
{
// nothing
}
VideoCapture::VideoCapture() : throwOnFail(false)
{}
@ -82,6 +86,13 @@ VideoCapture::VideoCapture(const String& filename, int apiPreference, const std:
open(filename, apiPreference, params);
}
// Convenience constructor: open a capture from a user-supplied read-data
// stream. Delegates to open(source, apiPreference, params); failures are not
// thrown here (throwOnFail starts false) — check isOpened() afterwards.
VideoCapture::VideoCapture(const Ptr<IStreamReader>& source, int apiPreference, const std::vector<int>& params)
: throwOnFail(false)
{
CV_TRACE_FUNCTION();
open(source, apiPreference, params);
}
VideoCapture::VideoCapture(int index, int apiPreference) : throwOnFail(false)
{
CV_TRACE_FUNCTION();
@ -188,7 +199,7 @@ bool VideoCapture::open(const String& filename, int apiPreference, const std::ve
else
{
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): backend is not available "
cv::format("VIDEOIO(%s): backend is not available "
"(plugin is missing, or can't be loaded due "
"dependencies or it is not compatible)",
info.name));
@ -228,6 +239,131 @@ bool VideoCapture::open(const String& filename, int apiPreference, const std::ve
return false;
}
// Open a capture over a user-supplied read-data stream.
// apiPreference must name an explicit backend: CAP_ANY is rejected because
// probing multiple backends would consume/reset the stream between attempts
// (see the error message below). Iterates the registered stream-capable
// backends, tries the one matching apiPreference, and returns true on the
// first successfully opened capture. Exceptions from a backend are logged
// and swallowed unless throwOnFail is set. Returns false when no backend
// could open the stream.
bool VideoCapture::open(const Ptr<IStreamReader>& stream, int apiPreference, const std::vector<int>& params)
{
CV_INSTRUMENT_REGION();
// CAP_ANY would probe several backends, each resetting/consuming the stream.
if (apiPreference == CAP_ANY)
{
CV_Error_(Error::StsBadArg, ("Avoid CAP_ANY - explicit backend expected to avoid read data stream reset"));
}
// Re-opening an already-open capture releases the previous one first.
if (isOpened())
{
release();
}
const VideoCaptureParameters parameters(params);
const std::vector<VideoBackendInfo> backends = cv::videoio_registry::getAvailableBackends_CaptureByStream();
for (size_t i = 0; i < backends.size(); i++)
{
const VideoBackendInfo& info = backends[i];
// Only the explicitly requested backend is tried.
if (apiPreference != info.id)
continue;
if (!info.backendFactory)
{
CV_LOG_DEBUG(NULL, "VIDEOIO(" << info.name << "): factory is not available (plugins require filesystem support)");
continue;
}
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): trying capture buffer ...",
info.name));
CV_Assert(!info.backendFactory.empty());
const Ptr<IBackend> backend = info.backendFactory->getBackend();
if (!backend.empty())
{
try
{
icap = backend->createCapture(stream, parameters);
if (!icap.empty())
{
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): created, isOpened=%d",
info.name, icap->isOpened()));
if (icap->isOpened())
{
return true;
}
// Created but not opened: discard and fall through to failure reporting.
icap.release();
}
else
{
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): can't create capture",
info.name));
}
}
catch (const cv::Exception& e)
{
// Backend errors are fatal only in throwOnFail mode; otherwise log and continue.
if (throwOnFail)
{
throw;
}
CV_LOG_WARNING(NULL,
cv::format("VIDEOIO(%s): raised OpenCV exception:\n\n%s\n",
info.name, e.what()));
}
catch (const std::exception& e)
{
if (throwOnFail)
{
throw;
}
CV_LOG_WARNING(NULL, cv::format("VIDEOIO(%s): raised C++ exception:\n\n%s\n",
info.name, e.what()));
}
catch (...)
{
if (throwOnFail)
{
throw;
}
CV_LOG_WARNING(NULL,
cv::format("VIDEOIO(%s): raised unknown C++ exception!\n\n",
info.name));
}
}
else
{
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): backend is not available "
"(plugin is missing, or can't be loaded due "
"dependencies or it is not compatible)",
info.name));
}
}
// Nothing opened: explain why, depending on whether the backend exists at all.
bool found = cv::videoio_registry::isBackendBuiltIn(static_cast<VideoCaptureAPIs>(apiPreference));
if (found)
{
CV_LOG_WARNING(NULL, cv::format("VIDEOIO(%s): backend is generally available "
"but can't be used to capture by read data stream",
cv::videoio_registry::getBackendName(static_cast<VideoCaptureAPIs>(apiPreference)).c_str()));
}
if (throwOnFail)
{
CV_Error_(Error::StsError, ("could not open read data stream"));
}
if (cv::videoio_registry::checkDeprecatedBackend(apiPreference))
{
CV_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): backend is removed from OpenCV",
cv::videoio_registry::getBackendName((VideoCaptureAPIs) apiPreference).c_str()));
}
else
{
CV_LOG_DEBUG(NULL, "VIDEOIO: choosen backend does not work or wrong. "
"Please make sure that your computer support chosen backend and OpenCV built "
"with right flags.");
}
return false;
}
bool VideoCapture::open(int cameraNum, int apiPreference)
{
return open(cameraNum, apiPreference, std::vector<int>());
@ -326,7 +462,7 @@ bool VideoCapture::open(int cameraNum, int apiPreference, const std::vector<int>
else
{
CV_CAPTURE_LOG_DEBUG(NULL,
cv::format("VIDEOIO(%s): backend is not available "
cv::format("VIDEOIO(%s): backend is not available "
"(plugin is missing, or can't be loaded due "
"dependencies or it is not compatible)",
info.name));

View File

@ -21,10 +21,20 @@
#define INPUT_TIMEOUT_MS 2000
#define COLOR_FormatUnknown -1
#define COLOR_FormatYUV420Planar 19
#define COLOR_FormatYUV420SemiPlanar 21
#define COLOR_FormatSurface 0x7f000789 //See https://developer.android.com/reference/android/media/MediaCodecInfo.CodecCapabilities for codes
#define FOURCC_BGR CV_FOURCC_MACRO('B','G','R','3')
#define FOURCC_RGB CV_FOURCC_MACRO('R','G','B','3')
#define FOURCC_BGRA CV_FOURCC_MACRO('B','G','R','4')
#define FOURCC_RGBA CV_FOURCC_MACRO('R','G','B','4')
#define FOURCC_GRAY CV_FOURCC_MACRO('G','R','E','Y')
#define FOURCC_NV12 CV_FOURCC_MACRO('N','V','1','2')
#define FOURCC_YV12 CV_FOURCC_MACRO('Y','V','1','2')
#define FOURCC_UNKNOWN 0xFFFFFFFF
using namespace cv;
#define TAG "NativeCodec"
@ -51,9 +61,9 @@ class AndroidMediaNdkCapture : public IVideoCapture
public:
AndroidMediaNdkCapture():
sawInputEOS(false), sawOutputEOS(false),
frameStride(0), frameWidth(0), frameHeight(0), colorFormat(0),
videoWidth(0), videoHeight(0),
videoFrameCount(0),
frameStride(0), frameWidth(0), frameHeight(0),
colorFormat(COLOR_FormatUnknown), fourCC(FOURCC_BGR),
videoWidth(0), videoHeight(0), videoFrameCount(0),
videoRotation(0), videoRotationCode(-1),
videoOrientationAuto(false) {}
@ -65,6 +75,7 @@ public:
int32_t frameWidth;
int32_t frameHeight;
int32_t colorFormat;
uint32_t fourCC;
int32_t videoWidth;
int32_t videoHeight;
float videoFrameRate;
@ -73,7 +84,6 @@ public:
int32_t videoRotationCode;
bool videoOrientationAuto;
std::vector<uint8_t> buffer;
Mat frame;
~AndroidMediaNdkCapture() { cleanUp(); }
@ -157,23 +167,51 @@ public:
return false;
}
Mat yuv(frameHeight + frameHeight/2, frameStride, CV_8UC1, buffer.data());
ColorConversionCodes ccCode;
const Mat yuv(frameHeight + frameHeight/2,
frameWidth, CV_8UC1, buffer.data(), frameStride);
if (colorFormat == COLOR_FormatYUV420Planar) {
cv::cvtColor(yuv, frame, cv::COLOR_YUV2BGR_YV12);
switch(fourCC)
{
case FOURCC_BGR: ccCode = COLOR_YUV2BGR_YV12; break;
case FOURCC_RGB: ccCode = COLOR_YUV2RGB_YV12; break;
case FOURCC_BGRA: ccCode = COLOR_YUV2BGRA_YV12; break;
case FOURCC_RGBA: ccCode = COLOR_YUV2RGBA_YV12; break;
case FOURCC_GRAY: ccCode = COLOR_YUV2GRAY_YV12; break;
case FOURCC_YV12: break;
case FOURCC_UNKNOWN: fourCC = FOURCC_YV12; break;
default: LOGE("Unexpected FOURCC value: %d", fourCC);
return false;
}
} else if (colorFormat == COLOR_FormatYUV420SemiPlanar) {
cv::cvtColor(yuv, frame, cv::COLOR_YUV2BGR_NV21);
// Attention: COLOR_FormatYUV420SemiPlanar seems to correspond to NV12.
// This is different from the Camera2 interface, where NV21
// is used in this situation.
switch(fourCC)
{
case FOURCC_BGR: ccCode = COLOR_YUV2BGR_NV12; break;
case FOURCC_RGB: ccCode = COLOR_YUV2RGB_NV12; break;
case FOURCC_BGRA: ccCode = COLOR_YUV2BGRA_NV12; break;
case FOURCC_RGBA: ccCode = COLOR_YUV2RGBA_NV12; break;
case FOURCC_GRAY: ccCode = COLOR_YUV2GRAY_NV12; break;
case FOURCC_NV12: break;
case FOURCC_UNKNOWN: fourCC = FOURCC_NV12; break;
default: LOGE("Unexpected FOURCC value: %d", fourCC);
return false;
}
} else {
LOGE("Unsupported video format: %d", colorFormat);
return false;
}
Mat croppedFrame = frame(Rect(0, 0, videoWidth, videoHeight));
out.assign(croppedFrame);
if (fourCC == FOURCC_YV12 || fourCC == FOURCC_NV12)
yuv.copyTo(out);
else
cvtColor(yuv, out, ccCode);
if (videoOrientationAuto && -1 != videoRotationCode) {
cv::rotate(out, out, videoRotationCode);
}
if (videoOrientationAuto && -1 != videoRotationCode)
rotate(out, out, videoRotationCode);
return true;
}
@ -194,8 +232,11 @@ public:
case CAP_PROP_FRAME_COUNT: return videoFrameCount;
case CAP_PROP_ORIENTATION_META: return videoRotation;
case CAP_PROP_ORIENTATION_AUTO: return videoOrientationAuto ? 1 : 0;
case CAP_PROP_FOURCC: return fourCC;
}
return 0;
// unknown parameter or value not available
return -1;
}
bool setProperty(int property_id, double value) CV_OVERRIDE
@ -206,6 +247,31 @@ public:
videoOrientationAuto = value != 0 ? true : false;
return true;
}
case CAP_PROP_FOURCC: {
uint32_t newFourCC = cvRound(value);
switch (newFourCC)
{
case FOURCC_BGR:
case FOURCC_RGB:
case FOURCC_BGRA:
case FOURCC_RGBA:
case FOURCC_GRAY:
fourCC = newFourCC;
return true;
case FOURCC_YV12:
if (colorFormat != COLOR_FormatYUV420SemiPlanar) {
fourCC = (colorFormat == COLOR_FormatUnknown) ? FOURCC_UNKNOWN : FOURCC_YV12;
return true;
}
break;
case FOURCC_NV12:
if (colorFormat != COLOR_FormatYUV420Planar) {
fourCC = (colorFormat == COLOR_FormatUnknown) ? FOURCC_UNKNOWN : FOURCC_NV12;
return true;
}
break;
}
}
}
return false;

View File

@ -74,6 +74,11 @@ public:
{
open(filename, params);
}
// Construct the proxy directly from a read-data stream; delegates to
// open(stream, params). Check isOpened() after construction for success.
CvCapture_FFMPEG_proxy(const Ptr<IStreamReader>& stream, const cv::VideoCaptureParameters& params)
: ffmpegCapture(NULL)
{
open(stream, params);
}
virtual ~CvCapture_FFMPEG_proxy() { close(); }
virtual double getProperty_(int propId) const CV_OVERRIDE
@ -122,6 +127,14 @@ public:
ffmpegCapture = cvCreateFileCaptureWithParams_FFMPEG(filename.c_str(), params);
return ffmpegCapture != 0;
}
// Open an FFmpeg capture over a read-data stream. The stream Ptr is copied
// into 'readStream' so the reader outlives the underlying C capture, which
// only holds a raw reference. Returns true when the FFmpeg capture was created.
bool open(const Ptr<IStreamReader>& stream, const cv::VideoCaptureParameters& params)
{
close();
readStream = stream; // Increase counter
ffmpegCapture = cvCreateStreamCaptureWithParams_FFMPEG(stream, params);
return ffmpegCapture != 0;
}
void close()
{
if (ffmpegCapture)
@ -135,6 +148,7 @@ public:
protected:
CvCapture_FFMPEG* ffmpegCapture;
Ptr<IStreamReader> readStream;
};
} // namespace
@ -147,6 +161,14 @@ cv::Ptr<cv::IVideoCapture> cvCreateFileCapture_FFMPEG_proxy(const std::string &f
return cv::Ptr<cv::IVideoCapture>();
}
// Factory entry point: build an FFmpeg-backed IVideoCapture from a read-data
// stream. Returns an empty Ptr when the capture could not be opened.
cv::Ptr<cv::IVideoCapture> cvCreateStreamCapture_FFMPEG_proxy(const Ptr<IStreamReader>& stream, const cv::VideoCaptureParameters& params)
{
cv::Ptr<CvCapture_FFMPEG_proxy> capture = std::make_shared<CvCapture_FFMPEG_proxy>(stream, params);
if (capture && capture->isOpened())
return capture;
return cv::Ptr<cv::IVideoCapture>();
}
namespace {
class CvVideoWriter_FFMPEG_proxy CV_FINAL :
@ -234,7 +256,7 @@ cv::Ptr<cv::IVideoWriter> cvCreateVideoWriter_FFMPEG_proxy(const std::string& fi
#include "plugin_api.hpp"
#else
#define CAPTURE_ABI_VERSION 1
#define CAPTURE_API_VERSION 1
#define CAPTURE_API_VERSION 2
#include "plugin_capture_api.hpp"
#define WRITER_ABI_VERSION 1
#define WRITER_API_VERSION 1
@ -255,7 +277,7 @@ CvResult CV_API_CALL cv_capture_open(const char* filename, int camera_index, CV_
CvCapture_FFMPEG_proxy *cap = 0;
try
{
cap = new CvCapture_FFMPEG_proxy(filename, cv::VideoCaptureParameters());
cap = new CvCapture_FFMPEG_proxy(String(filename), cv::VideoCaptureParameters());
if (cap->isOpened())
{
*handle = (CvPluginCapture)cap;
@ -292,7 +314,43 @@ CvResult CV_API_CALL cv_capture_open_with_params(
try
{
cv::VideoCaptureParameters parameters(params, n_params);
cap = new CvCapture_FFMPEG_proxy(filename, parameters);
cap = new CvCapture_FFMPEG_proxy(String(filename), parameters);
if (cap->isOpened())
{
*handle = (CvPluginCapture)cap;
return CV_ERROR_OK;
}
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "FFmpeg: Exception is raised: " << e.what());
}
catch (...)
{
CV_LOG_WARNING(NULL, "FFmpeg: Unknown C++ exception is raised");
}
if (cap)
delete cap;
return CV_ERROR_FAIL;
}
static
CvResult CV_API_CALL cv_capture_open_buffer(
void* opaque,
long long(*read)(void* opaque, char* buffer, long long size),
long long(*seek)(void* opaque, long long offset, int way),
int* params, unsigned n_params,
CV_OUT CvPluginCapture* handle
)
{
if (!handle)
return CV_ERROR_FAIL;
*handle = NULL;
CvCapture_FFMPEG_proxy *cap = 0;
try
{
cv::VideoCaptureParameters parameters(params, n_params);
cap = new CvCapture_FFMPEG_proxy(makePtr<PluginStreamReader>(opaque, read, seek), parameters);
if (cap->isOpened())
{
*handle = (CvPluginCapture)cap;
@ -609,6 +667,9 @@ static const OpenCV_VideoIO_Capture_Plugin_API capture_plugin_api =
},
{
/* 8*/cv_capture_open_with_params,
},
{
/* 9*/cv_capture_open_buffer,
}
};

View File

@ -526,7 +526,7 @@ inline static std::string _opencv_ffmpeg_get_error_string(int error_code)
struct CvCapture_FFMPEG
{
bool open(const char* filename, const VideoCaptureParameters& params);
bool open(const char* filename, const Ptr<IStreamReader>& stream, const VideoCaptureParameters& params);
void close();
double getProperty(int) const;
@ -563,6 +563,8 @@ struct CvCapture_FFMPEG
int64_t pts_in_fps_time_base;
int64_t dts_delay_in_fps_time_base;
AVIOContext * avio_context;
AVPacket packet;
Image_FFMPEG frame;
struct SwsContext *img_convert_ctx;
@ -580,6 +582,8 @@ struct CvCapture_FFMPEG
*/
char * filename;
Ptr<IStreamReader> readStream;
AVDictionary *dict;
#if USE_AV_INTERRUPT_CALLBACK
int open_timeout;
@ -628,11 +632,14 @@ void CvCapture_FFMPEG::init()
avcodec = 0;
context = 0;
avio_context = 0;
frame_number = 0;
eps_zero = 0.000025;
rotation_angle = 0;
readStream.reset();
dict = NULL;
#if USE_AV_INTERRUPT_CALLBACK
@ -730,6 +737,13 @@ void CvCapture_FFMPEG::close()
#endif
}
if (avio_context)
{
av_free(avio_context->buffer);
av_freep(&avio_context);
}
readStream.reset();
init();
}
@ -1019,7 +1033,7 @@ static bool isThreadSafe() {
return threadSafe;
}
bool CvCapture_FFMPEG::open(const char* _filename, const VideoCaptureParameters& params)
bool CvCapture_FFMPEG::open(const char* _filename, const Ptr<IStreamReader>& stream, const VideoCaptureParameters& params)
{
const bool threadSafe = isThreadSafe();
InternalFFMpegRegister::init(threadSafe);
@ -1034,6 +1048,8 @@ bool CvCapture_FFMPEG::open(const char* _filename, const VideoCaptureParameters&
close();
readStream = stream;
if (!params.empty())
{
convertRGB = params.get<bool>(CAP_PROP_CONVERT_RGB, true);
@ -1145,6 +1161,56 @@ bool CvCapture_FFMPEG::open(const char* _filename, const VideoCaptureParameters&
input_format = av_find_input_format(entry->value);
}
if (!_filename)
{
size_t avio_ctx_buffer_size = 4096;
uint8_t* avio_ctx_buffer = (uint8_t*)av_malloc(avio_ctx_buffer_size);
CV_Assert(avio_ctx_buffer);
avio_context = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size, 0, this,
[](void *opaque, uint8_t *buf, int buf_size) -> int {
try {
auto capture = reinterpret_cast<CvCapture_FFMPEG*>(opaque);
auto is = capture->readStream;
int result = (int)is->read(reinterpret_cast<char*>(buf), buf_size);
// https://github.com/FFmpeg/FFmpeg/commit/858db4b01fa2b55ee55056c033054ca54ac9b0fd#diff-863c87afc9bb02fe42d071015fc8218972c80b146d603239f20b483ad0988ae9R394
// https://github.com/FFmpeg/FFmpeg/commit/a606f27f4c610708fa96e35eed7b7537d3d8f712
// https://github.com/FFmpeg/FFmpeg/blob/n4.0/libavformat/version.h#L83C41-L83C73
#if (LIBAVFORMAT_VERSION_MAJOR >= 58) && (LIBAVFORMAT_VERSION_MICRO >= 100) // FFmpeg n4.0+
if (result == 0 && buf_size > 0)
{
result = AVERROR_EOF;
}
#endif
CV_LOG_VERBOSE(NULL, 0, "FFMPEG: IStreamReader::read(" << buf_size << ") = " << result);
return result;
} catch (...) {
CV_LOG_WARNING(NULL, "FFMPEG: IStreamReader::read(" << buf_size << ") failed");
return 0;
}
},
NULL,
[](void *opaque, int64_t offset, int whence) -> int64_t {
try {
int64_t result = -1;
auto capture = reinterpret_cast<CvCapture_FFMPEG*>(opaque);
auto is = capture->readStream;
int origin = whence & (~AVSEEK_FORCE);
if (origin == SEEK_SET || origin == SEEK_CUR || origin == SEEK_END)
{
result = is->seek(offset, origin);
}
CV_LOG_VERBOSE(NULL, 0, "FFMPEG: IStreamReader::seek(" << offset << ", whence=" << whence << ") = " << result);
return result;
} catch (...) {
CV_LOG_WARNING(NULL, "FFMPEG: IStreamReader::seek(" << offset << ", whence=" << whence << ") failed");
return -1;
}
});
CV_Assert(avio_context);
ic->pb = avio_context;
}
int err = avformat_open_input(&ic, _filename, input_format, &dict);
if (err < 0)
@ -3292,16 +3358,30 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
static
CvCapture_FFMPEG* cvCreateFileCaptureWithParams_FFMPEG(const char* filename, const VideoCaptureParameters& params)
{
// FIXIT: remove unsafe malloc() approach
CvCapture_FFMPEG* capture = (CvCapture_FFMPEG*)malloc(sizeof(*capture));
CvCapture_FFMPEG* capture = new CvCapture_FFMPEG();
if (!capture)
return 0;
capture->init();
if (capture->open(filename, params))
if (capture->open(filename, nullptr, params))
return capture;
capture->close();
free(capture);
delete capture;
return 0;
}
// Allocate and open a CvCapture_FFMPEG over a read-data stream (null filename
// selects the custom-AVIO path in CvCapture_FFMPEG::open). Returns NULL on
// failure, after closing and deleting the partially constructed capture.
static
CvCapture_FFMPEG* cvCreateStreamCaptureWithParams_FFMPEG(const Ptr<IStreamReader>& stream, const VideoCaptureParameters& params)
{
CvCapture_FFMPEG* capture = new CvCapture_FFMPEG();
// NOTE(review): non-nothrow 'new' throws rather than returning null, so this
// check is effectively dead; kept to mirror the file-capture counterpart.
if (!capture)
return 0;
capture->init();
if (capture->open(nullptr, stream, params))
return capture;
capture->close();
delete capture;
return 0;
}
@ -3310,7 +3390,7 @@ void cvReleaseCapture_FFMPEG(CvCapture_FFMPEG** capture)
if( capture && *capture )
{
(*capture)->close();
free(*capture);
delete *capture;
*capture = 0;
}
}
@ -3344,14 +3424,14 @@ int cvRetrieveFrame2_FFMPEG(CvCapture_FFMPEG* capture, unsigned char** data, int
static CvVideoWriter_FFMPEG* cvCreateVideoWriterWithParams_FFMPEG( const char* filename, int fourcc, double fps,
int width, int height, const VideoWriterParameters& params )
{
CvVideoWriter_FFMPEG* writer = (CvVideoWriter_FFMPEG*)malloc(sizeof(*writer));
CvVideoWriter_FFMPEG* writer = new CvVideoWriter_FFMPEG();
if (!writer)
return 0;
writer->init();
if( writer->open( filename, fourcc, fps, width, height, params ))
return writer;
writer->close();
free(writer);
delete writer;
return 0;
}
@ -3368,7 +3448,7 @@ void cvReleaseVideoWriter_FFMPEG( CvVideoWriter_FFMPEG** writer )
if( writer && *writer )
{
(*writer)->close();
free(*writer);
delete *writer;
*writer = 0;
}
}

View File

@ -9,6 +9,7 @@
#include "opencv2/core/core_c.h"
#include "opencv2/videoio.hpp"
#include "opencv2/videoio/videoio_c.h"
#include "opencv2/videoio/utils.private.hpp"
//===================================================
@ -326,6 +327,7 @@ protected:
//==================================================================================================
Ptr<IVideoCapture> cvCreateFileCapture_FFMPEG_proxy(const std::string &filename, const VideoCaptureParameters& params);
Ptr<IVideoCapture> cvCreateStreamCapture_FFMPEG_proxy(const Ptr<IStreamReader>& stream, const VideoCaptureParameters& params);
Ptr<IVideoWriter> cvCreateVideoWriter_FFMPEG_proxy(const std::string& filename, int fourcc,
double fps, const Size& frameSize,
const VideoWriterParameters& params);
@ -351,6 +353,7 @@ Ptr<IVideoCapture> create_WRT_capture(int device);
Ptr<IVideoCapture> cvCreateCapture_MSMF(int index, const VideoCaptureParameters& params);
Ptr<IVideoCapture> cvCreateCapture_MSMF(const std::string& filename, const VideoCaptureParameters& params);
Ptr<IVideoCapture> cvCreateCapture_MSMF(const Ptr<IStreamReader>& stream, const VideoCaptureParameters& params);
Ptr<IVideoWriter> cvCreateVideoWriter_MSMF(const std::string& filename, int fourcc,
double fps, const Size& frameSize,
const VideoWriterParameters& params);

View File

@ -746,7 +746,7 @@ public:
virtual ~CvCapture_MSMF();
bool configureHW(const cv::VideoCaptureParameters& params);
virtual bool open(int, const cv::VideoCaptureParameters* params);
virtual bool open(const cv::String&, const cv::VideoCaptureParameters* params);
virtual bool open(const cv::String&, const Ptr<IStreamReader>&, const cv::VideoCaptureParameters* params);
virtual void close();
virtual double getProperty(int) const CV_OVERRIDE;
virtual bool setProperty(int, double) CV_OVERRIDE;
@ -789,6 +789,7 @@ protected:
_ComPtr<ID3D11Device> D3DDev;
_ComPtr<IMFDXGIDeviceManager> D3DMgr;
#endif
_ComPtr<IMFByteStream> byteStream;
_ComPtr<IMFSourceReader> videoFileSource;
_ComPtr<IMFSourceReaderCallback> readCallback; // non-NULL for "live" streams (camera capture)
std::vector<DWORD> dwStreamIndices;
@ -1034,7 +1035,7 @@ bool CvCapture_MSMF::configureHW(bool enable)
}
}
// Reopen if needed
return reopen ? (prevcam >= 0 ? open(prevcam, NULL) : open(prevfile.c_str(), NULL)) : true;
return reopen ? (prevcam >= 0 ? open(prevcam, NULL) : open(prevfile.c_str(), nullptr, NULL)) : true;
}
D3DMgr.Release();
}
@ -1050,7 +1051,7 @@ bool CvCapture_MSMF::configureHW(bool enable)
if (D3DDev)
D3DDev.Release();
captureMode = MODE_SW;
return reopen ? (prevcam >= 0 ? open(prevcam, NULL) : open(prevfile.c_str(), NULL)) : true;
return reopen ? (prevcam >= 0 ? open(prevcam, NULL) : open(prevfile.c_str(), nullptr, NULL)) : true;
}
#else
return !enable;
@ -1249,10 +1250,10 @@ bool CvCapture_MSMF::open(int index, const cv::VideoCaptureParameters* params)
return isOpen;
}
bool CvCapture_MSMF::open(const cv::String& _filename, const cv::VideoCaptureParameters* params)
bool CvCapture_MSMF::open(const cv::String& _filename, const Ptr<IStreamReader>& stream, const cv::VideoCaptureParameters* params)
{
close();
if (_filename.empty())
if (_filename.empty() && !stream)
return false;
if (params)
@ -1263,9 +1264,34 @@ bool CvCapture_MSMF::open(const cv::String& _filename, const cv::VideoCapturePar
}
// Set source reader parameters
_ComPtr<IMFAttributes> attr = getDefaultSourceConfig();
cv::AutoBuffer<wchar_t> unicodeFileName(_filename.length() + 1);
MultiByteToWideChar(CP_ACP, 0, _filename.c_str(), -1, unicodeFileName.data(), (int)_filename.length() + 1);
if (SUCCEEDED(MFCreateSourceReaderFromURL(unicodeFileName.data(), attr.Get(), &videoFileSource)))
bool succeeded = false;
if (!_filename.empty())
{
cv::AutoBuffer<wchar_t> unicodeFileName(_filename.length() + 1);
MultiByteToWideChar(CP_ACP, 0, _filename.c_str(), -1, unicodeFileName.data(), (int)_filename.length() + 1);
succeeded = SUCCEEDED(MFCreateSourceReaderFromURL(unicodeFileName.data(), attr.Get(), &videoFileSource));
}
else if (stream)
{
// TODO: implement read by chunks
// FIXIT: save stream in field
std::vector<char> data;
data.resize((size_t)stream->seek(0, SEEK_END));
stream->seek(0, SEEK_SET);
stream->read(data.data(), data.size());
IStream* s = SHCreateMemStream(reinterpret_cast<const BYTE*>(data.data()), static_cast<UINT32>(data.size()));
if (!s)
return false;
succeeded = SUCCEEDED(MFCreateMFByteStreamOnStream(s, &byteStream));
if (!succeeded)
return false;
if (!SUCCEEDED(MFStartup(MF_VERSION)))
return false;
succeeded = SUCCEEDED(MFCreateSourceReaderFromByteStream(byteStream.Get(), attr.Get(), &videoFileSource));
}
if (succeeded)
{
isOpen = true;
usedVideoSampleTime = 0;
@ -2375,12 +2401,24 @@ cv::Ptr<cv::IVideoCapture> cv::cvCreateCapture_MSMF( int index, const cv::VideoC
return cv::Ptr<cv::IVideoCapture>();
}
cv::Ptr<cv::IVideoCapture> cv::cvCreateCapture_MSMF (const cv::String& filename, const cv::VideoCaptureParameters& params)
cv::Ptr<cv::IVideoCapture> cv::cvCreateCapture_MSMF(const cv::String& filename, const cv::VideoCaptureParameters& params)
{
cv::Ptr<CvCapture_MSMF> capture = cv::makePtr<CvCapture_MSMF>();
if (capture)
{
capture->open(filename, &params);
capture->open(filename, nullptr, &params);
if (capture->isOpened())
return capture;
}
return cv::Ptr<cv::IVideoCapture>();
}
cv::Ptr<cv::IVideoCapture> cv::cvCreateCapture_MSMF(const Ptr<IStreamReader>& stream, const cv::VideoCaptureParameters& params)
{
cv::Ptr<CvCapture_MSMF> capture = cv::makePtr<CvCapture_MSMF>();
if (capture)
{
capture->open(std::string(), stream, &params);
if (capture->isOpened())
return capture;
}
@ -2707,7 +2745,7 @@ cv::Ptr<cv::IVideoWriter> cv::cvCreateVideoWriter_MSMF( const std::string& filen
#include "plugin_api.hpp"
#else
#define CAPTURE_ABI_VERSION 1
#define CAPTURE_API_VERSION 1
#define CAPTURE_API_VERSION 2
#include "plugin_capture_api.hpp"
#define WRITER_ABI_VERSION 1
#define WRITER_API_VERSION 1
@ -2736,7 +2774,9 @@ CvResult CV_API_CALL cv_capture_open_with_params(
cap = new CaptureT();
bool res;
if (filename)
res = cap->open(std::string(filename), &parameters);
{
res = cap->open(std::string(filename), nullptr, &parameters);
}
else
res = cap->open(camera_index, &parameters);
if (res)
@ -2758,6 +2798,44 @@ CvResult CV_API_CALL cv_capture_open_with_params(
return CV_ERROR_FAIL;
}
// Plugin API entry point (API-CALL 9, API version 2): opens a capture that
// reads media from a caller-provided stream instead of a file or camera index.
// 'read'/'seek' are C callbacks operating on the caller's 'opaque' state; they
// are wrapped into a PluginStreamReader and handed to CaptureT::open().
// On success '*handle' receives the new capture object and CV_ERROR_OK is
// returned; on any failure (including exceptions) the partially-constructed
// capture is destroyed and CV_ERROR_FAIL is returned.
static
CvResult CV_API_CALL cv_capture_open_buffer(
void* opaque,
long long(*read)(void* opaque, char* buffer, long long size),
long long(*seek)(void* opaque, long long offset, int way),
int* params, unsigned n_params,
CV_OUT CvPluginCapture* handle
)
{
if (!handle)
return CV_ERROR_FAIL;
*handle = NULL;
CaptureT* cap = 0;
try
{
// 'params' is a flat array of n_params (key, value) integer pairs.
cv::VideoCaptureParameters parameters(params, n_params);
cap = new CaptureT();
// Empty filename selects the stream-based open() overload.
bool res = cap->open(std::string(), makePtr<PluginStreamReader>(opaque, read, seek), &parameters);
if (res)
{
// Ownership transfers to the caller through the opaque plugin handle.
*handle = (CvPluginCapture)cap;
return CV_ERROR_OK;
}
}
catch (const std::exception& e)
{
CV_LOG_WARNING(NULL, "MSMF: Exception is raised: " << e.what());
}
catch (...)
{
CV_LOG_WARNING(NULL, "MSMF: Unknown C++ exception is raised");
}
// Capture was not handed out — release it before reporting failure.
if (cap)
delete cap;
return CV_ERROR_FAIL;
}
static
CvResult CV_API_CALL cv_capture_open(const char* filename, int camera_index, CV_OUT CvPluginCapture* handle)
{
@ -3027,6 +3105,9 @@ static const OpenCV_VideoIO_Capture_Plugin_API capture_plugin_api =
},
{
/* 8*/cv::cv_capture_open_with_params,
},
{
/* 9*/cv::cv_capture_open_buffer,
}
};

View File

@ -13,7 +13,7 @@
/// increased for backward-compatible changes, e.g. add new function
/// Caller API <= Plugin API -> plugin is fully compatible
/// Caller API > Plugin API -> plugin is not fully compatible, caller should use extra checks to use plugins with older API
#define CAPTURE_API_VERSION 1
#define CAPTURE_API_VERSION 2
/// increased for incompatible changes, e.g. remove function argument
/// Caller ABI == Plugin ABI -> plugin is compatible
@ -121,6 +121,29 @@ struct OpenCV_VideoIO_Capture_Plugin_API_v1_1_api_entries
CV_OUT CvPluginCapture* handle);
}; // OpenCV_VideoIO_Capture_Plugin_API_v1_1_api_entries
// Entry table added in capture-plugin API version 2 (ABI 1): extends the
// v1_0/v1_1 tables with stream-based (in-memory/buffered) capture opening.
struct OpenCV_VideoIO_Capture_Plugin_API_v1_2_api_entries
{
/** @brief Open video capture from buffer with parameters
@param opaque A pointer to user data
@param read A pointer to a function that is called to read @p size bytes into the caller-allocated @p buffer. Returns the number of bytes that were actually read
@param seek A pointer to a function that is called to move the current position inside the stream buffer.
@p offset is a number of bytes and @p way is one of the markers SEEK_SET, SEEK_CUR, SEEK_END.
The function returns an absolute current position in bytes.
@param params pointer on 2*n_params array of 'key,value' pairs
@param n_params number of passed parameters
@param[out] handle pointer on Capture handle
@note API-CALL 9, API-Version == 2
*/
CvResult (CV_API_CALL *Capture_open_stream)(
void* opaque,
long long(*read)(void* opaque, char* buffer, long long size),
long long(*seek)(void* opaque, long long offset, int way),
int* params, unsigned n_params,
CV_OUT CvPluginCapture* handle);
}; // OpenCV_VideoIO_Capture_Plugin_API_v1_2_api_entries
typedef struct OpenCV_VideoIO_Capture_Plugin_API_v1_0
{
OpenCV_API_Header api_header;
@ -134,7 +157,17 @@ typedef struct OpenCV_VideoIO_Capture_Plugin_API_v1_1
struct OpenCV_VideoIO_Capture_Plugin_API_v1_1_api_entries v1;
} OpenCV_VideoIO_Capture_Plugin_API_v1_1;
#if CAPTURE_ABI_VERSION == 1 && CAPTURE_API_VERSION == 1
// ABI v1 / API v2 capture plugin interface: the v1_0 and v1_1 entry tables
// plus the stream-buffer open entry introduced in API version 2. New entry
// tables are only appended, preserving the binary layout of older versions.
typedef struct OpenCV_VideoIO_Capture_Plugin_API_v1_2
{
OpenCV_API_Header api_header;
struct OpenCV_VideoIO_Capture_Plugin_API_v1_0_api_entries v0;
struct OpenCV_VideoIO_Capture_Plugin_API_v1_1_api_entries v1;
struct OpenCV_VideoIO_Capture_Plugin_API_v1_2_api_entries v2;
} OpenCV_VideoIO_Capture_Plugin_API_v1_2;
#if CAPTURE_ABI_VERSION == 1 && CAPTURE_API_VERSION == 2
typedef struct OpenCV_VideoIO_Capture_Plugin_API_v1_2 OpenCV_VideoIO_Capture_Plugin_API;
#elif CAPTURE_ABI_VERSION == 1 && CAPTURE_API_VERSION == 1
typedef struct OpenCV_VideoIO_Capture_Plugin_API_v1_1 OpenCV_VideoIO_Capture_Plugin_API;
#elif CAPTURE_ABI_VERSION == 1 && CAPTURE_API_VERSION == 0
typedef struct OpenCV_VideoIO_Capture_Plugin_API_v1_0 OpenCV_VideoIO_Capture_Plugin_API;

View File

@ -47,7 +47,12 @@ namespace {
#define DECLARE_STATIC_BACKEND(cap, name, mode, createCaptureFile, createCaptureCamera, createWriter) \
{ \
cap, (BackendMode)(mode), 1000, name, createBackendFactory(createCaptureFile, createCaptureCamera, createWriter) \
cap, (BackendMode)(mode), 1000, name, createBackendFactory(createCaptureFile, createCaptureCamera, 0, createWriter) \
},
#define DECLARE_STATIC_BACKEND_WITH_STREAM_SUPPORT(cap, name, mode, createCaptureStream) \
{ \
cap, (BackendMode)(mode), 1000, name, createBackendFactory(0, 0, createCaptureStream, 0) \
},
/** Ordering guidelines:
@ -62,8 +67,9 @@ static const struct VideoBackendInfo builtin_backends[] =
{
#ifdef HAVE_FFMPEG
DECLARE_STATIC_BACKEND(CAP_FFMPEG, "FFMPEG", MODE_CAPTURE_BY_FILENAME | MODE_WRITER, cvCreateFileCapture_FFMPEG_proxy, 0, cvCreateVideoWriter_FFMPEG_proxy)
DECLARE_STATIC_BACKEND_WITH_STREAM_SUPPORT(CAP_FFMPEG, "FFMPEG", MODE_CAPTURE_BY_STREAM, cvCreateStreamCapture_FFMPEG_proxy)
#elif defined(ENABLE_PLUGINS) || defined(HAVE_FFMPEG_WRAPPER)
DECLARE_DYNAMIC_BACKEND(CAP_FFMPEG, "FFMPEG", MODE_CAPTURE_BY_FILENAME | MODE_WRITER)
DECLARE_DYNAMIC_BACKEND(CAP_FFMPEG, "FFMPEG", MODE_CAPTURE_BY_FILENAME | MODE_CAPTURE_BY_STREAM | MODE_WRITER)
#endif
#ifdef HAVE_GSTREAMER
@ -90,8 +96,9 @@ static const struct VideoBackendInfo builtin_backends[] =
#ifdef HAVE_MSMF
DECLARE_STATIC_BACKEND(CAP_MSMF, "MSMF", MODE_CAPTURE_ALL | MODE_WRITER, cvCreateCapture_MSMF, cvCreateCapture_MSMF, cvCreateVideoWriter_MSMF)
DECLARE_STATIC_BACKEND_WITH_STREAM_SUPPORT(CAP_MSMF, "MSMF", MODE_CAPTURE_BY_STREAM, cvCreateCapture_MSMF)
#elif defined(ENABLE_PLUGINS) && defined(_WIN32)
DECLARE_DYNAMIC_BACKEND(CAP_MSMF, "MSMF", MODE_CAPTURE_ALL | MODE_WRITER)
DECLARE_DYNAMIC_BACKEND(CAP_MSMF, "MSMF", MODE_CAPTURE_ALL | MODE_CAPTURE_BY_STREAM | MODE_WRITER)
#endif
#ifdef HAVE_DSHOW
@ -330,6 +337,17 @@ public:
}
return result;
}
inline std::vector<VideoBackendInfo> getAvailableBackends_CaptureByStream() const
{
std::vector<VideoBackendInfo> result;
for (size_t i = 0; i < enabledBackends.size(); i++)
{
const VideoBackendInfo& info = enabledBackends[i];
if (info.mode & MODE_CAPTURE_BY_STREAM)
result.push_back(info);
}
return result;
}
inline std::vector<VideoBackendInfo> getAvailableBackends_Writer() const
{
std::vector<VideoBackendInfo> result;
@ -357,6 +375,11 @@ std::vector<VideoBackendInfo> getAvailableBackends_CaptureByFilename()
const std::vector<VideoBackendInfo> result = VideoBackendRegistry::getInstance().getAvailableBackends_CaptureByFilename();
return result;
}
// Public wrapper: forwards to the registry singleton's stream-capture query.
std::vector<VideoBackendInfo> getAvailableBackends_CaptureByStream()
{
    return VideoBackendRegistry::getInstance().getAvailableBackends_CaptureByStream();
}
std::vector<VideoBackendInfo> getAvailableBackends_Writer()
{
const std::vector<VideoBackendInfo> result = VideoBackendRegistry::getInstance().getAvailableBackends_Writer();
@ -424,6 +447,15 @@ std::vector<VideoCaptureAPIs> getStreamBackends()
}
std::vector<VideoCaptureAPIs> getStreamBufferedBackends()
{
const std::vector<VideoBackendInfo> backends = VideoBackendRegistry::getInstance().getAvailableBackends_CaptureByStream();
std::vector<VideoCaptureAPIs> result;
for (size_t i = 0; i < backends.size(); i++)
result.push_back((VideoCaptureAPIs)backends[i].id);
return result;
}
std::vector<VideoCaptureAPIs> getWriterBackends()
{
const std::vector<VideoBackendInfo> backends = VideoBackendRegistry::getInstance().getAvailableBackends_Writer();
@ -501,6 +533,24 @@ std::string getStreamBackendPluginVersion(VideoCaptureAPIs api,
CV_Error(Error::StsError, "Unknown or wrong backend ID");
}
// Returns the description string and ABI/API version of a dynamically loaded
// plugin's buffered-stream capture interface. Only valid for non-built-in
// backends that advertise stream capture; raises StsError for any other ID.
std::string getStreamBufferedBackendPluginVersion(VideoCaptureAPIs api,
        CV_OUT int& version_ABI,
        CV_OUT int& version_API
)
{
    const std::vector<VideoBackendInfo> backends =
            VideoBackendRegistry::getInstance().getAvailableBackends_CaptureByStream();
    for (const VideoBackendInfo& info : backends)
    {
        if (info.id == api)
        {
            CV_Assert(!info.backendFactory.empty());
            CV_Assert(!info.backendFactory->isBuiltIn());
            return getCapturePluginVersion(info.backendFactory, version_ABI, version_API);
        }
    }
    CV_Error(Error::StsError, "Unknown or wrong backend ID");
}
/** @brief Returns description and ABI/API version of videoio plugin's writer interface */
std::string getWriterBackendPluginVersion(VideoCaptureAPIs api,

View File

@ -14,6 +14,7 @@ namespace cv
enum BackendMode {
MODE_CAPTURE_BY_INDEX = 1 << 0, //!< device index
MODE_CAPTURE_BY_FILENAME = 1 << 1, //!< filename or device path (v4l2)
MODE_CAPTURE_BY_STREAM = 1 << 2, //!< data stream
MODE_WRITER = 1 << 4, //!< writer
MODE_CAPTURE_ALL = MODE_CAPTURE_BY_INDEX + MODE_CAPTURE_BY_FILENAME,
@ -38,6 +39,7 @@ namespace videoio_registry {
std::vector<VideoBackendInfo> getAvailableBackends_CaptureByIndex();
std::vector<VideoBackendInfo> getAvailableBackends_CaptureByFilename();
std::vector<VideoBackendInfo> getAvailableBackends_CaptureByStream();
std::vector<VideoBackendInfo> getAvailableBackends_Writer();
bool checkDeprecatedBackend(int api);

View File

@ -296,10 +296,6 @@ INSTANTIATE_TEST_CASE_P(/**/, videoio_container_get, testing::ValuesIn(videoio_c
typedef tuple<string, string, int, int, bool, bool> videoio_encapsulate_params_t;
typedef testing::TestWithParam< videoio_encapsulate_params_t > videoio_encapsulate;
#if defined(WIN32) // remove when FFmpeg wrapper includes PR25874
#define WIN32_WAIT_FOR_FFMPEG_WRAPPER_UPDATE
#endif
TEST_P(videoio_encapsulate, write)
{
const VideoCaptureAPIs api = CAP_FFMPEG;
@ -331,11 +327,10 @@ TEST_P(videoio_encapsulate, write)
Mat rawFrame;
for (int i = 0; i < nFrames; i++) {
ASSERT_TRUE(capRaw.read(rawFrame));
#if !defined(WIN32_WAIT_FOR_FFMPEG_WRAPPER_UPDATE)
if (setPts && i == 0) {
ASSERT_TRUE(container.set(VIDEOWRITER_PROP_DTS_DELAY, capRaw.get(CAP_PROP_DTS_DELAY)));
double dts = capRaw.get(CAP_PROP_DTS_DELAY);
ASSERT_TRUE(container.set(VIDEOWRITER_PROP_DTS_DELAY, dts)) << "dts=" << dts;
}
#endif
ASSERT_FALSE(rawFrame.empty());
if (i == 0 && mpeg4) {
Mat tmp = rawFrame.clone();
@ -346,11 +341,10 @@ TEST_P(videoio_encapsulate, write)
memcpy(rawFrame.data, extraData.data, extraData.total());
memcpy(rawFrame.data + extraData.total(), tmp.data, tmp.total());
}
#if !defined(WIN32_WAIT_FOR_FFMPEG_WRAPPER_UPDATE)
if (setPts) {
ASSERT_TRUE(container.set(VIDEOWRITER_PROP_PTS, capRaw.get(CAP_PROP_PTS)));
double pts = capRaw.get(CAP_PROP_PTS);
ASSERT_TRUE(container.set(VIDEOWRITER_PROP_PTS, pts)) << "pts=" << pts;
}
#endif
container.write(rawFrame);
}
container.release();
@ -381,11 +375,9 @@ TEST_P(videoio_encapsulate, write)
const bool keyFrameActual = capActualRaw.get(CAP_PROP_LRF_HAS_KEY_FRAME) == 1.;
const bool keyFrameReference = idrPeriod ? i % idrPeriod == 0 : 1;
ASSERT_EQ(keyFrameReference, keyFrameActual);
#if !defined(WIN32_WAIT_FOR_FFMPEG_WRAPPER_UPDATE)
if (tsWorking) {
ASSERT_EQ(round(capReference.get(CAP_PROP_POS_MSEC)), round(capActual.get(CAP_PROP_POS_MSEC)));
}
#endif
}
}

View File

@ -3,6 +3,7 @@
// of this distribution and at http://opencv.org/license.html.
#include "test_precomp.hpp"
#include "opencv2/core/utils/filesystem.hpp"
namespace opencv_test
{
@ -1024,4 +1025,133 @@ INSTANTIATE_TEST_CASE_P(videoio, videowriter_acceleration, testing::Combine(
testing::ValuesIn(hw_use_umat)
));
// Minimal cv::IStreamReader implementation backed by a std::streambuf.
// Lets the stream-capture tests feed either a file or an in-memory buffer
// to VideoCapture through the buffered-stream API.
class BufferStream : public cv::IStreamReader
{
public:
    // Wraps a file opened for binary reading.
    BufferStream(const std::string& filename)
    {
        Ptr<std::filebuf> file = makePtr<std::filebuf>();
        file->open(filename.c_str(), std::ios::in | std::ios::binary);
        stream = file;
    }

    // Wraps an already-populated in-memory buffer.
    BufferStream(const Ptr<std::stringbuf>& _stream) : stream(_stream) {}

    // Reads up to 'size' bytes into 'buffer'; returns the count actually read.
    long long read(char* buffer, long long size) CV_OVERRIDE
    {
        return stream->sgetn(buffer, size);
    }

    // Repositions the stream; 'way' is SEEK_SET/SEEK_CUR/SEEK_END.
    // Returns the resulting absolute offset in bytes.
    long long seek(long long offset, int way) CV_OVERRIDE
    {
        std::ios_base::seekdir dir = std::ios_base::cur;
        if (way == SEEK_SET)
            dir = std::ios_base::beg;
        else if (way == SEEK_END)
            dir = std::ios_base::end;
        return stream->pubseekoff(offset, dir);
    }

private:
    Ptr<std::streambuf> stream;
};
typedef testing::TestWithParam<tuple<std::string, VideoCaptureAPIs>> stream_capture;

// Opens a video through the buffered-stream API and verifies that decoded
// frames are non-empty and remain bitwise stable across subsequent reads.
TEST_P(stream_capture, read)
{
    const std::string ext = get<0>(GetParam());
    const VideoCaptureAPIs apiPref = get<1>(GetParam());
    const std::vector<VideoCaptureAPIs> streamApis = videoio_registry::getStreamBufferedBackends();

    if (!videoio_registry::hasBackend(apiPref))
        throw SkipTestException(cv::String("Backend is not available/disabled: ") + cv::videoio_registry::getBackendName(apiPref));
    const bool streamSupported = std::find(streamApis.begin(), streamApis.end(), apiPref) != streamApis.end();
    if (!streamSupported)
        throw SkipTestException(cv::String("Backend is not supported: ") + cv::videoio_registry::getBackendName(apiPref));
    if (cvtest::skipUnstableTests && apiPref == CAP_MSMF && (ext == "h264" || ext == "h265" || ext == "mpg"))
        throw SkipTestException("Unstable MSMF test");
    if (!videoio_registry::isBackendBuiltIn(apiPref))
    {
        // Plugins older than ABI/API 1/2 lack the stream-buffer entry point.
        int pluginABI, pluginAPI;
        videoio_registry::getStreamBufferedBackendPluginVersion(apiPref, pluginABI, pluginAPI);
        if (pluginABI < 1 || (pluginABI == 1 && pluginAPI < 2))
            throw SkipTestException(format("Buffer capture supported since ABI/API = 1/2. %s plugin is %d/%d",
                                           cv::videoio_registry::getBackendName(apiPref).c_str(), pluginABI, pluginAPI));
    }

    String video_file = BunnyParameters::getFilename(String(".") + ext);
    ASSERT_TRUE(utils::fs::exists(video_file));

    VideoCapture cap;
    EXPECT_NO_THROW(cap.open(makePtr<BufferStream>(video_file), apiPref, {}));
    ASSERT_TRUE(cap.isOpened());

    const int numFrames = 10;
    std::vector<Mat> frames(numFrames);
    std::vector<Mat> hardCopies(numFrames);
    for (int idx = 0; idx < numFrames; ++idx)
    {
        ASSERT_NO_THROW(cap >> frames[idx]);
        EXPECT_FALSE(frames[idx].empty());
        hardCopies[idx] = frames[idx].clone();
    }
    // Previously decoded frames must not be mutated by later reads.
    for (int idx = 0; idx < numFrames; ++idx)
        EXPECT_EQ(0, cv::norm(frames[idx], hardCopies[idx], NORM_INF)) << idx;
}

INSTANTIATE_TEST_CASE_P(videoio, stream_capture,
        testing::Combine(
            testing::ValuesIn(bunny_params),
            testing::ValuesIn(backend_params)));
// Stream-input test for container formats (see test_ffmpeg/videoio_container.read):
// extracts the raw encoded bitstream from a container and replays it through
// the buffered-stream API, comparing decoded frames against direct file playback.
typedef testing::TestWithParam<std::string> stream_capture_ffmpeg;
TEST_P(stream_capture_ffmpeg, raw)
{
std::string ext = GetParam();
VideoCaptureAPIs apiPref = CAP_FFMPEG;
std::vector<VideoCaptureAPIs> supportedAPIs = videoio_registry::getStreamBufferedBackends();
// Skip when FFmpeg is absent or does not support buffered-stream capture.
if (!videoio_registry::hasBackend(apiPref))
throw SkipTestException(cv::String("Backend is not available/disabled: ") + cv::videoio_registry::getBackendName(apiPref));
if (std::find(supportedAPIs.begin(), supportedAPIs.end(), apiPref) == supportedAPIs.end())
throw SkipTestException(cv::String("Backend is not supported: ") + cv::videoio_registry::getBackendName(apiPref));
if (!videoio_registry::isBackendBuiltIn(apiPref))
{
// Plugins older than ABI/API 1/2 lack the stream-buffer entry point.
int pluginABI, pluginAPI;
videoio_registry::getStreamBufferedBackendPluginVersion(apiPref, pluginABI, pluginAPI);
if (pluginABI < 1 || (pluginABI == 1 && pluginAPI < 2))
throw SkipTestException(format("Buffer capture supported since ABI/API = 1/2. %s plugin is %d/%d",
cv::videoio_registry::getBackendName(apiPref).c_str(), pluginABI, pluginAPI));
}
// CAP_PROP_FORMAT = -1 switches the capture to raw (undecoded) packet mode.
VideoCapture container;
String video_file = BunnyParameters::getFilename(String(".") + ext);
ASSERT_TRUE(utils::fs::exists(video_file));
EXPECT_NO_THROW(container.open(video_file, apiPref, {CAP_PROP_FORMAT, -1}));
ASSERT_TRUE(container.isOpened());
ASSERT_EQ(-1.f, container.get(CAP_PROP_FORMAT));
// Concatenate all raw packets into an in-memory buffer.
auto stream = std::make_shared<std::stringbuf>();
Mat keyFrame;
while (true)
{
container >> keyFrame;
if (keyFrame.empty())
break;
stream->sputn(keyFrame.ptr<char>(), keyFrame.total());
}
// Decode the buffered bitstream and compare with direct file playback.
VideoCapture capRef(video_file);
VideoCapture capStream;
EXPECT_NO_THROW(capStream.open(makePtr<BufferStream>(stream), apiPref, {}));
ASSERT_TRUE(capStream.isOpened());
const int numFrames = 10;
Mat frameRef, frame;
for (int i = 0; i < numFrames; ++i)
{
capRef >> frameRef;
ASSERT_NO_THROW(capStream >> frame);
EXPECT_FALSE(frame.empty());
EXPECT_EQ(0, cv::norm(frame, frameRef, NORM_INF)) << i;
}
}
INSTANTIATE_TEST_CASE_P(videoio, stream_capture_ffmpeg, testing::Values("h264", "h265", "mjpg.avi"));
} // namespace

View File

@ -4,7 +4,7 @@
<parent>
<groupId>org.opencv</groupId>
<artifactId>opencv-parent</artifactId>
<version>4.10.0</version>
<version>4.11.0</version>
</parent>
<groupId>org.opencv</groupId>
<artifactId>opencv-it</artifactId>

View File

@ -4,7 +4,7 @@
<parent>
<groupId>org.opencv</groupId>
<artifactId>opencv-parent</artifactId>
<version>4.10.0</version>
<version>4.11.0</version>
</parent>
<groupId>org.opencv</groupId>
<artifactId>opencv</artifactId>

View File

@ -3,7 +3,7 @@
<modelVersion>4.0.0</modelVersion>
<groupId>org.opencv</groupId>
<artifactId>opencv-parent</artifactId>
<version>4.10.0</version>
<version>4.11.0</version>
<packaging>pom</packaging>
<name>OpenCV Parent POM</name>
<licenses>

View File

@ -57,7 +57,6 @@ public class RecorderActivity extends CameraActivity implements CvCameraViewList
private VideoWriter mVideoWriter = null;
private VideoCapture mVideoCapture = null;
private Mat mVideoFrame;
private Mat mRenderFrame;
public RecorderActivity() {
Log.i(TAG, "Instantiated new " + this.getClass());
@ -122,7 +121,6 @@ public class RecorderActivity extends CameraActivity implements CvCameraViewList
mTriggerButton.setText("Start Camera");
mVideoFrame.release();
mRenderFrame.release();
}
@Override
@ -132,7 +130,6 @@ public class RecorderActivity extends CameraActivity implements CvCameraViewList
super.onResume();
mVideoFrame = new Mat();
mRenderFrame = new Mat();
changeStatus();
}
@ -294,12 +291,16 @@ public class RecorderActivity extends CameraActivity implements CvCameraViewList
mVideoCapture = new VideoCapture(mVideoFilename, Videoio.CAP_OPENCV_MJPEG);
}
if (!mVideoCapture.isOpened()) {
if (mVideoCapture == null || !mVideoCapture.isOpened()) {
Log.e(TAG, "Can't open video");
Toast.makeText(this, "Can't open file " + mVideoFilename, Toast.LENGTH_SHORT).show();
return false;
}
if (!mUseBuiltInMJPG){
mVideoCapture.set(Videoio.CAP_PROP_FOURCC, VideoWriter.fourcc('R','G','B','4'));
}
Toast.makeText(this, "Starting playback from file " + mVideoFilename, Toast.LENGTH_SHORT).show();
mPlayerThread = new Runnable() {
@ -315,11 +316,14 @@ public class RecorderActivity extends CameraActivity implements CvCameraViewList
}
return;
}
// VideoCapture with CAP_ANDROID generates RGB frames instead of BGR
// https://github.com/opencv/opencv/issues/24687
Imgproc.cvtColor(mVideoFrame, mRenderFrame, mUseBuiltInMJPG ? Imgproc.COLOR_BGR2RGBA: Imgproc.COLOR_RGB2RGBA);
Bitmap bmp = Bitmap.createBitmap(mRenderFrame.cols(), mRenderFrame.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(mRenderFrame, bmp);
// MJPEG codec will output BGR only. So we need to convert to RGBA.
if (mUseBuiltInMJPG) {
Imgproc.cvtColor(mVideoFrame, mVideoFrame, Imgproc.COLOR_BGR2RGBA);
}
Bitmap bmp = Bitmap.createBitmap(mVideoFrame.cols(), mVideoFrame.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(mVideoFrame, bmp);
mImageView.setImageBitmap(bmp);
Handler h = new Handler();
h.postDelayed(this, 33);