Merge pull request #19755 from mikhail-nikolskiy:ffmpeg-umat

cv::UMat output/input in VideoCapture/VideoWriter (data stays in GPU memory)

* FFMPEG with UMat input/output

* OpenCL_D3D* context

* fix Linux build

* cosmetic changes

* fix build if USE_AV_HW_CODECS=0

* simplify how child context pointer stored in parent context

* QSV interop with OpenCL on Windows

* detect_msdk.cmake via pkg-config

* fix av_buffer_ref() usage

* revert windows-decode-mfx whitelisting; remove debug msg

* address review comments

* rename property to HW_ACCELERATION_USE_OPENCL

* fix issue with "cl_khr_d3d11_sharing" extension not reported by OpenCL GPU+CPU platform

* core(ocl): add OpenCL stubs for configurations without OpenCL

* videoio(ffmpeg): update #if guards

* Put OpenCL related code under HAVE_OPENCL; simplify reuse of media context from OpenCL context

* videoio(test): skip unsupported tests

- plugins don't support OpenCL/UMat yet
- change handling of *_USE_OPENCL flag

* videoio(ffmpeg): OpenCL dependency

* videoio(ffmpeg): MediaSDK/oneVPL dependency

* cleanup, logging

* cmake: fix handling of 3rdparty interface targets

Co-authored-by: Alexander Alekhin <alexander.a.alekhin@gmail.com>
This commit is contained in:
Mikhail Nikolskii 2021-05-14 19:48:50 +03:00 committed by GitHub
parent bb92eb5a93
commit a604d44d06
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 1347 additions and 783 deletions

View File

@ -1431,7 +1431,16 @@ if(WITH_LIBREALSENSE OR HAVE_LIBREALSENSE)
endif()
if(WITH_MFX OR HAVE_MFX)
status(" Intel Media SDK:" HAVE_MFX THEN "YES (${MFX_LIBRARY})" ELSE NO)
if(HAVE_MFX)
if(MFX_LIBRARY)
set(__details " (${MFX_LIBRARY})")
elseif(MFX_LIBRARIES)
set(__details " (${MFX_LIBRARIES})")
else()
set(__details " (unknown)")
endif()
endif()
status(" Intel Media SDK:" HAVE_MFX THEN "YES${__details}" ELSE NO)
endif()
if(WITH_GPHOTO2 OR HAVE_GPHOTO2)

View File

@ -866,7 +866,9 @@ macro(ocv_check_modules define)
foreach(flag ${${define}_LDFLAGS})
if(flag MATCHES "^-L(.*)")
list(APPEND _libs_paths ${CMAKE_MATCH_1})
elseif(IS_ABSOLUTE "${flag}")
elseif(IS_ABSOLUTE "${flag}"
OR flag STREQUAL "-lstdc++"
)
list(APPEND _libs "${flag}")
elseif(flag MATCHES "^-l(.*)")
set(_lib "${CMAKE_MATCH_1}")
@ -1578,24 +1580,41 @@ endfunction()
function(ocv_add_external_target name inc link def)
if(BUILD_SHARED_LIBS)
if(BUILD_SHARED_LIBS AND link)
set(imp IMPORTED)
endif()
add_library(ocv.3rdparty.${name} INTERFACE ${imp})
set_target_properties(ocv.3rdparty.${name} PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "${inc}"
INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "${inc}"
INTERFACE_COMPILE_DEFINITIONS "${def}")
# When cmake version is greater than or equal to 3.11, INTERFACE_LINK_LIBRARIES no longer applies to interface library
# See https://github.com/opencv/opencv/pull/18658
if (CMAKE_VERSION VERSION_LESS 3.11)
set_target_properties(ocv.3rdparty.${name} PROPERTIES
INTERFACE_LINK_LIBRARIES "${link}")
else()
target_link_libraries(ocv.3rdparty.${name} INTERFACE ${link})
if(def)
if(NOT (CMAKE_VERSION VERSION_LESS "3.11.0")) # https://gitlab.kitware.com/cmake/cmake/-/merge_requests/1264 : eliminates "Cannot specify compile definitions for imported target" error message
target_compile_definitions(ocv.3rdparty.${name} INTERFACE "${def}")
else()
set_target_properties(ocv.3rdparty.${name} PROPERTIES INTERFACE_COMPILE_DEFINITIONS "${def}")
endif()
endif()
#
if(NOT BUILD_SHARED_LIBS)
if(inc)
if(NOT (CMAKE_VERSION VERSION_LESS "3.11.0")) # https://gitlab.kitware.com/cmake/cmake/-/merge_requests/1264 : eliminates "Cannot specify compile definitions for imported target" error message
target_include_directories(ocv.3rdparty.${name} SYSTEM INTERFACE "$<BUILD_INTERFACE:${inc}>")
else()
set_target_properties(ocv.3rdparty.${name} PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES "$<BUILD_INTERFACE:${inc}>"
INTERFACE_SYSTEM_INCLUDE_DIRECTORIES "$<BUILD_INTERFACE:${inc}>"
)
endif()
endif()
if(link)
# When cmake version is greater than or equal to 3.11, INTERFACE_LINK_LIBRARIES no longer applies to interface library
# See https://github.com/opencv/opencv/pull/18658
if(CMAKE_VERSION VERSION_LESS 3.11)
set_target_properties(ocv.3rdparty.${name} PROPERTIES
INTERFACE_LINK_LIBRARIES "${link}")
else()
target_link_libraries(ocv.3rdparty.${name} INTERFACE ${link})
endif()
endif()
# to install used target only upgrade CMake
if(NOT BUILD_SHARED_LIBS
AND CMAKE_VERSION VERSION_LESS "3.13.0" # https://gitlab.kitware.com/cmake/cmake/-/merge_requests/2152
)
install(TARGETS ocv.3rdparty.${name} EXPORT OpenCVModules)
endif()
endfunction()

View File

@ -43,6 +43,8 @@
#define OPENCV_OPENCL_HPP
#include "opencv2/core.hpp"
#include <typeinfo>
#include <typeindex>
namespace cv { namespace ocl {
@ -277,6 +279,12 @@ public:
/** @returns cl_context value */
void* ptr() const;
/**
* @brief Get OpenCL context property specified on context creation
* @param propertyId Property id (CL_CONTEXT_* as defined in cl_context_properties type)
* @returns Property value if property was specified on clCreateContext, or NULL if context created without the property
*/
void* getOpenCLContextProperty(int propertyId) const;
bool useSVM() const;
void setUseSVM(bool enabled);
@ -290,6 +298,21 @@ public:
void release();
/** @brief Base class for user-defined data attached to an OpenCL context.

Derive an interop helper from this class and store it on the context via
setUserContext(); ownership is shared (std::shared_ptr), so the data lives
at least as long as the context holds it.
*/
class CV_EXPORTS UserContext {
public:
virtual ~UserContext();
};
/** Attach (or replace) user data of type T on this context, keyed by typeid(T). */
template <typename T>
inline void setUserContext(const std::shared_ptr<T>& userContext) {
setUserContext(typeid(T), userContext);
}
/** Retrieve user data of type T previously stored with setUserContext<T>().
Returns an empty pointer if nothing was attached (dynamic_pointer_cast also
yields an empty pointer on a stored-type mismatch). */
template <typename T>
inline std::shared_ptr<T> getUserContext() {
return std::dynamic_pointer_cast<T>(getUserContext(typeid(T)));
}
// Non-template overloads keyed explicitly by std::type_index; the templates
// above are thin convenience wrappers around these.
void setUserContext(std::type_index typeId, const std::shared_ptr<UserContext>& userContext);
std::shared_ptr<UserContext> getUserContext(std::type_index typeId);
struct Impl;
inline Impl* getImpl() const { return (Impl*)p; }
inline bool empty() const { return !p; }

File diff suppressed because it is too large Load Diff

View File

@ -1,23 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
#ifndef OPENCV_CORE_SRC_DIRECTX_HPP
#define OPENCV_CORE_SRC_DIRECTX_HPP
#ifndef HAVE_DIRECTX
#error Invalid build configuration
#endif
namespace cv {
namespace directx {
namespace internal {
// Private bridge between the OpenCL context implementation and the DirectX
// interop code; opaque to everything outside core.
struct OpenCLDirectXImpl;
OpenCLDirectXImpl* createDirectXImpl();
void deleteDirectXImpl(OpenCLDirectXImpl**);
// Returns the per-context impl instance (created on demand — see ocl.cpp).
OpenCLDirectXImpl* getDirectXImpl(ocl::Context& ctx);
}}} // namespace internal
#endif // OPENCV_CORE_SRC_DIRECTX_HPP

View File

@ -113,10 +113,6 @@
#include "opencv2/core/opencl/runtime/opencl_core.hpp"
#ifdef HAVE_DIRECTX
#include "directx.hpp"
#endif
#ifdef HAVE_OPENCL_SVM
#include "opencv2/core/opencl/runtime/opencl_svm_20.hpp"
#include "opencv2/core/opencl/runtime/opencl_svm_hsa_extension.hpp"
@ -2367,9 +2363,6 @@ protected:
, contextId(CV_XADD(&g_contextId, 1))
, configuration(configuration_)
, handle(0)
#ifdef HAVE_DIRECTX
, p_directx_impl(0)
#endif
#ifdef HAVE_OPENCL_SVM
, svmInitialized(false)
#endif
@ -2395,11 +2388,10 @@ protected:
handle = NULL;
}
devices.clear();
#ifdef HAVE_DIRECTX
directx::internal::deleteDirectXImpl(&p_directx_impl);
#endif
}
userContextStorage.clear();
{
cv::AutoLock lock(cv::getInitializationMutex());
auto& container = getGlobalContainer();
@ -2705,18 +2697,20 @@ public:
return *bufferPoolHostPtr_.get();
}
#ifdef HAVE_DIRECTX
directx::internal::OpenCLDirectXImpl* p_directx_impl;
directx::internal::OpenCLDirectXImpl* getDirectXImpl()
{
if (!p_directx_impl)
{
p_directx_impl = directx::internal::createDirectXImpl();
}
return p_directx_impl;
std::map<std::type_index, std::shared_ptr<UserContext>> userContextStorage;
cv::Mutex userContextMutex;
void setUserContext(std::type_index typeId, const std::shared_ptr<UserContext>& userContext) {
cv::AutoLock lock(userContextMutex);
userContextStorage[typeId] = userContext;
}
std::shared_ptr<UserContext> getUserContext(std::type_index typeId) {
cv::AutoLock lock(userContextMutex);
auto it = userContextStorage.find(typeId);
if (it != userContextStorage.end())
return it->second;
else
return nullptr;
}
#endif
#ifdef HAVE_OPENCL_SVM
bool svmInitialized;
@ -3036,6 +3030,25 @@ Context Context::create(const std::string& configuration)
return ctx;
}
// Returns the value of the given CL_CONTEXT_* property that was supplied to
// clCreateContext() for this context, or nullptr if the context is empty or
// the property was not specified at creation time.
void* Context::getOpenCLContextProperty(int propertyId) const
{
    if (p == NULL)
        return nullptr;
    ::size_t size = 0;
    // Query the byte size of the properties list first, then fetch it.
    CV_OCL_CHECK(clGetContextInfo(p->handle, CL_CONTEXT_PROPERTIES, 0, NULL, &size));
    std::vector<cl_context_properties> prop(size / sizeof(cl_context_properties), (cl_context_properties)0);
    CV_OCL_CHECK(clGetContextInfo(p->handle, CL_CONTEXT_PROPERTIES, size, prop.data(), NULL));
    // The list is {id, value} pairs terminated by a single 0 entry.
    // Guard i+1 so the terminator (or a malformed odd-length list) can never
    // cause a read past the end of the buffer.
    for (size_t i = 0; i + 1 < prop.size(); i += 2)
    {
        if (prop[i] == (cl_context_properties)propertyId)
        {
            CV_LOG_DEBUG(NULL, "OpenCL: found context property=" << propertyId << " => " << (void*)prop[i + 1]);
            return (void*)prop[i + 1];
        }
    }
    return nullptr;  // property not present on this context
}
#ifdef HAVE_OPENCL_SVM
bool Context::useSVM() const
{
@ -3097,6 +3110,21 @@ CV_EXPORTS bool useSVM(UMatUsageFlags usageFlags)
} // namespace cv::ocl::svm
#endif // HAVE_OPENCL_SVM
// Trivial out-of-line destructor for the polymorphic UserContext base class.
Context::UserContext::~UserContext()
{
}
// Attach (or replace) a user-defined interop object on this context, keyed by
// its std::type_index. Requires a non-empty context; storage and locking are
// handled by the Impl (see Impl::setUserContext).
void Context::setUserContext(std::type_index typeId, const std::shared_ptr<Context::UserContext>& userContext)
{
CV_Assert(p);
p->setUserContext(typeId, userContext);
}
// Retrieve the user context previously stored for typeId, or nullptr if none
// was attached. Requires a non-empty context.
std::shared_ptr<Context::UserContext> Context::getUserContext(std::type_index typeId)
{
CV_Assert(p);
return p->getUserContext(typeId);
}
static void get_platform_name(cl_platform_id id, String& name)
{
@ -7505,15 +7533,4 @@ uint64 Timer::durationNS() const
}} // namespace
#ifdef HAVE_DIRECTX
namespace cv { namespace directx { namespace internal {
OpenCLDirectXImpl* getDirectXImpl(ocl::Context& ctx)
{
ocl::Context::Impl* i = ctx.getImpl();
CV_Assert(i);
return i->getDirectXImpl();
}
}}} // namespace cv::directx::internal
#endif
#endif // HAVE_OPENCL

View File

@ -172,9 +172,16 @@ Context& Context::getDefault(bool initialize)
}
// Stubs used when OpenCV is built without OpenCL: query-like methods return
// "empty" values, everything else reports that OpenCL is unavailable.
void* Context::ptr() const { return NULL; }
void* Context::getOpenCLContextProperty(int /*propertyId*/) const { OCL_NOT_AVAILABLE(); }
bool Context::useSVM() const { return false; }
void Context::setUseSVM(bool enabled) { }
// UserContext API: destructor must still exist (the type is declared in the
// public header), but storing/fetching data requires a real OpenCL context.
Context::UserContext::~UserContext() { }
void Context::setUserContext(std::type_index /*typeId*/, const std::shared_ptr<Context::UserContext>& /*userContext*/) { OCL_NOT_AVAILABLE(); }
std::shared_ptr<Context::UserContext> Context::getUserContext(std::type_index /*typeId*/) { OCL_NOT_AVAILABLE(); }
/* static */ Context Context::fromHandle(void* context) { OCL_NOT_AVAILABLE(); }
/* static */ Context Context::fromDevice(const ocl::Device& device) { OCL_NOT_AVAILABLE(); }
/* static */ Context Context::create(const std::string& configuration) { OCL_NOT_AVAILABLE(); }

View File

@ -7,6 +7,8 @@
#include "precomp.hpp"
#include <opencv2/core/utils/logger.hpp>
#ifdef HAVE_VA
# include <va/va.h>
#else // HAVE_VA
@ -48,12 +50,28 @@ namespace cv { namespace va_intel {
#ifdef HAVE_VA_INTEL
static clGetDeviceIDsFromVA_APIMediaAdapterINTEL_fn clGetDeviceIDsFromVA_APIMediaAdapterINTEL = NULL;
static clCreateFromVA_APIMediaSurfaceINTEL_fn clCreateFromVA_APIMediaSurfaceINTEL = NULL;
static clEnqueueAcquireVA_APIMediaSurfacesINTEL_fn clEnqueueAcquireVA_APIMediaSurfacesINTEL = NULL;
static clEnqueueReleaseVA_APIMediaSurfacesINTEL_fn clEnqueueReleaseVA_APIMediaSurfacesINTEL = NULL;
static bool contextInitialized = false;
// Per-OpenCL-context holder for the Intel VA-API media-sharing extension entry
// points. An instance is stored on the ocl::Context via setUserContext() (see
// initializeContextFromVA), so the resolved function pointers live exactly as
// long as the context they were resolved for.
class VAAPIInterop : public ocl::Context::UserContext
{
public:
// Resolves the three surface-sharing functions from the given platform;
// throws cv::Error::OpenCLInitError if any of them is unavailable.
VAAPIInterop(cl_platform_id platform) {
clCreateFromVA_APIMediaSurfaceINTEL = (clCreateFromVA_APIMediaSurfaceINTEL_fn)
clGetExtensionFunctionAddressForPlatform(platform, "clCreateFromVA_APIMediaSurfaceINTEL");
clEnqueueAcquireVA_APIMediaSurfacesINTEL = (clEnqueueAcquireVA_APIMediaSurfacesINTEL_fn)
clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueAcquireVA_APIMediaSurfacesINTEL");
clEnqueueReleaseVA_APIMediaSurfacesINTEL = (clEnqueueReleaseVA_APIMediaSurfacesINTEL_fn)
clGetExtensionFunctionAddressForPlatform(platform, "clEnqueueReleaseVA_APIMediaSurfacesINTEL");
if (!clCreateFromVA_APIMediaSurfaceINTEL ||
!clEnqueueAcquireVA_APIMediaSurfacesINTEL ||
!clEnqueueReleaseVA_APIMediaSurfacesINTEL) {
CV_Error(cv::Error::OpenCLInitError, "OpenCL: Can't get extension function for VA-API interop");
}
}
virtual ~VAAPIInterop() {
}
// Extension entry points, valid for the platform passed to the constructor.
clCreateFromVA_APIMediaSurfaceINTEL_fn clCreateFromVA_APIMediaSurfaceINTEL;
clEnqueueAcquireVA_APIMediaSurfacesINTEL_fn clEnqueueAcquireVA_APIMediaSurfacesINTEL;
clEnqueueReleaseVA_APIMediaSurfacesINTEL_fn clEnqueueReleaseVA_APIMediaSurfacesINTEL;
};
#endif // HAVE_VA_INTEL
@ -65,10 +83,8 @@ Context& initializeContextFromVA(VADisplay display, bool tryInterop)
#if !defined(HAVE_VA)
NO_VA_SUPPORT_ERROR;
#else // !HAVE_VA
init_libva();
# ifdef HAVE_VA_INTEL
contextInitialized = false;
if (tryInterop)
{
cl_uint numPlatforms;
@ -97,20 +113,10 @@ Context& initializeContextFromVA(VADisplay display, bool tryInterop)
for (int i = 0; i < (int)numPlatforms; ++i)
{
// Get extension function pointers
clGetDeviceIDsFromVA_APIMediaAdapterINTEL_fn clGetDeviceIDsFromVA_APIMediaAdapterINTEL;
clGetDeviceIDsFromVA_APIMediaAdapterINTEL = (clGetDeviceIDsFromVA_APIMediaAdapterINTEL_fn)
clGetExtensionFunctionAddressForPlatform(platforms[i], "clGetDeviceIDsFromVA_APIMediaAdapterINTEL");
clCreateFromVA_APIMediaSurfaceINTEL = (clCreateFromVA_APIMediaSurfaceINTEL_fn)
clGetExtensionFunctionAddressForPlatform(platforms[i], "clCreateFromVA_APIMediaSurfaceINTEL");
clEnqueueAcquireVA_APIMediaSurfacesINTEL = (clEnqueueAcquireVA_APIMediaSurfacesINTEL_fn)
clGetExtensionFunctionAddressForPlatform(platforms[i], "clEnqueueAcquireVA_APIMediaSurfacesINTEL");
clEnqueueReleaseVA_APIMediaSurfacesINTEL = (clEnqueueReleaseVA_APIMediaSurfacesINTEL_fn)
clGetExtensionFunctionAddressForPlatform(platforms[i], "clEnqueueReleaseVA_APIMediaSurfacesINTEL");
if (((void*)clGetDeviceIDsFromVA_APIMediaAdapterINTEL == NULL) ||
((void*)clCreateFromVA_APIMediaSurfaceINTEL == NULL) ||
((void*)clEnqueueAcquireVA_APIMediaSurfacesINTEL == NULL) ||
((void*)clEnqueueReleaseVA_APIMediaSurfacesINTEL == NULL))
if ((void*)clGetDeviceIDsFromVA_APIMediaAdapterINTEL == NULL)
{
continue;
}
@ -151,8 +157,6 @@ Context& initializeContextFromVA(VADisplay display, bool tryInterop)
if (found >= 0)
{
contextInitialized = true;
cl_platform_id platform = platforms[found];
std::string platformName = PlatformInfo(&platform).name();
@ -160,6 +164,7 @@ Context& initializeContextFromVA(VADisplay display, bool tryInterop)
try
{
clExecCtx = OpenCLExecutionContext::create(platformName, platform, context, device);
clExecCtx.getContext().setUserContext(std::make_shared<VAAPIInterop>(platform));
}
catch (...)
{
@ -520,7 +525,6 @@ void convertToVASurface(VADisplay display, InputArray src, VASurfaceID surface,
#if !defined(HAVE_VA)
NO_VA_SUPPORT_ERROR;
#else // !HAVE_VA
init_libva();
const int stype = CV_8UC3;
@ -531,7 +535,18 @@ void convertToVASurface(VADisplay display, InputArray src, VASurfaceID surface,
CV_Assert(srcSize.width == size.width && srcSize.height == size.height);
#ifdef HAVE_VA_INTEL
if (contextInitialized)
ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrent();
VAAPIInterop* interop = ocl_context.getContext().getUserContext<VAAPIInterop>().get();
CV_LOG_IF_DEBUG(NULL, !interop,
"OpenCL/VA_INTEL: Can't interop with current OpenCL context - missing VAAPIInterop API. "
"OpenCL context should be created through initializeContextFromVA()");
void* context_display = ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_VA_API_DISPLAY_INTEL);
CV_LOG_IF_INFO(NULL, interop && !context_display,
"OpenCL/VA_INTEL: Can't interop with current OpenCL context - missing VA display, context re-creation is required");
bool isValidContextDisplay = (display == context_display);
CV_LOG_IF_INFO(NULL, interop && context_display && !isValidContextDisplay,
"OpenCL/VA_INTEL: Can't interop with current OpenCL context - VA display mismatch: " << context_display << "(context) vs " << (void*)display << "(surface)");
if (isValidContextDisplay && interop)
{
UMat u = src.getUMat();
@ -541,28 +556,26 @@ void convertToVASurface(VADisplay display, InputArray src, VASurfaceID surface,
cl_mem clBuffer = (cl_mem)u.handle(ACCESS_READ);
using namespace cv::ocl;
Context& ctx = Context::getDefault();
cl_context context = (cl_context)ctx.ptr();
cl_context context = (cl_context)ocl_context.getContext().ptr();
cl_int status = 0;
cl_mem clImageY = clCreateFromVA_APIMediaSurfaceINTEL(context, CL_MEM_WRITE_ONLY, &surface, 0, &status);
cl_mem clImageY = interop->clCreateFromVA_APIMediaSurfaceINTEL(context, CL_MEM_WRITE_ONLY, &surface, 0, &status);
if (status != CL_SUCCESS)
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromVA_APIMediaSurfaceINTEL failed (Y plane)");
cl_mem clImageUV = clCreateFromVA_APIMediaSurfaceINTEL(context, CL_MEM_WRITE_ONLY, &surface, 1, &status);
cl_mem clImageUV = interop->clCreateFromVA_APIMediaSurfaceINTEL(context, CL_MEM_WRITE_ONLY, &surface, 1, &status);
if (status != CL_SUCCESS)
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromVA_APIMediaSurfaceINTEL failed (UV plane)");
cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
cl_command_queue q = (cl_command_queue)ocl_context.getQueue().ptr();
cl_mem images[2] = { clImageY, clImageUV };
status = clEnqueueAcquireVA_APIMediaSurfacesINTEL(q, 2, images, 0, NULL, NULL);
status = interop->clEnqueueAcquireVA_APIMediaSurfacesINTEL(q, 2, images, 0, NULL, NULL);
if (status != CL_SUCCESS)
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireVA_APIMediaSurfacesINTEL failed");
if (!ocl::ocl_convert_bgr_to_nv12(clBuffer, (int)u.step[0], u.cols, u.rows, clImageY, clImageUV))
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: ocl_convert_bgr_to_nv12 failed");
clEnqueueReleaseVA_APIMediaSurfacesINTEL(q, 2, images, 0, NULL, NULL);
interop->clEnqueueReleaseVA_APIMediaSurfacesINTEL(q, 2, images, 0, NULL, NULL);
if (status != CL_SUCCESS)
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseVA_APIMediaSurfacesINTEL failed");
@ -580,6 +593,7 @@ void convertToVASurface(VADisplay display, InputArray src, VASurfaceID surface,
else
# endif // HAVE_VA_INTEL
{
init_libva();
Mat m = src.getMat();
// TODO Add support for roi
@ -626,7 +640,6 @@ void convertFromVASurface(VADisplay display, VASurfaceID surface, Size size, Out
#if !defined(HAVE_VA)
NO_VA_SUPPORT_ERROR;
#else // !HAVE_VA
init_libva();
const int dtype = CV_8UC3;
@ -634,7 +647,9 @@ void convertFromVASurface(VADisplay display, VASurfaceID surface, Size size, Out
dst.create(size, dtype);
#ifdef HAVE_VA_INTEL
if (contextInitialized)
ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrent();
VAAPIInterop* interop = ocl_context.getContext().getUserContext<VAAPIInterop>().get();
if (display == ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_VA_API_DISPLAY_INTEL) && interop)
{
UMat u = dst.getUMat();
@ -644,28 +659,26 @@ void convertFromVASurface(VADisplay display, VASurfaceID surface, Size size, Out
cl_mem clBuffer = (cl_mem)u.handle(ACCESS_WRITE);
using namespace cv::ocl;
Context& ctx = Context::getDefault();
cl_context context = (cl_context)ctx.ptr();
cl_context context = (cl_context)ocl_context.getContext().ptr();
cl_int status = 0;
cl_mem clImageY = clCreateFromVA_APIMediaSurfaceINTEL(context, CL_MEM_READ_ONLY, &surface, 0, &status);
cl_mem clImageY = interop->clCreateFromVA_APIMediaSurfaceINTEL(context, CL_MEM_READ_ONLY, &surface, 0, &status);
if (status != CL_SUCCESS)
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromVA_APIMediaSurfaceINTEL failed (Y plane)");
cl_mem clImageUV = clCreateFromVA_APIMediaSurfaceINTEL(context, CL_MEM_READ_ONLY, &surface, 1, &status);
cl_mem clImageUV = interop->clCreateFromVA_APIMediaSurfaceINTEL(context, CL_MEM_READ_ONLY, &surface, 1, &status);
if (status != CL_SUCCESS)
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clCreateFromVA_APIMediaSurfaceINTEL failed (UV plane)");
cl_command_queue q = (cl_command_queue)Queue::getDefault().ptr();
cl_command_queue q = (cl_command_queue)ocl_context.getQueue().ptr();
cl_mem images[2] = { clImageY, clImageUV };
status = clEnqueueAcquireVA_APIMediaSurfacesINTEL(q, 2, images, 0, NULL, NULL);
status = interop->clEnqueueAcquireVA_APIMediaSurfacesINTEL(q, 2, images, 0, NULL, NULL);
if (status != CL_SUCCESS)
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueAcquireVA_APIMediaSurfacesINTEL failed");
if (!ocl::ocl_convert_nv12_to_bgr(clImageY, clImageUV, clBuffer, (int)u.step[0], u.cols, u.rows))
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: ocl_convert_nv12_to_bgr failed");
status = clEnqueueReleaseVA_APIMediaSurfacesINTEL(q, 2, images, 0, NULL, NULL);
status = interop->clEnqueueReleaseVA_APIMediaSurfacesINTEL(q, 2, images, 0, NULL, NULL);
if (status != CL_SUCCESS)
CV_Error(cv::Error::OpenCLApiCallError, "OpenCL: clEnqueueReleaseVA_APIMediaSurfacesINTEL failed");
@ -683,6 +696,7 @@ void convertFromVASurface(VADisplay display, VASurfaceID surface, Size size, Out
else
# endif // HAVE_VA_INTEL
{
init_libva();
Mat m = dst.getMat();
// TODO Add support for roi

View File

@ -75,7 +75,7 @@ endif()
include(${CMAKE_CURRENT_LIST_DIR}/cmake/plugin.cmake)
set(tgts)
set(tgts "PRIVATE")
if(TARGET ocv.3rdparty.mediasdk)
if("mfx" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all")
@ -157,10 +157,26 @@ if(TARGET ocv.3rdparty.ffmpeg)
list(APPEND tgts ocv.3rdparty.ffmpeg)
elseif("ffmpeg" IN_LIST VIDEOIO_PLUGIN_LIST OR VIDEOIO_PLUGIN_LIST STREQUAL "all")
ocv_create_builtin_videoio_plugin("opencv_videoio_ffmpeg" ocv.3rdparty.ffmpeg "cap_ffmpeg.cpp")
if(TARGET ocv.3rdparty.ffmpeg.plugin_deps)
ocv_target_link_libraries(opencv_videoio_ffmpeg ocv.3rdparty.ffmpeg.plugin_deps)
endif()
if(TARGET ocv.3rdparty.mediasdk
AND NOT OPENCV_FFMPEG_DISABLE_MEDIASDK
)
ocv_target_link_libraries(opencv_videoio_ffmpeg ocv.3rdparty.mediasdk)
endif()
else()
list(APPEND videoio_hdrs ${CMAKE_CURRENT_LIST_DIR}/src/cap_ffmpeg_impl.hpp)
list(APPEND videoio_srcs ${CMAKE_CURRENT_LIST_DIR}/src/cap_ffmpeg.cpp)
list(APPEND tgts ocv.3rdparty.ffmpeg)
if(TARGET ocv.3rdparty.ffmpeg.builtin_deps)
list(APPEND tgts ocv.3rdparty.ffmpeg.builtin_deps)
endif()
if(TARGET ocv.3rdparty.mediasdk
AND NOT OPENCV_FFMPEG_DISABLE_MEDIASDK
)
list(APPEND tgts ocv.3rdparty.mediasdk)
endif()
endif()
endif()
@ -213,6 +229,21 @@ if(TARGET ocv.3rdparty.android_native_camera)
list(APPEND tgts ocv.3rdparty.android_native_camera)
endif()
if(tgts STREQUAL "PRIVATE")
set(tgts "")
endif()
# install used dependencies only
if(NOT BUILD_SHARED_LIBS
    AND NOT (CMAKE_VERSION VERSION_LESS "3.13.0")  # upgrade CMake: https://gitlab.kitware.com/cmake/cmake/-/merge_requests/2152
)
  # NB: plain list expansion — `foreach(tgt in ${tgts})` would iterate over a
  # literal "in" item first, which is not valid foreach() syntax.
  foreach(tgt ${tgts})
    # Literal dots must be escaped as \\. inside a quoted CMake regex argument.
    if(tgt MATCHES "^ocv\\.3rdparty\\.")
      install(TARGETS ${tgt} EXPORT OpenCVModules)
    endif()
  endforeach()
endif()
ocv_set_module_sources(HEADERS ${videoio_ext_hdrs} ${videoio_hdrs} SOURCES ${videoio_srcs})
ocv_module_include_directories()
ocv_create_module()

View File

@ -99,6 +99,38 @@ if(HAVE_FFMPEG_WRAPPER)
ocv_add_external_target(ffmpeg "" "" "HAVE_FFMPEG_WRAPPER")
elseif(HAVE_FFMPEG)
ocv_add_external_target(ffmpeg "${FFMPEG_INCLUDE_DIRS}" "${FFMPEG_LIBRARIES}" "HAVE_FFMPEG")
set(__builtin_defines "")
set(__builtin_include_dirs "")
set(__builtin_libs "")
set(__plugin_defines "")
set(__plugin_include_dirs "")
set(__plugin_libs "")
if(HAVE_OPENCL)
  # Pick the OpenCL headers directory; fall back to the bundled 1.2 headers.
  set(__opencl_dirs "")
  if(OPENCL_INCLUDE_DIRS)
    set(__opencl_dirs "${OPENCL_INCLUDE_DIRS}")
  elseif(OPENCL_INCLUDE_DIR)
    set(__opencl_dirs "${OPENCL_INCLUDE_DIR}")
  else()
    set(__opencl_dirs "${OpenCV_SOURCE_DIR}/3rdparty/include/opencl/1.2")
  endif()
  # extra dependencies for builtin code (OpenCL dir is required for extensions like cl_d3d11.h)
  # builtin HAVE_OPENCL is already defined through cvconfig.h
  list(APPEND __builtin_include_dirs "${__opencl_dirs}")
  # extra dependencies for the standalone plugin (it does not see cvconfig.h,
  # so HAVE_OPENCL must be passed explicitly)
  list(APPEND __plugin_defines "HAVE_OPENCL")
  list(APPEND __plugin_include_dirs "${__opencl_dirs}")
endif()
# TODO: libva, d3d11
# Guard on the variables that are actually populated above (__builtin_defines /
# __builtin_libs, not the never-set __builtin_include_* spellings).
if(__builtin_include_dirs OR __builtin_defines OR __builtin_libs)
  ocv_add_external_target(ffmpeg.builtin_deps "${__builtin_include_dirs}" "${__builtin_libs}" "${__builtin_defines}")
endif()
# Parenthesize the OR-group: CMake evaluates AND/OR left-to-right, so without
# parentheses the plugin target could be created even with plugins disabled.
if(VIDEOIO_ENABLE_PLUGINS AND (__plugin_include_dirs OR __plugin_defines OR __plugin_libs))
  ocv_add_external_target(ffmpeg.plugin_deps "${__plugin_include_dirs}" "${__plugin_libs}" "${__plugin_defines}")
endif()
endif()
set(HAVE_FFMPEG ${HAVE_FFMPEG} PARENT_SCOPE)

View File

@ -1,7 +1,7 @@
set(MFX_DEFS "")
if(NOT HAVE_MFX)
find_package(VPL)
find_package(VPL QUIET)
if(VPL_FOUND)
set(MFX_INCLUDE_DIRS "")
set(MFX_LIBRARIES "${VPL_IMPORTED_TARGETS}")
@ -41,6 +41,10 @@ if(NOT HAVE_MFX)
endif()
endif()
if(NOT HAVE_MFX AND PKG_CONFIG_FOUND)
ocv_check_modules(MFX mfx)
endif()
if(HAVE_MFX AND UNIX)
foreach(mode NO_DEFAULT_PATH "")
find_path(MFX_va_INCLUDE va/va.h PATHS ${paths} PATH_SUFFIXES "include" ${mode})

View File

@ -184,7 +184,8 @@ enum VideoCaptureProperties {
CAP_PROP_ORIENTATION_META=48, //!< (read-only) Frame rotation defined by stream meta (applicable for FFmpeg back-end only)
CAP_PROP_ORIENTATION_AUTO=49, //!< if true - rotates output frames of CvCapture considering video file's metadata (applicable for FFmpeg back-end only) (https://github.com/opencv/opencv/issues/15499)
CAP_PROP_HW_ACCELERATION=50, //!< (**open-only**) Hardware acceleration type (see #VideoAccelerationType). Setting supported only via `params` parameter in cv::VideoCapture constructor / .open() method. Default value is backend-specific.
CAP_PROP_HW_DEVICE =51, //!< (**open-only**) Hardware device index (select GPU if multiple available)
CAP_PROP_HW_DEVICE =51, //!< (**open-only**) Hardware device index (select GPU if multiple available). Device enumeration is acceleration type specific.
CAP_PROP_HW_ACCELERATION_USE_OPENCL=52, //!< (**open-only**) If non-zero, create new OpenCL context and bind it to current thread. The OpenCL context is created with the Video Acceleration context attached to it (if not attached yet), enabling optimized GPU data copy between the HW-accelerated decoder and cv::UMat.
#ifndef CV_DOXYGEN
CV__CAP_PROP_LATEST
#endif
@ -201,7 +202,8 @@ enum VideoWriterProperties {
//!< will work with grayscale frames.
VIDEOWRITER_PROP_DEPTH = 5, //!< Defaults to CV_8U.
VIDEOWRITER_PROP_HW_ACCELERATION = 6, //!< (**open-only**) Hardware acceleration type (see #VideoAccelerationType). Setting supported only via `params` parameter in VideoWriter constructor / .open() method. Default value is backend-specific.
VIDEOWRITER_PROP_HW_DEVICE = 7, //!< (**open-only**) Hardware device index (select GPU if multiple available)
VIDEOWRITER_PROP_HW_DEVICE = 7, //!< (**open-only**) Hardware device index (select GPU if multiple available). Device enumeration is acceleration type specific.
VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL= 8, //!< (**open-only**) If non-zero, create new OpenCL context and bind it to current thread. The OpenCL context is created with the Video Acceleration context attached to it (if not attached yet), enabling optimized GPU data copy between cv::UMat and the HW-accelerated encoder.
#ifndef CV_DOXYGEN
CV__VIDEOWRITER_PROP_LATEST
#endif

View File

@ -92,8 +92,17 @@ public:
unsigned char* data = 0;
int step=0, width=0, height=0, cn=0;
if (!ffmpegCapture ||
!icvRetrieveFrame_FFMPEG_p(ffmpegCapture, &data, &step, &width, &height, &cn))
if (!ffmpegCapture)
return false;
// if UMat, try GPU to GPU copy using OpenCL extensions
if (frame.isUMat()) {
if (ffmpegCapture->retrieveHWFrame(frame)) {
return true;
}
}
if (!icvRetrieveFrame_FFMPEG_p(ffmpegCapture, &data, &step, &width, &height, &cn))
return false;
cv::Mat tmp(height, width, CV_MAKETYPE(CV_8U, cn), data, step);
@ -176,6 +185,13 @@ public:
return;
CV_Assert(image.depth() == CV_8U);
// if UMat, try GPU to GPU copy using OpenCL extensions
if (image.isUMat()) {
if (ffmpegWriter->writeHWFrame(image)) {
return;
}
}
icvWriteFrame_FFMPEG_p(ffmpegWriter, (const uchar*)image.getMat().ptr(), (int)image.step(), image.cols(), image.rows(), image.channels(), 0);
}
virtual bool open( const cv::String& filename, int fourcc, double fps, cv::Size frameSize, const VideoWriterParameters& params )

View File

@ -5,7 +5,10 @@
// Copyright (C) 2020-2021 Intel Corporation
#include "opencv2/videoio.hpp"
#if defined(__OPENCV_BUILD) || defined(OPENCV_HAVE_CVCONFIG_H) // TODO Properly detect and add D3D11 / LIBVA dependencies for standalone plugins
#ifdef HAVE_OPENCL
#include "opencv2/core/ocl.hpp"
#endif
#if defined(__OPENCV_BUILD) && !defined(BUILD_PLUGIN) // TODO Properly detect and add D3D11 / LIBVA dependencies for standalone plugins
#include "cvconfig.h"
#endif
#include <sstream>
@ -14,16 +17,31 @@
#define D3D11_NO_HELPERS
#include <d3d11.h>
#include <codecvt>
#include "opencv2/core/directx.hpp"
#ifdef HAVE_OPENCL
#include <CL/cl_d3d11.h>
#endif
#endif // HAVE_D3D11
#ifdef HAVE_VA
#include <va/va_backend.h>
#ifdef HAVE_VA_INTEL
#include "opencv2/core/va_intel.hpp"
#ifndef CL_TARGET_OPENCL_VERSION
#define CL_TARGET_OPENCL_VERSION 120
#endif
#ifdef HAVE_VA_INTEL_OLD_HEADER
#include <CL/va_ext.h>
#else
#include <CL/cl_va_api_media_sharing_intel.h>
#endif
#endif
#endif // HAVE_VA
// FFMPEG "C" headers
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavutil/avutil.h>
#include <libavutil/hwcontext.h>
#ifdef HAVE_D3D11
#include <libavutil/hwcontext_d3d11va.h>
@ -31,8 +49,23 @@ extern "C" {
#ifdef HAVE_VA
#include <libavutil/hwcontext_vaapi.h>
#endif
#ifdef HAVE_MFX // dependency only on MFX header files, no linkage dependency
#include <libavutil/hwcontext_qsv.h>
#endif
}
#define HW_DEFAULT_POOL_SIZE 32
#define HW_DEFAULT_SW_FORMAT AV_PIX_FMT_NV12
using namespace cv;
static AVCodec *hw_find_codec(AVCodecID id, AVHWDeviceType hw_type, int (*check_category)(const AVCodec *),
const char *disabled_codecs, AVPixelFormat *hw_pix_fmt);
static AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname, bool use_opencl);
static AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device_ctx, int width, int height, AVPixelFormat hw_format);
static AVPixelFormat hw_get_format_callback(struct AVCodecContext *ctx, const enum AVPixelFormat * fmt);
static VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type);
static
const char* getVideoAccelerationName(VideoAccelerationType va_type)
{
@ -70,7 +103,7 @@ std::string getDecoderConfiguration(VideoAccelerationType va_type, AVDictionary
case VIDEO_ACCELERATION_ANY: return "d3d11va";
case VIDEO_ACCELERATION_D3D11: return "d3d11va";
case VIDEO_ACCELERATION_VAAPI: return "";
case VIDEO_ACCELERATION_MFX: return "";
case VIDEO_ACCELERATION_MFX: return ""; // "qsv" fails if non-Intel D3D11 device
}
return "";
#else
@ -80,7 +113,7 @@ std::string getDecoderConfiguration(VideoAccelerationType va_type, AVDictionary
case VIDEO_ACCELERATION_ANY: return "vaapi.iHD";
case VIDEO_ACCELERATION_D3D11: return "";
case VIDEO_ACCELERATION_VAAPI: return "vaapi.iHD";
case VIDEO_ACCELERATION_MFX: return "";
case VIDEO_ACCELERATION_MFX: return "qsv.iHD";
}
return "";
#endif
@ -125,7 +158,6 @@ std::string getEncoderConfiguration(VideoAccelerationType va_type, AVDictionary
#endif
}
static
std::string getDecoderDisabledCodecs(AVDictionary *dict)
{
@ -170,19 +202,6 @@ std::string getEncoderDisabledCodecs(AVDictionary *dict)
#endif
}
#define HW_DEFAULT_POOL_SIZE 32
#define HW_DEFAULT_SW_FORMAT AV_PIX_FMT_NV12
using namespace cv;
static AVCodec *hw_find_codec(AVCodecID id, AVHWDeviceType hw_type, int (*check_category)(const AVCodec *),
const char *disabled_codecs, AVPixelFormat *hw_pix_fmt);
static AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname);
static AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device_ctx, int width, int height, AVPixelFormat hw_format);
static AVPixelFormat hw_get_format_callback(struct AVCodecContext *ctx, const enum AVPixelFormat * fmt);
static VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type);
static
bool hw_check_device(AVBufferRef* ctx, AVHWDeviceType hw_type, const std::string& device_subname) {
if (!ctx)
@ -259,75 +278,343 @@ bool hw_check_device(AVBufferRef* ctx, AVHWDeviceType hw_type, const std::string
}
static
AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname) {
if (AV_HWDEVICE_TYPE_NONE == hw_type)
return NULL;
AVHWDeviceType child_type = hw_type;
if (hw_type == AV_HWDEVICE_TYPE_QSV) {
#ifdef _WIN32
child_type = AV_HWDEVICE_TYPE_DXVA2;
#else
child_type = AV_HWDEVICE_TYPE_VAAPI;
#endif
}
AVBufferRef* hw_device_ctx = NULL;
char device[128] = "";
char* pdevice = NULL;
if (hw_device >= 0 && hw_device < 100000) {
if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
snprintf(device, sizeof(device), "/dev/dri/renderD%d", 128 + hw_device);
} else {
snprintf(device, sizeof(device), "%d", hw_device);
}
pdevice = device;
}
const char *hw_child_name = av_hwdevice_get_type_name(child_type);
const char *device_name = pdevice ? pdevice : "'default'";
int err = av_hwdevice_ctx_create(&hw_device_ctx, child_type, pdevice, NULL, 0);
if (hw_device_ctx && err >= 0)
AVBufferRef* hw_create_derived_context(AVHWDeviceType hw_type, AVBufferRef* hw_device_ctx) {
AVBufferRef* derived_ctx = NULL;
const char* hw_name = av_hwdevice_get_type_name(hw_type);
int err = av_hwdevice_ctx_create_derived(&derived_ctx, hw_type, hw_device_ctx, 0);
if (!derived_ctx || err < 0)
{
CV_LOG_DEBUG(NULL, "FFMPEG: Created video acceleration context (av_hwdevice_ctx_create) for " << hw_child_name << " on device " << device_name);
if (!hw_check_device(hw_device_ctx, hw_type, device_subname)) {
av_buffer_unref(&hw_device_ctx);
return NULL;
}
if (hw_type != child_type) {
AVBufferRef *derived_ctx = NULL;
const char *hw_name = av_hwdevice_get_type_name(hw_type);
err = av_hwdevice_ctx_create_derived(&derived_ctx, hw_type, hw_device_ctx, 0);
if (!derived_ctx || err < 0)
{
if (derived_ctx)
av_buffer_unref(&derived_ctx);
CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived video acceleration (av_hwdevice_ctx_create_derived) for " << hw_name << ". Error=" << err);
}
else
{
CV_LOG_DEBUG(NULL, "FFMPEG: Created derived video acceleration context (av_hwdevice_ctx_create_derived) for " << hw_name);
}
av_buffer_unref(&hw_device_ctx);
return derived_ctx;
} else {
return hw_device_ctx;
}
if (derived_ctx)
av_buffer_unref(&derived_ctx);
CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived video acceleration (av_hwdevice_ctx_create_derived) for " << hw_name << ". Error=" << err);
return NULL;
}
else
{
const char *hw_name = hw_child_name;
CV_LOG_INFO(NULL, "FFMPEG: Failed to create " << hw_name << " video acceleration (av_hwdevice_ctx_create) on device " << device_name);
// Store child context in 'user_opaque' field of parent context.
struct FreeChildContext {
static void free(struct AVHWDeviceContext* ctx) {
AVBufferRef* child_ctx = (AVBufferRef*)ctx->user_opaque;
if (child_ctx)
av_buffer_unref(&child_ctx);
}
};
AVHWDeviceContext* ctx = (AVHWDeviceContext*)derived_ctx->data;
ctx->user_opaque = av_buffer_ref(hw_device_ctx);
ctx->free = FreeChildContext::free;
CV_LOG_INFO(NULL, "FFMPEG: Created derived video acceleration context (av_hwdevice_ctx_create_derived) for " << hw_name);
return derived_ctx;
}
}
#ifdef HAVE_OPENCL // GPU buffer interop with cv::UMat
// FFmpeg context attached to OpenCL context
class OpenCL_FFMPEG_Context : public ocl::Context::UserContext {
public:
OpenCL_FFMPEG_Context(AVBufferRef* ctx) {
ctx_ = av_buffer_ref(ctx);
}
virtual ~OpenCL_FFMPEG_Context() {
av_buffer_unref(&ctx_);
}
AVBufferRef* GetAVHWDevice() {
return ctx_;
}
private:
AVBufferRef* ctx_;
};
#ifdef HAVE_MFX
// Locates the mfxFrameSurface1 backing 'hw_frame' inside the surface pool of
// its AV_PIX_FMT_QSV frames context and returns its pool index, or -1 when the
// frame is not a QSV frame or the surface is not found in the pool.
static
int hw_find_qsv_surface_index(AVFrame* hw_frame)
{
    if (hw_frame->format != AV_PIX_FMT_QSV)
        return -1;
    mfxFrameSurface1* surface = (mfxFrameSurface1*)hw_frame->data[3]; // As defined by AV_PIX_FMT_QSV
    AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
    AVQSVFramesContext* qsv_ctx = (AVQSVFramesContext*)frames_ctx->hwctx;
    int found = -1;
    for (int idx = 0; found < 0 && idx < qsv_ctx->nb_surfaces; idx++) {
        if (qsv_ctx->surfaces + idx == surface)
            found = idx;
    }
    return found;
}
#endif
#ifdef HAVE_VA
// Returns the VADisplay held by a VAAPI device context. For QSV contexts the
// VAAPI child context stored in 'user_opaque' (see hw_create_derived_context)
// is unwrapped first. Returns NULL when no VAAPI display is available.
static
VADisplay hw_get_va_display(AVHWDeviceContext* hw_device_ctx)
{
    if (AV_HWDEVICE_TYPE_QSV == hw_device_ctx->type) {
        // we stored pointer to child context in 'user_opaque' field
        AVBufferRef* child = (AVBufferRef*)hw_device_ctx->user_opaque;
        hw_device_ctx = (AVHWDeviceContext*)child->data;
    }
    if (!hw_device_ctx || hw_device_ctx->type != AV_HWDEVICE_TYPE_VAAPI)
        return NULL;
    return ((AVVAAPIDeviceContext*)hw_device_ctx->hwctx)->display;
}
#endif // HAVE_VA
#ifdef HAVE_VA_INTEL
// Returns the VASurfaceID backing 'hw_frame'. Handles plain VAAPI frames
// directly and, when MFX is available, QSV frames whose child VAAPI frames
// context is reachable via 'user_opaque'. Returns VA_INVALID_SURFACE when
// no VAAPI surface can be resolved.
static
VASurfaceID hw_get_va_surface(AVFrame* hw_frame) {
    if (AV_PIX_FMT_VAAPI == hw_frame->format)
        return (VASurfaceID)(size_t)hw_frame->data[3]; // As defined by AV_PIX_FMT_VAAPI
#ifdef HAVE_MFX
    if (AV_PIX_FMT_QSV == hw_frame->format) {
        // frame index is same in parent (QSV) and child (VAAPI) frame context
        int frame_idx = hw_find_qsv_surface_index(hw_frame);
        if (frame_idx >= 0) {
            AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
            AVHWFramesContext* child_ctx = (AVHWFramesContext*)frames_ctx->user_opaque;
            if (child_ctx && AV_HWDEVICE_TYPE_VAAPI == child_ctx->device_ctx->type) {
                AVVAAPIFramesContext* vaapi_ctx = (AVVAAPIFramesContext*)child_ctx->hwctx;
                CV_Assert(frame_idx < vaapi_ctx->nb_surfaces);
                return vaapi_ctx->surface_ids[frame_idx];
            }
        }
    }
#endif // HAVE_MFX
    return VA_INVALID_SURFACE;
}
#endif // HAVE_VA_INTEL
#ifdef HAVE_D3D11
// Returns the AVD3D11VADeviceContext held by a D3D11VA device context. For QSV
// contexts the D3D11 child context stored in 'user_opaque' is unwrapped first.
// Returns NULL when the context is not backed by D3D11.
static
AVD3D11VADeviceContext* hw_get_d3d11_device_ctx(AVHWDeviceContext* hw_device_ctx) {
    if (hw_device_ctx->type == AV_HWDEVICE_TYPE_QSV) {
        // we stored pointer to child context in 'user_opaque' field
        AVBufferRef* child = (AVBufferRef*)hw_device_ctx->user_opaque;
        hw_device_ctx = (AVHWDeviceContext*)child->data;
    }
    if (hw_device_ctx->type != AV_HWDEVICE_TYPE_D3D11VA)
        return NULL;
    return (AVD3D11VADeviceContext*)hw_device_ctx->hwctx;
}
// Returns the ID3D11Texture2D backing 'hw_frame' and stores the texture-array
// slice index in '*subresource'. Handles plain D3D11 frames directly and,
// when MFX is available, QSV frames whose child D3D11 frames context is
// reachable via 'user_opaque'. Returns NULL when no D3D11 texture is available.
// Marked 'static' for internal linkage, consistent with the sibling helpers.
static
ID3D11Texture2D* hw_get_d3d11_texture(AVFrame* hw_frame, int* subresource) {
    ID3D11Texture2D* texture = NULL;
    if (AV_PIX_FMT_D3D11 == hw_frame->format) {
        texture = (ID3D11Texture2D*)hw_frame->data[0]; // As defined by AV_PIX_FMT_D3D11
        *subresource = (intptr_t)hw_frame->data[1]; // As defined by AV_PIX_FMT_D3D11
    }
#ifdef HAVE_MFX
    else if (AV_PIX_FMT_QSV == hw_frame->format) {
        AVHWFramesContext *frames_ctx = (AVHWFramesContext *) hw_frame->hw_frames_ctx->data;
        AVHWFramesContext *child_ctx = (AVHWFramesContext *) frames_ctx->user_opaque;
        if (child_ctx && AV_HWDEVICE_TYPE_D3D11VA == child_ctx->device_ctx->type) {
            texture = ((AVD3D11VAFramesContext*)child_ctx->hwctx)->texture;
        }
        // QSV surface index equals the subresource index in the child D3D11 texture array
        *subresource = hw_find_qsv_surface_index(hw_frame);
        CV_Assert(*subresource >= 0);
    }
#endif
    return texture;
}
// In D3D11 case we allocate additional texture as single texture (not texture array) because
// OpenCL interop with D3D11 doesn't support/work with NV12 sub-texture of texture array.
// The single texture is cached in the frames context's 'user_opaque' field; its release is
// handled by the free() callback installed on the frames context in hw_create_frames().
// Returns NULL when the frames context is not D3D11-based or texture creation fails.
// Marked 'static' for internal linkage, consistent with the sibling helpers.
static
ID3D11Texture2D* hw_get_d3d11_single_texture(AVFrame* hw_frame, AVD3D11VADeviceContext* d3d11_device_ctx, ID3D11Texture2D* texture) {
    AVHWFramesContext* frames_ctx = (AVHWFramesContext*)hw_frame->hw_frames_ctx->data;
    if (AV_HWDEVICE_TYPE_QSV == frames_ctx->device_ctx->type) {
        frames_ctx = (AVHWFramesContext*)frames_ctx->user_opaque; // we stored pointer to child context in 'user_opaque' field
    }
    if (!frames_ctx || AV_HWDEVICE_TYPE_D3D11VA != frames_ctx->device_ctx->type) {
        return NULL;
    }
    ID3D11Texture2D* singleTexture = (ID3D11Texture2D*)frames_ctx->user_opaque;
    if (!singleTexture && d3d11_device_ctx && texture) {
        // Lazily create the single texture with the same description as the
        // array texture, adjusted for OpenCL sharing.
        D3D11_TEXTURE2D_DESC desc = {};
        texture->GetDesc(&desc);
        desc.ArraySize = 1;
        desc.BindFlags |= D3D11_BIND_SHADER_RESOURCE;
        desc.MiscFlags |= D3D11_RESOURCE_MISC_SHARED;
        if (SUCCEEDED(d3d11_device_ctx->device->CreateTexture2D(&desc, NULL, &singleTexture))) {
            frames_ctx->user_opaque = singleTexture;
        }
    }
    return singleTexture;
}
#endif // HAVE_D3D11
// Checks whether the current OpenCL context was created with interop binding
// to the same underlying device as the FFmpeg device context 'ctx'.
// Returns the matching child device type (AV_HWDEVICE_TYPE_VAAPI or
// AV_HWDEVICE_TYPE_D3D11VA), or AV_HWDEVICE_TYPE_NONE when there is no
// current OpenCL context or the devices do not match.
static
AVHWDeviceType hw_check_opencl_context(AVHWDeviceContext* ctx) {
    ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
    if (!ctx || ocl_context.empty())
        return AV_HWDEVICE_TYPE_NONE;
#ifdef HAVE_VA_INTEL
    // Compare the VADisplay bound to the OpenCL context with the one in 'ctx'
    VADisplay vadisplay_ocl = ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_VA_API_DISPLAY_INTEL);
    VADisplay vadisplay_ctx = hw_get_va_display(ctx);
    if (vadisplay_ocl && vadisplay_ocl == vadisplay_ctx)
        return AV_HWDEVICE_TYPE_VAAPI;
#endif
#ifdef HAVE_D3D11
    // Compare the ID3D11Device bound to the OpenCL context with the one in 'ctx'
    ID3D11Device* d3d11device_ocl = (ID3D11Device*)ocl_context.getContext().getOpenCLContextProperty(CL_CONTEXT_D3D11_DEVICE_KHR);
    AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(ctx);
    if (d3d11_device_ctx && d3d11device_ocl && d3d11_device_ctx->device == d3d11device_ocl)
        return AV_HWDEVICE_TYPE_D3D11VA;
#endif
    return AV_HWDEVICE_TYPE_NONE;
}
// Initializes the current OpenCL context with interop binding to the media
// device held by 'ctx' (VAAPI display or D3D11 device), then attaches 'ctx'
// to that OpenCL context so subsequent VideoCapture/VideoWriter instances can
// reuse it (see hw_create_context_from_opencl()).
static
void hw_init_opencl(AVBufferRef* ctx) {
    if (!ctx)
        return;
    AVHWDeviceContext* hw_device_ctx = (AVHWDeviceContext*)ctx->data;
    if (!hw_device_ctx)
        return;
#ifdef HAVE_VA_INTEL
    VADisplay va_display = hw_get_va_display(hw_device_ctx);
    if (va_display) {
        va_intel::ocl::initializeContextFromVA(va_display);
    }
#endif
#ifdef HAVE_D3D11
    AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
    if (d3d11_device_ctx) {
        directx::ocl::initializeContextFromD3D11Device(d3d11_device_ctx->device);
    }
#endif
    // Attach only if the interop binding above actually took effect for this device
    if (hw_check_opencl_context(hw_device_ctx) != AV_HWDEVICE_TYPE_NONE) {
        // Attach AVHWDeviceContext to OpenCL context
        ocl::Context &ocl_context = ocl::OpenCLExecutionContext::getCurrent().getContext();
        ocl_context.setUserContext(std::make_shared<OpenCL_FFMPEG_Context>(ctx));
    }
}
static
AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device_ctx, int width, int height, AVPixelFormat hw_format)
{
AVBufferRef *hw_frames_ref = nullptr;
// Returns an AVHWDeviceContext of type 'hw_type' that reuses the media device
// previously attached to the current OpenCL context (see hw_init_opencl()).
// If the attached context has a different type, a derived context is created.
// Returns a new reference (caller must release) or NULL if nothing is attached
// or derivation fails.
AVBufferRef* hw_create_context_from_opencl(ocl::OpenCLExecutionContext& ocl_context, AVHWDeviceType hw_type) {
    if (ocl_context.empty())
        return NULL;
    auto ocl_ffmpeg_context = ocl_context.getContext().getUserContext<OpenCL_FFMPEG_Context>();
    if (!ocl_ffmpeg_context)
        return NULL;
    AVBufferRef* ctx = ocl_ffmpeg_context->GetAVHWDevice();
    if (hw_type != ((AVHWDeviceContext*)ctx->data)->type) {
        // Requested type differs from the attached one (e.g. QSV requested,
        // VAAPI/D3D11 attached) - derive a context of the requested type
        ctx = hw_create_derived_context(hw_type, ctx);
    }
    else {
        ctx = av_buffer_ref(ctx);
    }
    if (ctx)
        CV_LOG_INFO(NULL, "FFMPEG: Using " << av_hwdevice_get_type_name(hw_type) << " video acceleration context attached to OpenCL context");
    return ctx;
}
#endif // HAVE_OPENCL
// Creates an FFmpeg HW device context of the requested type.
// Lookup order:
//   1. Reuse the media context already attached to the current OpenCL context.
//   2. Create a new context via av_hwdevice_ctx_create(); in the QSV case a
//      'child' context is created first (D3D11VA then DXVA2 on Windows,
//      VAAPI elsewhere) and the QSV context is derived from it.
// When a new context is created and OpenCL is available, an OpenCL context
// with interop binding to it is created/attached as well, unless an OpenCL
// context already exists and 'use_opencl' is false.
// 'hw_device' >= 0 selects a specific device index; 'device_subname' filters
// accepted devices (see hw_check_device()). Returns a new reference or NULL.
static
AVBufferRef* hw_create_device(AVHWDeviceType hw_type, int hw_device, const std::string& device_subname, bool use_opencl) {
    AVBufferRef* hw_device_ctx = NULL;
    if (AV_HWDEVICE_TYPE_NONE == hw_type)
        return NULL;

#ifdef HAVE_OPENCL
    // Check if OpenCL context has AVHWDeviceContext attached to it
    ocl::OpenCLExecutionContext& ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
    try {
        hw_device_ctx = hw_create_context_from_opencl(ocl_context, hw_type);
        if (hw_device_ctx) {
            if (hw_device >= 0)
                CV_LOG_ERROR(NULL, "VIDEOIO/FFMPEG: ignoring property HW_DEVICE as device context already created and attached to OpenCL context");
            return hw_device_ctx;
        }
    }
    catch (...) {
        CV_LOG_INFO(NULL, "FFMPEG: Exception creating Video Acceleration context using current OpenCL context");
    }
#endif

    // Create new media context. In QSV case, first create 'child' context.
    std::vector<AVHWDeviceType> child_types = { hw_type };
    if (hw_type == AV_HWDEVICE_TYPE_QSV) {
#ifdef _WIN32
        child_types = { AV_HWDEVICE_TYPE_D3D11VA, AV_HWDEVICE_TYPE_DXVA2 };
#else
        child_types = { AV_HWDEVICE_TYPE_VAAPI };
#endif
    }
    for (AVHWDeviceType child_type : child_types) {
        char device[128] = "";
        char* pdevice = NULL;
        // Map the numeric device index to a backend-specific device string
        if (hw_device >= 0 && hw_device < 100000) {
            if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
                snprintf(device, sizeof(device), "/dev/dri/renderD%d", 128 + hw_device);
            }
            else {
                snprintf(device, sizeof(device), "%d", hw_device);
            }
            pdevice = device;
        }
        const char* hw_child_name = av_hwdevice_get_type_name(child_type);
        const char* device_name = pdevice ? pdevice : "'default'";
        int err = av_hwdevice_ctx_create(&hw_device_ctx, child_type, pdevice, NULL, 0);
        if (hw_device_ctx && err >= 0)
        {
            // Reject devices not matching 'device_subname'; try next child type
            if (!hw_check_device(hw_device_ctx, hw_type, device_subname)) {
                av_buffer_unref(&hw_device_ctx);
                continue;
            }
            CV_LOG_INFO(NULL, "FFMPEG: Created video acceleration context (av_hwdevice_ctx_create) for " << hw_child_name << " on device " << device_name);
#ifdef HAVE_OPENCL
            // if OpenCL context not created yet or property HW_ACCELERATION_USE_OPENCL set, create OpenCL context with binding to video acceleration context
            if (ocl::haveOpenCL()) {
                if (ocl_context.empty() || use_opencl) {
                    try {
                        hw_init_opencl(hw_device_ctx);
                        ocl_context = ocl::OpenCLExecutionContext::getCurrentRef();
                        if (!ocl_context.empty()) {
                            CV_LOG_INFO(NULL, "FFMPEG: Created OpenCL context with " << hw_child_name <<
                                " video acceleration on OpenCL device: " << ocl_context.getDevice().name());
                        }
                    } catch (...) {
                        CV_LOG_INFO(NULL, "FFMPEG: Exception creating OpenCL context with " << hw_child_name << " video acceleration");
                    }
                }
                else {
                    CV_LOG_INFO(NULL, "FFMPEG: Can't bind " << hw_child_name << " video acceleration context to already created OpenCL context");
                }
            }
#else
            CV_UNUSED(use_opencl);
#endif
            if (hw_type != child_type) {
                // Derive the requested (QSV) context from the child context;
                // hw_create_derived_context keeps its own reference to the child
                AVBufferRef* derived_ctx = hw_create_derived_context(hw_type, hw_device_ctx);
                av_buffer_unref(&hw_device_ctx);
                return derived_ctx;
            } else {
                return hw_device_ctx;
            }
        }
        else
        {
            const char* hw_name = hw_child_name;
            CV_LOG_INFO(NULL, "FFMPEG: Failed to create " << hw_name << " video acceleration (av_hwdevice_ctx_create) on device " << device_name);
        }
    }
    return NULL;
}
static
AVBufferRef* hw_create_frames(struct AVCodecContext* codec_ctx, AVBufferRef *hw_device_ctx, int width, int height, AVPixelFormat hw_format)
{
AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)hw_device_ctx->data;
AVBufferRef* child_ctx = hw_device_ctx;
// In QSV case we first allocate child D3D11/VAAPI frames (except DXVA2 as no OpenCL interop), then derive to parent QSV frames
if (AV_HWDEVICE_TYPE_QSV == device_ctx->type) {
AVBufferRef *ctx = (AVBufferRef *) device_ctx->user_opaque; // child context stored during creation of derived context
if (ctx && AV_HWDEVICE_TYPE_DXVA2 != ((AVHWDeviceContext *) ctx->data)->type) {
child_ctx = ctx;
}
}
AVBufferRef *hw_frames_ref = nullptr;
if (codec_ctx)
{
int res = avcodec_get_hw_frames_parameters(ctx, hw_device_ctx, hw_format, &hw_frames_ref);
int res = avcodec_get_hw_frames_parameters(codec_ctx, child_ctx, hw_format, &hw_frames_ref);
if (res < 0)
{
CV_LOG_DEBUG(NULL, "FFMPEG: avcodec_get_hw_frames_parameters() call failed: " << res)
@ -335,7 +622,7 @@ AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device
}
if (!hw_frames_ref)
{
hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx);
hw_frames_ref = av_hwframe_ctx_alloc(child_ctx);
}
if (!hw_frames_ref)
{
@ -345,12 +632,41 @@ AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device
AVHWFramesContext *frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data);
frames_ctx->width = width;
frames_ctx->height = height;
if (frames_ctx->format == AV_PIX_FMT_NONE)
frames_ctx->format = hw_format;
if (frames_ctx->format == AV_PIX_FMT_NONE) {
if (child_ctx == hw_device_ctx) {
frames_ctx->format = hw_format;
}
else {
AVHWFramesConstraints* constraints = av_hwdevice_get_hwframe_constraints(child_ctx, NULL);
if (constraints) {
frames_ctx->format = constraints->valid_hw_formats[0];
av_hwframe_constraints_free(&constraints);
}
}
}
if (frames_ctx->sw_format == AV_PIX_FMT_NONE)
frames_ctx->sw_format = HW_DEFAULT_SW_FORMAT;
if (frames_ctx->initial_pool_size == 0)
frames_ctx->initial_pool_size = HW_DEFAULT_POOL_SIZE;
#ifdef HAVE_D3D11
if (frames_ctx->device_ctx && AV_HWDEVICE_TYPE_D3D11VA == frames_ctx->device_ctx->type) {
// BindFlags
AVD3D11VAFramesContext* frames_hwctx = (AVD3D11VAFramesContext*)frames_ctx->hwctx;
frames_hwctx->BindFlags |= D3D11_BIND_DECODER | D3D11_BIND_VIDEO_ENCODER;
// See function hw_get_d3d11_single_texture(), it allocates additional ID3D11Texture2D texture and
// attaches it as 'user_opaque' field. We have to set free() callback before av_hwframe_ctx_init() call.
struct D3D11SingleTexture {
static void free(struct AVHWFramesContext* ctx) {
ID3D11Texture2D* singleTexture = (ID3D11Texture2D*)ctx->user_opaque;
if (ctx->user_opaque)
singleTexture->Release();
}
};
frames_ctx->free = D3D11SingleTexture::free;
}
#endif
int res = av_hwframe_ctx_init(hw_frames_ref);
if (res < 0)
{
@ -358,7 +674,25 @@ AVBufferRef* hw_create_frames(struct AVCodecContext* ctx, AVBufferRef *hw_device
av_buffer_unref(&hw_frames_ref);
return NULL;
}
return hw_frames_ref;
if (child_ctx != hw_device_ctx) {
AVBufferRef* derived_frame_ctx = NULL;
int flags = AV_HWFRAME_MAP_READ | AV_HWFRAME_MAP_WRITE;
res = av_hwframe_ctx_create_derived(&derived_frame_ctx, hw_format, hw_device_ctx, hw_frames_ref, flags);
av_buffer_unref(&hw_frames_ref);
if (res < 0)
{
CV_LOG_INFO(NULL, "FFMPEG: Failed to create derived HW frame context (av_hwframe_ctx_create_derived): " << res);
return NULL;
}
else {
((AVHWFramesContext*)derived_frame_ctx->data)->user_opaque = frames_ctx;
return derived_frame_ctx;
}
}
else {
return hw_frames_ref;
}
}
static
@ -455,6 +789,110 @@ AVPixelFormat hw_get_format_callback(struct AVCodecContext *ctx, const enum AVPi
return fmt[0];
}
// GPU color conversion NV12->BGRA via OpenCL extensions
// Copies the hardware frame 'hw_frame' into 'output' (cv::UMat) entirely on
// the GPU, using VAAPI or D3D11 OpenCL interop. Returns false (leaving
// 'output' untouched) when interop is unavailable or on any failure; the
// caller then falls back to a system-memory path.
static bool
hw_copy_frame_to_umat(AVBufferRef* ctx, AVFrame* hw_frame, cv::OutputArray output) {
    CV_UNUSED(hw_frame);
    CV_UNUSED(output);
    if (!ctx)
        return false;

#ifdef HAVE_OPENCL
    try {
        // check that current OpenCL context initialized with binding to same VAAPI/D3D11 context
        AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext *) ctx->data;
        AVHWDeviceType child_type = hw_check_opencl_context(hw_device_ctx);
        if (child_type == AV_HWDEVICE_TYPE_NONE)
            return false;

#ifdef HAVE_VA_INTEL
        if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
            VADisplay va_display = hw_get_va_display(hw_device_ctx);
            VASurfaceID va_surface = hw_get_va_surface(hw_frame);
            if (va_display && va_surface != VA_INVALID_SURFACE) {
                va_intel::convertFromVASurface(va_display, va_surface, {hw_frame->width, hw_frame->height}, output);
                return true;
            }
        }
#endif

#ifdef HAVE_D3D11
        if (child_type == AV_HWDEVICE_TYPE_D3D11VA) {
            AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
            int subresource = 0;
            ID3D11Texture2D* texture = hw_get_d3d11_texture(hw_frame, &subresource);
            // single texture is needed because OpenCL interop doesn't work with sub-texture of texture array
            ID3D11Texture2D* singleTexture = hw_get_d3d11_single_texture(hw_frame, d3d11_device_ctx, texture);
            if (texture && singleTexture) {
                // Copy D3D11 sub-texture to D3D11 single texture
                d3d11_device_ctx->device_context->CopySubresourceRegion(singleTexture, 0, 0, 0, 0, texture, subresource, NULL);
                // Copy D3D11 single texture to cv::UMat
                directx::convertFromD3D11Texture2D(singleTexture, output);
                return true;
            }
        }
#endif
    }
    catch (...)
    {
        return false;
    }
#endif // HAVE_OPENCL

    return false;
}
// GPU color conversion BGRA->NV12 via OpenCL extensions
// Copies 'input' (cv::UMat) into the hardware frame 'hw_frame' entirely on
// the GPU, using VAAPI or D3D11 OpenCL interop. Returns false when interop
// is unavailable or on any failure; the caller then falls back to a
// system-memory path.
static bool
hw_copy_umat_to_frame(AVBufferRef* ctx, cv::InputArray input, AVFrame* hw_frame) {
    CV_UNUSED(input);
    CV_UNUSED(hw_frame);
    if (!ctx)
        return false;

#ifdef HAVE_OPENCL
    try {
        // check that current OpenCL context initialized with binding to same VAAPI/D3D11 context
        AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext *) ctx->data;
        AVHWDeviceType child_type = hw_check_opencl_context(hw_device_ctx);
        if (child_type == AV_HWDEVICE_TYPE_NONE)
            return false;

#ifdef HAVE_VA_INTEL
        if (child_type == AV_HWDEVICE_TYPE_VAAPI) {
            VADisplay va_display = hw_get_va_display(hw_device_ctx);
            VASurfaceID va_surface = hw_get_va_surface(hw_frame);
            if (va_display != NULL && va_surface != VA_INVALID_SURFACE) {
                va_intel::convertToVASurface(va_display, input, va_surface, {hw_frame->width, hw_frame->height});
                return true;
            }
        }
#endif

#ifdef HAVE_D3D11
        if (child_type == AV_HWDEVICE_TYPE_D3D11VA) {
            AVD3D11VADeviceContext* d3d11_device_ctx = hw_get_d3d11_device_ctx(hw_device_ctx);
            int subresource = 0;
            ID3D11Texture2D* texture = hw_get_d3d11_texture(hw_frame, &subresource);
            // single texture is needed because OpenCL interop doesn't work with sub-texture of texture array
            ID3D11Texture2D* singleTexture = hw_get_d3d11_single_texture(hw_frame, d3d11_device_ctx, texture);
            if (texture && singleTexture) {
                // Copy cv::UMat to D3D11 single texture
                directx::convertToD3D11Texture2D(input, singleTexture);
                // Copy D3D11 single texture to D3D11 sub-texture
                d3d11_device_ctx->device_context->CopySubresourceRegion(texture, subresource, 0, 0, 0, singleTexture, 0, NULL);
                return true;
            }
        }
#endif
    }
    catch (...)
    {
        return false;
    }
#endif // HAVE_OPENCL

    return false;
}
static
VideoAccelerationType hw_type_to_va_type(AVHWDeviceType hw_type) {
struct HWTypeFFMPEG {

View File

@ -476,6 +476,7 @@ struct CvCapture_FFMPEG
bool setProperty(int, double);
bool grabFrame();
bool retrieveFrame(int, unsigned char** data, int* step, int* width, int* height, int* cn);
bool retrieveHWFrame(cv::OutputArray output);
void rotateFrame(cv::Mat &mat) const;
void init();
@ -537,6 +538,7 @@ struct CvCapture_FFMPEG
#endif
VideoAccelerationType va_type;
int hw_device;
int use_opencl;
};
void CvCapture_FFMPEG::init()
@ -574,6 +576,7 @@ void CvCapture_FFMPEG::init()
bsfc = NULL;
va_type = cv::VIDEO_ACCELERATION_NONE; // TODO OpenCV 5.0: change to _ANY?
hw_device = -1;
use_opencl = 0;
}
@ -922,6 +925,9 @@ bool CvCapture_FFMPEG::open(const char* _filename, const VideoCaptureParameters&
return false;
}
}
if (params.has(CAP_PROP_HW_ACCELERATION_USE_OPENCL)) {
use_opencl = params.get<int>(CAP_PROP_HW_ACCELERATION_USE_OPENCL);
}
if (params.warnUnusedParameters())
{
CV_LOG_ERROR(NULL, "VIDEOIO/FFMPEG: unsupported parameters in .open(), see logger INFO channel for details. Bailout");
@ -1051,7 +1057,7 @@ bool CvCapture_FFMPEG::open(const char* _filename, const VideoCaptureParameters&
if (codec) {
if (hw_pix_fmt != AV_PIX_FMT_NONE)
enc->get_format = hw_get_format_callback; // set callback to select HW pixel format, not SW format
enc->hw_device_ctx = hw_create_device(hw_type, hw_device, accel_iter.device_subname());
enc->hw_device_ctx = hw_create_device(hw_type, hw_device, accel_iter.device_subname(), use_opencl != 0);
if (!enc->hw_device_ctx)
{
CV_LOG_DEBUG(NULL, "FFMPEG: ... can't create H/W device: '" << accel_iter.hw_type_device_string() << "'");
@ -1476,6 +1482,22 @@ bool CvCapture_FFMPEG::retrieveFrame(int, unsigned char** data, int* step, int*
return true;
}
// Maps the last decoded hardware frame into 'output' (cv::UMat) via GPU color
// conversion, keeping the data in GPU memory (no system-memory round-trip).
// Returns false when the current frame is not a HW frame or the GPU interop
// copy is unavailable, so the caller can fall back to the SW path.
bool CvCapture_FFMPEG::retrieveHWFrame(cv::OutputArray output)
{
#if USE_AV_HW_CODECS
    // check that we have HW frame in GPU memory
    if (!picture || !picture->hw_frames_ctx) {
        return false;
    }

    // GPU color conversion NV12->BGRA, from GPU media buffer to GPU OpenCL buffer
    return hw_copy_frame_to_umat(video_st->codec->hw_device_ctx, picture, output);
#else
    CV_UNUSED(output);
    return false;
#endif
}
double CvCapture_FFMPEG::getProperty( int property_id ) const
{
if( !video_st ) return 0;
@ -1549,6 +1571,8 @@ double CvCapture_FFMPEG::getProperty( int property_id ) const
return static_cast<double>(va_type);
case CAP_PROP_HW_DEVICE:
return static_cast<double>(hw_device);
case CAP_PROP_HW_ACCELERATION_USE_OPENCL:
return static_cast<double>(use_opencl);
#endif // USE_AV_HW_CODECS
default:
break;
@ -1752,6 +1776,7 @@ struct CvVideoWriter_FFMPEG
double fps, int width, int height, const VideoWriterParameters& params );
void close();
bool writeFrame( const unsigned char* data, int step, int width, int height, int cn, int origin );
bool writeHWFrame(cv::InputArray input);
double getProperty(int propId) const;
void init();
@ -1774,6 +1799,7 @@ struct CvVideoWriter_FFMPEG
struct SwsContext *img_convert_ctx;
VideoAccelerationType va_type;
int hw_device;
int use_opencl;
};
static const char * icvFFMPEGErrStr(int err)
@ -1836,6 +1862,7 @@ void CvVideoWriter_FFMPEG::init()
frame_idx = 0;
va_type = VIDEO_ACCELERATION_NONE;
hw_device = -1;
use_opencl = 0;
ok = false;
}
@ -2210,6 +2237,41 @@ bool CvVideoWriter_FFMPEG::writeFrame( const unsigned char* data, int step, int
return ret;
}
// Encodes 'input' (cv::UMat) through the HW encoder without downloading it to
// system memory: a frame is taken from the encoder's HW frame pool, filled by
// GPU color conversion, then submitted to the encoder. Returns false when the
// encoder has no HW frame pool or any GPU step fails, so the caller can fall
// back to the system-memory path.
bool CvVideoWriter_FFMPEG::writeHWFrame(cv::InputArray input) {
#if USE_AV_HW_CODECS
    if (!video_st->codec->hw_frames_ctx)
        return false;

    // Get hardware frame from frame pool
    AVFrame* hw_frame = av_frame_alloc();
    if (!hw_frame) {
        return false;
    }
    if (av_hwframe_get_buffer(video_st->codec->hw_frames_ctx, hw_frame, 0) < 0) {
        av_frame_free(&hw_frame);
        return false;
    }

    // GPU to GPU copy
    if (!hw_copy_umat_to_frame(video_st->codec->hw_device_ctx, input, hw_frame)) {
        av_frame_free(&hw_frame);
        return false;
    }

    // encode
    hw_frame->pts = frame_idx;
    // NOTE(review): the result of icv_av_write_frame_FFMPEG() is ignored, so
    // an encode failure still reports success here - consider propagating it.
    icv_av_write_frame_FFMPEG( oc, video_st, outbuf, outbuf_size, hw_frame, frame_idx);
    frame_idx++;

    av_frame_free(&hw_frame);
    return true;
#else
    CV_UNUSED(input);
    return false;
#endif
}
double CvVideoWriter_FFMPEG::getProperty(int propId) const
{
CV_UNUSED(propId);
@ -2222,6 +2284,10 @@ double CvVideoWriter_FFMPEG::getProperty(int propId) const
{
return static_cast<double>(hw_device);
}
else if (propId == VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL)
{
return static_cast<double>(use_opencl);
}
#endif
return 0;
}
@ -2375,6 +2441,9 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
return false;
}
}
if (params.has(VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL)) {
use_opencl = params.get<int>(VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL);
}
if (params.warnUnusedParameters())
{
@ -2638,7 +2707,7 @@ bool CvVideoWriter_FFMPEG::open( const char * filename, int fourcc,
if (!codec)
continue;
hw_device_ctx = hw_create_device(hw_type, hw_device, accel_iter.device_subname());
hw_device_ctx = hw_create_device(hw_type, hw_device, accel_iter.device_subname(), use_opencl != 0);
if (!hw_device_ctx)
continue;
}

View File

@ -675,7 +675,6 @@ TEST_P(videocapture_acceleration, read)
VideoCaptureAPIs backend = get<1>(param);
VideoAccelerationType va_type = get<2>(param);
bool use_umat = get<3>(param);
int device_idx = -1;
const int frameNum = 15;
std::string filepath = cvtest::findDataFile("video/" + filename);
@ -695,13 +694,24 @@ TEST_P(videocapture_acceleration, read)
// HW reader
VideoCapture hw_reader(filepath, backend, {
CAP_PROP_HW_ACCELERATION, static_cast<int>(va_type),
CAP_PROP_HW_DEVICE, device_idx
});
std::vector<int> params = { CAP_PROP_HW_ACCELERATION, static_cast<int>(va_type) };
if (use_umat)
{
if (backend != CAP_FFMPEG)
throw SkipTestException(cv::String("UMat/OpenCL mapping is not supported by current backend: ") + backend_name);
if (!cv::videoio_registry::isBackendBuiltIn(backend))
throw SkipTestException(cv::String("UMat/OpenCL mapping is not supported through plugins yet: ") + backend_name);
params.push_back(CAP_PROP_HW_ACCELERATION_USE_OPENCL);
params.push_back(1);
}
VideoCapture hw_reader(filepath, backend, params);
if (!hw_reader.isOpened())
{
if (va_type == VIDEO_ACCELERATION_ANY || va_type == VIDEO_ACCELERATION_NONE)
if (use_umat)
{
throw SkipTestException(backend_name + " VideoCapture on " + filename + " not supported with HW acceleration + OpenCL/Umat mapping, skipping");
}
else if (va_type == VIDEO_ACCELERATION_ANY || va_type == VIDEO_ACCELERATION_NONE)
{
// ANY HW acceleration should have fallback to SW codecs
VideoCapture sw_reader(filepath, backend, {
@ -795,7 +805,7 @@ static const VideoAccelerationType hw_types[] = {
static bool hw_use_umat[] = {
false,
//true
true
};
INSTANTIATE_TEST_CASE_P(videoio, videocapture_acceleration, testing::Combine(
@ -819,7 +829,6 @@ TEST_P(videowriter_acceleration, write)
std::string extension = get<0>(param).ext;
double psnr_threshold = get<0>(param).PSNR;
VideoAccelerationType va_type = get<1>(param);
int device_idx = -1;
bool use_umat = get<2>(param);
std::string backend_name = cv::videoio_registry::getBackendName(backend);
if (!videoio_registry::hasBackend(backend))
@ -834,20 +843,31 @@ TEST_P(videowriter_acceleration, write)
// Write video
VideoAccelerationType actual_va;
{
std::vector<int> params = { VIDEOWRITER_PROP_HW_ACCELERATION, static_cast<int>(va_type) };
if (use_umat) {
if (backend != CAP_FFMPEG)
throw SkipTestException(cv::String("UMat/OpenCL mapping is not supported by current backend: ") + backend_name);
if (!cv::videoio_registry::isBackendBuiltIn(backend))
throw SkipTestException(cv::String("UMat/OpenCL mapping is not supported through plugins yet: ") + backend_name);
params.push_back(VIDEOWRITER_PROP_HW_ACCELERATION_USE_OPENCL);
params.push_back(1);
}
VideoWriter hw_writer(
filename,
backend,
VideoWriter::fourcc(codecid[0], codecid[1], codecid[2], codecid[3]),
fps,
sz,
{
VIDEOWRITER_PROP_HW_ACCELERATION, static_cast<int>(va_type),
VIDEOWRITER_PROP_HW_DEVICE, device_idx
}
params
);
if (!hw_writer.isOpened()) {
if (va_type == VIDEO_ACCELERATION_ANY || va_type == VIDEO_ACCELERATION_NONE)
if (!hw_writer.isOpened())
{
if (use_umat)
{
throw SkipTestException(backend_name + " VideoWriter on " + filename + " not supported with HW acceleration + OpenCL/Umat mapping, skipping");
}
else if (va_type == VIDEO_ACCELERATION_ANY || va_type == VIDEO_ACCELERATION_NONE)
{
// ANY HW acceleration should have fallback to SW codecs
{

View File

@ -151,11 +151,11 @@ int main(int argc, char** argv)
return 1;
}
cout << "VideoWriter backend = " << writer.getBackendName() << endl;
actual_accel = static_cast<VideoAccelerationType>(static_cast<int>(writer.get(CAP_PROP_HW_ACCELERATION)));
actual_accel = static_cast<VideoAccelerationType>(static_cast<int>(writer.get(VIDEOWRITER_PROP_HW_ACCELERATION)));
for (size_t i = 0; i < sizeof(acceleration_strings) / sizeof(acceleration_strings[0]); i++) {
if (actual_accel == acceleration_strings[i].acceleration) {
cout << "VideoWriter acceleration = " << acceleration_strings[i].str << endl;
cout << "VideoWriter acceleration device = " << (int)writer.get(CAP_PROP_HW_DEVICE) << endl;
cout << "VideoWriter acceleration device = " << (int)writer.get(VIDEOWRITER_PROP_HW_DEVICE) << endl;
break;
}
}