Mirror of https://github.com/opencv/opencv.git (synced 2025-08-05 22:19:14 +08:00)
Merge pull request #21618 from sivanov-work:vpp_preproc_core
G-API: Add VPL/VPP preproc core module

* Add BaseMediAdapter for VPL
* Add PreprocSession & PreprocEngine interface part
* Implement preproc UT, fix base path
* Add common PP interface, add common pp_params
* Rough decoupling VPL & Preproc
* Add syntax sugar for PP interface
* Integrate VPP preproc in GIEbackend
* Add PP bypass
* Add perf tests for PP
* Fix warning in VPL core UT
* Add inner preproc resolution unit test
* Remove VPP preproc description from single ROI sample
* Apply SetROIBlob for different Infer operations
* Eliminate extra branch-lines for cfg_param_parser & transcode_engine
* Fix UT warning & PreprocSession compile
* Fix compilation & warnings
* Reduce Session & Engine code amount
* Apply some comments
* Revert IE changes, rename preproc
* Fix for DX11 infer for OV: turn off texture array
* Remove dependency of PP on IE
* Change fixture test params
* Apply other comments & turn off ROI for GPU
* Fix compilation: remove forgotten INFER define
* Apply debt comments
* Fix PP UTs: add FrameInfo value comparator
* Fix style
* Remove standalone map for preproc frames storage
* Add other comments
Parent: 92312fbc0c
Commit: 8f1c502d2b
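For orientation, the snippet below sketches how the new VPP preprocessing engine is exercised. It is condensed from the perf tests added in this PR (see OneVPLSourcePerf_PP_Engine_Test further down), not a canonical API reference: the input file name and output frame description are placeholders, and the exact return type of run_sync() as well as the precise bypass contract are assumptions here.

// Illustrative sketch only (condensed from the perf tests in this PR).
// The tests include the internal headers
//   streaming/onevpl/engine/preproc/preproc_engine.hpp
//   streaming/onevpl/engine/preproc/preproc_session.hpp
using namespace cv::gapi::wip;
using namespace cv::gapi::wip::onevpl;

std::vector<CfgParam> cfg_params {
    CfgParam::create_implementation("MFX_IMPL_TYPE_HARDWARE"),
};
auto device_selector = std::make_shared<CfgParamDeviceSelector>(cfg_params);
auto source_ptr = make_onevpl_src("video.h265" /* placeholder input */, cfg_params, device_selector);

// Build the preproc engine on top of an acceleration policy (CPU policy for brevity)
std::unique_ptr<VPLAccelerationPolicy> policy(new VPLCPUAccelerationPolicy(device_selector));
VPPPreprocEngine preproc_engine(std::move(policy));

// Desired output frame description; values are illustrative
cv::GFrameDesc required_frame_param{cv::MediaFormat::NV12, {1920, 1080}};

Data out;
source_ptr->pull(out);
cv::MediaFrame frame = cv::util::get<cv::MediaFrame>(out);

// Ask the engine whether the frame can/needs to be preprocessed, then run it synchronously;
// when the frame already matches the request the engine is expected to bypass the VPP step
// (see the *_Bypass perf test), though the exact bypass semantics are not spelled out in this diff.
cv::util::optional<pp_params> param = preproc_engine.is_applicable(frame);
pp_session sess = preproc_engine.initialize_preproc(param.value(), required_frame_param);
auto processed = preproc_engine.run_sync(sess, frame);
(void)processed;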
@@ -186,6 +186,7 @@ set(gapi_srcs
src/streaming/onevpl/cfg_params_parser.cpp
src/streaming/onevpl/utils.cpp
src/streaming/onevpl/data_provider_interface_exception.cpp
src/streaming/onevpl/accelerators/surface/base_frame_adapter.cpp
src/streaming/onevpl/accelerators/surface/cpu_frame_adapter.cpp
src/streaming/onevpl/accelerators/surface/dx11_frame_adapter.cpp
src/streaming/onevpl/accelerators/surface/surface.cpp

@@ -200,6 +201,8 @@ set(gapi_srcs
src/streaming/onevpl/engine/decode/decode_session.cpp
src/streaming/onevpl/engine/transcode/transcode_engine_legacy.cpp
src/streaming/onevpl/engine/transcode/transcode_session.cpp
src/streaming/onevpl/engine/preproc/preproc_engine.cpp
src/streaming/onevpl/engine/preproc/preproc_session.cpp
src/streaming/onevpl/demux/async_mfp_demux_data_provider.cpp
src/streaming/onevpl/data_provider_dispatcher.cpp

@@ -360,7 +363,9 @@ endif()
# perf test dependencies postprocessing
if(HAVE_GAPI_ONEVPL)
# NB: TARGET opencv_perf_gapi doesn't exist before `ocv_add_perf_tests`
# src/ is specified to include dirs for INTERNAL tests only.
if(TARGET opencv_perf_gapi)
target_include_directories(opencv_perf_gapi PRIVATE "${CMAKE_CURRENT_LIST_DIR}/src")
ocv_target_compile_definitions(opencv_perf_gapi PRIVATE -DHAVE_ONEVPL)
ocv_target_link_libraries(opencv_perf_gapi PRIVATE ${VPL_IMPORTED_TARGETS})
if(HAVE_D3D11 AND HAVE_OPENCL)
@@ -11,6 +11,13 @@
#include <opencv2/gapi/streaming/onevpl/source.hpp>
#include <opencv2/gapi/streaming/cap.hpp>

#include "streaming/onevpl/engine/preproc/preproc_engine.hpp"
#include "streaming/onevpl/engine/preproc/preproc_session.hpp"
#include "streaming/onevpl/accelerators/accel_policy_interface.hpp"
#include "streaming/onevpl/cfg_param_device_selector.hpp"
#include "streaming/onevpl/accelerators/accel_policy_dx11.hpp"
#include "streaming/onevpl/accelerators/accel_policy_cpu.hpp"

namespace opencv_test
{
using namespace perf;

@@ -32,10 +39,10 @@ using codec_t = std::string;
using accel_mode_t = std::string;
using source_description_t = std::tuple<source_t, codec_t, accel_mode_t>;

class OneVPLSourcePerfTest : public TestPerfParams<source_description_t> {};
class VideoCapSourcePerfTest : public TestPerfParams<source_t> {};
class OneVPLSourcePerf_Test : public TestPerfParams<source_description_t> {};
class VideoCapSourcePerf_Test : public TestPerfParams<source_t> {};

PERF_TEST_P_(OneVPLSourcePerfTest, TestPerformance)
PERF_TEST_P_(OneVPLSourcePerf_Test, TestPerformance)
{
using namespace cv::gapi::wip::onevpl;

@@ -67,7 +74,7 @@ PERF_TEST_P_(OneVPLSourcePerfTest, TestPerformance)
SANITY_CHECK_NOTHING();
}

PERF_TEST_P_(VideoCapSourcePerfTest, TestPerformance)
PERF_TEST_P_(VideoCapSourcePerf_Test, TestPerformance)
{
using namespace cv::gapi::wip;

@@ -82,7 +89,7 @@ PERF_TEST_P_(VideoCapSourcePerfTest, TestPerformance)
SANITY_CHECK_NOTHING();
}

INSTANTIATE_TEST_CASE_P(Streaming, OneVPLSourcePerfTest,
INSTANTIATE_TEST_CASE_P(Streaming, OneVPLSourcePerf_Test,
Values(source_description_t(files[0], codec[0], ""),
source_description_t(files[0], codec[0], "MFX_ACCEL_MODE_VIA_D3D11"),
source_description_t(files[1], codec[1], ""),
@@ -90,10 +97,202 @@ INSTANTIATE_TEST_CASE_P(Streaming, OneVPLSourcePerfTest,
source_description_t(files[2], codec[2], ""),
source_description_t(files[2], codec[2], "MFX_ACCEL_MODE_VIA_D3D11")));

INSTANTIATE_TEST_CASE_P(Streaming, VideoCapSourcePerfTest,
INSTANTIATE_TEST_CASE_P(Streaming, VideoCapSourcePerf_Test,
Values(files[0],
files[1],
files[2]));

using pp_out_param_t = cv::GFrameDesc;
using source_description_preproc_t = decltype(std::tuple_cat(std::declval<source_description_t>(),
std::declval<std::tuple<pp_out_param_t>>()));
class OneVPLSourcePerf_PP_Test : public TestPerfParams<source_description_preproc_t> {};

PERF_TEST_P_(OneVPLSourcePerf_PP_Test, TestPerformance)
{
using namespace cv::gapi::wip::onevpl;

const auto params = GetParam();
source_t src = findDataFile(get<0>(params));
codec_t type = get<1>(params);
accel_mode_t mode = get<2>(params);
pp_out_param_t res = get<3>(params);

std::vector<CfgParam> cfg_params {
CfgParam::create_implementation("MFX_IMPL_TYPE_HARDWARE"),
};

if (!type.empty()) {
cfg_params.push_back(CfgParam::create_decoder_id(type.c_str()));
}

if (!mode.empty()) {
cfg_params.push_back(CfgParam::create_acceleration_mode(mode.c_str()));
}

cfg_params.push_back(CfgParam::create_vpp_out_width(static_cast<uint16_t>(res.size.width)));
cfg_params.push_back(CfgParam::create_vpp_out_height(static_cast<uint16_t>(res.size.height)));
cfg_params.push_back(CfgParam::create_vpp_out_crop_x(0));
cfg_params.push_back(CfgParam::create_vpp_out_crop_y(0));
cfg_params.push_back(CfgParam::create_vpp_out_crop_w(static_cast<uint16_t>(res.size.width)));
cfg_params.push_back(CfgParam::create_vpp_out_crop_h(static_cast<uint16_t>(res.size.height)));

auto source_ptr = cv::gapi::wip::make_onevpl_src(src, cfg_params);

cv::gapi::wip::Data out;
TEST_CYCLE()
{
source_ptr->pull(out);
}

SANITY_CHECK_NOTHING();
}
static pp_out_param_t full_hd = pp_out_param_t {cv::MediaFormat::NV12,
{1920, 1080}};

static pp_out_param_t cif = pp_out_param_t {cv::MediaFormat::NV12,
{352, 288}};

INSTANTIATE_TEST_CASE_P(Streaming_Source_PP, OneVPLSourcePerf_PP_Test,
Values(source_description_preproc_t(files[0], codec[0], "", full_hd),
source_description_preproc_t(files[0], codec[0], "", cif),
source_description_preproc_t(files[0], codec[0], "MFX_ACCEL_MODE_VIA_D3D11", full_hd),
source_description_preproc_t(files[0], codec[0], "MFX_ACCEL_MODE_VIA_D3D11", cif),
source_description_preproc_t(files[1], codec[1], "", full_hd),
source_description_preproc_t(files[1], codec[1], "", cif),
source_description_preproc_t(files[1], codec[1], "MFX_ACCEL_MODE_VIA_D3D11",full_hd),
source_description_preproc_t(files[1], codec[1], "MFX_ACCEL_MODE_VIA_D3D11",cif),
source_description_preproc_t(files[2], codec[2], "", full_hd),
source_description_preproc_t(files[2], codec[2], "", cif),
source_description_preproc_t(files[2], codec[2], "MFX_ACCEL_MODE_VIA_D3D11", full_hd),
source_description_preproc_t(files[2], codec[2], "MFX_ACCEL_MODE_VIA_D3D11", cif)));
class OneVPLSourcePerf_PP_Engine_Test : public TestPerfParams<source_description_preproc_t> {};

PERF_TEST_P_(OneVPLSourcePerf_PP_Engine_Test, TestPerformance)
{
using namespace cv::gapi::wip;
using namespace cv::gapi::wip::onevpl;

const auto params = GetParam();
source_t src = findDataFile(get<0>(params));
codec_t type = get<1>(params);
accel_mode_t mode = get<2>(params);
const pp_out_param_t &required_frame_param = get<3>(params);

std::vector<CfgParam> cfg_params {
CfgParam::create_implementation("MFX_IMPL_TYPE_HARDWARE"),
};

if (!type.empty()) {
cfg_params.push_back(CfgParam::create_decoder_id(type.c_str()));
}

if (!mode.empty()) {
cfg_params.push_back(CfgParam::create_acceleration_mode(mode.c_str()));
}

auto device_selector = std::make_shared<CfgParamDeviceSelector>(cfg_params);
auto source_ptr = cv::gapi::wip::make_onevpl_src(src, cfg_params, device_selector);

// create VPP preproc engine
std::unique_ptr<VPLAccelerationPolicy> policy;
if (mode == "MFX_ACCEL_MODE_VIA_D3D11") {
policy.reset(new VPLDX11AccelerationPolicy(device_selector));
} else if (mode.empty()){
policy.reset(new VPLCPUAccelerationPolicy(device_selector));
} else {
ASSERT_TRUE(false && "Unsupported acceleration policy type");
}
VPPPreprocEngine preproc_engine(std::move(policy));
cv::gapi::wip::Data out;
TEST_CYCLE()
{
source_ptr->pull(out);
cv::MediaFrame frame = cv::util::get<cv::MediaFrame>(out);
cv::util::optional<pp_params> param = preproc_engine.is_applicable(frame);
pp_session sess = preproc_engine.initialize_preproc(param.value(),
required_frame_param);
(void)preproc_engine.run_sync(sess, frame);
}

SANITY_CHECK_NOTHING();
}

INSTANTIATE_TEST_CASE_P(Streaming_Engine_PP, OneVPLSourcePerf_PP_Engine_Test,
Values(source_description_preproc_t(files[0], codec[0], "", full_hd),
source_description_preproc_t(files[0], codec[0], "", cif),
source_description_preproc_t(files[0], codec[0], "MFX_ACCEL_MODE_VIA_D3D11", full_hd),
source_description_preproc_t(files[0], codec[0], "MFX_ACCEL_MODE_VIA_D3D11", cif),
source_description_preproc_t(files[1], codec[1], "", full_hd),
source_description_preproc_t(files[1], codec[1], "", cif),
source_description_preproc_t(files[1], codec[1], "MFX_ACCEL_MODE_VIA_D3D11",full_hd),
source_description_preproc_t(files[1], codec[1], "MFX_ACCEL_MODE_VIA_D3D11",cif),
source_description_preproc_t(files[2], codec[2], "", full_hd),
source_description_preproc_t(files[2], codec[2], "", cif),
source_description_preproc_t(files[2], codec[2], "MFX_ACCEL_MODE_VIA_D3D11", full_hd),
source_description_preproc_t(files[2], codec[2], "MFX_ACCEL_MODE_VIA_D3D11", cif)));
class OneVPLSourcePerf_PP_Engine_Bypass_Test : public TestPerfParams<source_description_preproc_t> {};

PERF_TEST_P_(OneVPLSourcePerf_PP_Engine_Bypass_Test, TestPerformance)
{
using namespace cv::gapi::wip;
using namespace cv::gapi::wip::onevpl;

const auto params = GetParam();
source_t src = findDataFile(get<0>(params));
codec_t type = get<1>(params);
accel_mode_t mode = get<2>(params);
const pp_out_param_t &required_frame_param = get<3>(params);

std::vector<CfgParam> cfg_params {
CfgParam::create_implementation("MFX_IMPL_TYPE_HARDWARE"),
};

if (!type.empty()) {
cfg_params.push_back(CfgParam::create_decoder_id(type.c_str()));
}

if (!mode.empty()) {
cfg_params.push_back(CfgParam::create_acceleration_mode(mode.c_str()));
}

auto device_selector = std::make_shared<CfgParamDeviceSelector>(cfg_params);
auto source_ptr = cv::gapi::wip::make_onevpl_src(src, cfg_params, device_selector);

// create VPP preproc engine
std::unique_ptr<VPLAccelerationPolicy> policy;
if (mode == "MFX_ACCEL_MODE_VIA_D3D11") {
policy.reset(new VPLDX11AccelerationPolicy(device_selector));
} else {
policy.reset(new VPLCPUAccelerationPolicy(device_selector));
}
VPPPreprocEngine preproc_engine(std::move(policy));
cv::gapi::wip::Data out;
TEST_CYCLE()
{
source_ptr->pull(out);
cv::MediaFrame frame = cv::util::get<cv::MediaFrame>(out);
cv::util::optional<pp_params> param = preproc_engine.is_applicable(frame);
pp_session sess = preproc_engine.initialize_preproc(param.value(),
required_frame_param);
(void)preproc_engine.run_sync(sess, frame);
}

SANITY_CHECK_NOTHING();
}

static pp_out_param_t res_672x384 = pp_out_param_t {cv::MediaFormat::NV12,
{672, 384}};
static pp_out_param_t res_336x256 = pp_out_param_t {cv::MediaFormat::NV12,
{336, 256}};
INSTANTIATE_TEST_CASE_P(Streaming_Engine_PP_Bypass, OneVPLSourcePerf_PP_Engine_Bypass_Test,
Values(source_description_preproc_t(files[0], codec[0], "", res_672x384),
source_description_preproc_t(files[0], codec[0], "MFX_ACCEL_MODE_VIA_D3D11", res_672x384),
source_description_preproc_t(files[1], codec[1], "", res_672x384),
source_description_preproc_t(files[1], codec[1], "MFX_ACCEL_MODE_VIA_D3D11", res_672x384),
source_description_preproc_t(files[2], codec[2], "", res_336x256),
source_description_preproc_t(files[2], codec[2], "MFX_ACCEL_MODE_VIA_D3D11", res_336x256)));
} // namespace opencv_test

#endif // HAVE_ONEVPL
@@ -46,8 +46,7 @@ const std::string keys =
"{ cfg_params | <prop name>:<value>;<prop name>:<value> | Semicolon separated list of oneVPL mfxVariants which is used for configuring source (see `MFXSetConfigFilterProperty` by https://spec.oneapi.io/versions/latest/elements/oneVPL/source/index.html) }"
"{ streaming_queue_capacity | 1 | Streaming executor queue capacity. Calculated automaticaly if 0 }"
"{ frames_pool_size | 0 | OneVPL source applies this parameter as preallocated frames pool size}"
"{ vpp_frames_pool_size | 0 | OneVPL source applies this parameter as preallocated frames pool size for VPP preprocessing results}"
"{ source_preproc_enable | 0 | Turn on OneVPL source frame preprocessing using network input description instead of IE plugin preprocessing}";
"{ vpp_frames_pool_size | 0 | OneVPL source applies this parameter as preallocated frames pool size for VPP preprocessing results}";

namespace {
bool is_gpu(const std::string &device_name) {

@@ -217,7 +216,6 @@ int main(int argc, char *argv[]) {
const auto streaming_queue_capacity = cmd.get<uint32_t>("streaming_queue_capacity");
const auto source_decode_queue_capacity = cmd.get<uint32_t>("frames_pool_size");
const auto source_vpp_queue_capacity = cmd.get<uint32_t>("vpp_frames_pool_size");
const auto vpl_source_preproc_enable = cmd.get<uint32_t>("source_preproc_enable");
const auto device_id = cmd.get<std::string>("faced");

// check ouput file extension

@@ -235,12 +233,6 @@ int main(int argc, char *argv[]) {
try {
std::string line;
while (std::getline(params_list, line, ';')) {
if (vpl_source_preproc_enable == 0) {
if (line.find("vpp.") != std::string::npos) {
// skip VPP preprocessing primitives if not requested
continue;
}
}
source_cfgs.push_back(cfg::create_from_string(line));
}
} catch (const std::exception& ex) {
@@ -325,23 +317,11 @@ int main(int argc, char *argv[]) {
// set ctx_config for GPU device only - no need in case of CPU device type
if (is_gpu(device_id)) {
InferenceEngine::ParamMap ctx_config({{"CONTEXT_TYPE", "VA_SHARED"},
{"VA_DEVICE", accel_device_ptr} });

{"VA_DEVICE", accel_device_ptr} });
face_net.cfgContextParams(ctx_config);
face_net.pluginConfig({{"GPU_NV12_TWO_INPUTS", "YES" }});

std::cout <<"/*******************************************************/\n"
"ATTENTION: GPU Inference Engine preprocessing is not vital as expected!"
" Please consider param \"source_preproc_enable=1\" and specify "
" appropriated media frame transformation using oneVPL::VPP primitives"
" which force onevpl::GSource to produce tranformed media frames."
" For exploring list of supported transformations please find out "
" vpp_* related stuff in"
" gapi/include/opencv2/gapi/streaming/onevpl/cfg_params.hpp"
" Pay attention that to obtain expected result In this case VPP "
" transformation must match network input params."
" Please vote/create issue about exporting network params using GAPI\n"
"/******************************************************/" << std::endl;
// NB: consider NV12 surface because it's one of native GPU image format
face_net.pluginConfig({{"GPU_NV12_TWO_INPUTS", "YES" }});
}
#endif // HAVE_INF_ENGINE

@@ -378,7 +358,7 @@ int main(int argc, char *argv[]) {
cv::GFrame in;
auto size = cv::gapi::streaming::size(in);
auto roi = custom::LocateROI::on(size, std::cref(device_id));
auto blob = cv::gapi::infer<custom::FaceDetector>(roi, in);
auto blob = cv::gapi::infer<custom::FaceDetector>(in);
cv::GArray<cv::Rect> rcs = cv::gapi::parseSSD(blob, size, 0.5f, true, true);
auto out_frame = cv::gapi::wip::draw::renderFrame(in, custom::BBoxes::on(rcs, roi));
auto out = cv::gapi::streaming::BGR(out_frame);
@@ -619,12 +619,14 @@ static void setBlob(InferenceEngine::InferRequest& req,
static void setROIBlob(InferenceEngine::InferRequest& req,
const std::string& layer_name,
const IE::Blob::Ptr& blob,
const cv::Rect &roi,
const cv::Rect &roi,
const IECallContext& ctx) {
if (ctx.uu.params.device_id.find("GPU") != std::string::npos) {
GAPI_LOG_DEBUG(nullptr, "Skip ROI blob creation for device_id: " <<
ctx.uu.params.device_id << ", layer: " << layer_name);
setBlob(req, layer_name, blob, ctx);
if (ctx.uu.params.device_id.find("GPU") != std::string::npos &&
ctx.uu.rctx) {
GAPI_LOG_WARNING(nullptr, "ROI blob creation for device_id: " <<
ctx.uu.params.device_id << ", layer: " << layer_name <<
"is not supported yet");
GAPI_Assert(false && "Unsupported ROI blob creation for GPU remote context");
} else {
setBlob(req, layer_name, IE::make_shared_blob(blob, toIE(roi)), ctx);
}

@@ -1330,8 +1332,7 @@ struct InferList: public cv::detail::KernelTag {
reqPool.execute(
cv::gimpl::ie::RequestPool::Task {
[ctx, rc, this_blob](InferenceEngine::InferRequest &req) {
IE::Blob::Ptr roi_blob = IE::make_shared_blob(this_blob, toIE(rc));
setBlob(req, ctx->uu.params.input_names[0u], roi_blob, *ctx);
setROIBlob(req, ctx->uu.params.input_names[0u], this_blob, rc, *ctx);
req.StartAsync();
},
std::bind(callback, std::placeholders::_1, pos)

@@ -1488,19 +1489,20 @@ struct InferList2: public cv::detail::KernelTag {
for (auto in_idx : ade::util::iota(ctx->uu.params.num_in)) {
const auto &this_vec = ctx->inArg<cv::detail::VectorRef>(in_idx+1u);
GAPI_Assert(this_vec.size() == list_size);
IE::Blob::Ptr this_blob;
if (this_vec.getKind() == cv::detail::OpaqueKind::CV_RECT) {
const auto &vec = this_vec.rref<cv::Rect>();
this_blob = IE::make_shared_blob(blob_0, toIE(vec[list_idx]));
setROIBlob(req, ctx->uu.params.input_names[in_idx],
blob_0, vec[list_idx], *ctx);
} else if (this_vec.getKind() == cv::detail::OpaqueKind::CV_MAT) {
const auto &vec = this_vec.rref<cv::Mat>();
const auto &mat = vec[list_idx];
this_blob = wrapIE(mat, cv::gapi::ie::TraitAs::TENSOR);
setBlob(req, ctx->uu.params.input_names[in_idx],
wrapIE(mat, cv::gapi::ie::TraitAs::TENSOR),
*ctx);
} else {
GAPI_Assert(false &&
"Only Rect and Mat types are supported for infer list 2!");
}
setBlob(req, ctx->uu.params.input_names[in_idx], this_blob, *ctx);
}
req.StartAsync();
},
@@ -273,7 +273,7 @@ size_t VPLCPUAccelerationPolicy::get_surface_count(pool_key_t key) const {
}

cv::MediaFrame::AdapterPtr VPLCPUAccelerationPolicy::create_frame_adapter(pool_key_t key,
mfxFrameSurface1* surface) {
const FrameConstructorArgs &params) {
auto pool_it = pool_table.find(key);
if (pool_it == pool_table.end()) {
std::stringstream ss;

@@ -284,7 +284,8 @@ cv::MediaFrame::AdapterPtr VPLCPUAccelerationPolicy::create_frame_adapter(pool_k
}

pool_t& requested_pool = pool_it->second;
return cv::MediaFrame::AdapterPtr{new VPLMediaFrameCPUAdapter(requested_pool.find_by_handle(surface))};
return cv::MediaFrame::AdapterPtr{new VPLMediaFrameCPUAdapter(requested_pool.find_by_handle(params.assoc_surface),
params.assoc_handle)};
}
} // namespace onevpl
} // namespace wip
@@ -38,7 +38,7 @@ struct GAPI_EXPORTS VPLCPUAccelerationPolicy final : public VPLAccelerationPolic
size_t get_surface_count(pool_key_t key) const override;

cv::MediaFrame::AdapterPtr create_frame_adapter(pool_key_t key,
mfxFrameSurface1* surface) override;
const FrameConstructorArgs& args) override;

private:
std::map<pool_key_t, pool_t> pool_table;
@@ -157,12 +157,21 @@ size_t VPLDX11AccelerationPolicy::get_free_surface_count(pool_key_t) const {
GAPI_Assert(false && "get_free_surface_count() is not implemented");
}

size_t VPLDX11AccelerationPolicy::get_surface_count(pool_key_t) const {
GAPI_Assert(false && "VPLDX11AccelerationPolicy::get_surface_count() is not implemented");
size_t VPLDX11AccelerationPolicy::get_surface_count(pool_key_t key) const {
auto pool_it = pool_table.find(key);
if (pool_it == pool_table.end()) {
std::stringstream ss;
ss << "key is not found: " << key << ", table size: " << pool_table.size();
const std::string& str = ss.str();
GAPI_LOG_WARNING(nullptr, str);
throw std::runtime_error(std::string(__FUNCTION__) + " - " + str);
}
return pool_it->second.total_size();
}

cv::MediaFrame::AdapterPtr VPLDX11AccelerationPolicy::create_frame_adapter(pool_key_t key,
mfxFrameSurface1* surface) {
cv::MediaFrame::AdapterPtr
VPLDX11AccelerationPolicy::create_frame_adapter(pool_key_t key,
const FrameConstructorArgs &params) {
auto pool_it = pool_table.find(key);
if (pool_it == pool_table.end()) {
std::stringstream ss;

@@ -173,7 +182,8 @@ cv::MediaFrame::AdapterPtr VPLDX11AccelerationPolicy::create_frame_adapter(pool_
}

pool_t& requested_pool = pool_it->second;
return cv::MediaFrame::AdapterPtr{new VPLMediaFrameDX11Adapter(requested_pool.find_by_handle(surface))};
return cv::MediaFrame::AdapterPtr{new VPLMediaFrameDX11Adapter(requested_pool.find_by_handle(params.assoc_surface),
params.assoc_handle)};
}

mfxStatus VPLDX11AccelerationPolicy::alloc_cb(mfxHDL pthis, mfxFrameAllocRequest *request,

@@ -283,12 +293,28 @@ mfxStatus VPLDX11AccelerationPolicy::on_alloc(const mfxFrameAllocRequest *reques
desc.BindFlags = 0;
}

/* NB:
 * On the one hand current OpenVINO API doesn't support texture array and
 * D3D11 API doesn't allow to address specific texture element in array.
 * On the other hand using textures array should be more performant case
 * in applications (according to community experience)
 * So, to be compliant with OV let's turn off textures array feature, but keep
 * this code in commented section to consider such "optimization" in future
 */
#if 0
size_t main_textures_count = 1;
if (D3D11_BIND_RENDER_TARGET & desc.BindFlags) {
GAPI_LOG_DEBUG(nullptr, "Use array of testures instead of texture array");
desc.ArraySize = 1;
main_textures_count = request->NumFrameSuggested;
}
#else
// enforcement to use array of textures
size_t main_textures_count = request->NumFrameSuggested;

// enforcement to do not use texture array as subresources as part of a single texture
desc.ArraySize = 1;
#endif

// create GPU textures
HRESULT err = S_OK;

@@ -407,6 +433,8 @@ mfxStatus VPLDX11AccelerationPolicy::on_free(mfxFrameAllocResponse *response) {
}

allocation_table.erase(table_it);
GAPI_LOG_DEBUG(nullptr, "Allocation by requested id: " << response->AllocId <<
" has been erased");
return MFX_ERR_NONE;
}
} // namespace onevpl
@@ -49,7 +49,7 @@ struct GAPI_EXPORTS VPLDX11AccelerationPolicy final: public VPLAccelerationPolic
size_t get_surface_count(pool_key_t key) const override;

cv::MediaFrame::AdapterPtr create_frame_adapter(pool_key_t key,
mfxFrameSurface1* surface) override;
const FrameConstructorArgs &params) override;
private:
ID3D11Device *hw_handle;
ID3D11DeviceContext* device_context;
@@ -16,13 +16,12 @@

#ifdef HAVE_ONEVPL
#include "streaming/onevpl/onevpl_export.hpp"
#include "streaming/onevpl/accelerators/surface/base_frame_adapter.hpp"

namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {

class Surface;
struct VPLAccelerationPolicy
{
using device_selector_ptr_t = std::shared_ptr<IDeviceSelector>;

@@ -40,6 +39,11 @@ struct VPLAccelerationPolicy
size_t out_buf_ptr_offset,
size_t out_buf_ptr_size)>;

struct FrameConstructorArgs {
surface_t::handle_t *assoc_surface;
session_t assoc_handle;
};

device_selector_ptr_t get_device_selector() {
return device_selector;
}

@@ -61,8 +65,7 @@ struct VPLAccelerationPolicy
virtual size_t get_surface_count(pool_key_t key) const = 0;

virtual cv::MediaFrame::AdapterPtr create_frame_adapter(pool_key_t key,
mfxFrameSurface1* surface) = 0;
private:
const FrameConstructorArgs &params) = 0;
device_selector_ptr_t device_selector;
};
} // namespace onevpl
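To make the interface change above concrete: engines now hand the policy a FrameConstructorArgs bundle (surface plus the producing mfxSession) instead of a bare surface pointer when requesting a frame adapter. A minimal sketch of the new call pattern, mirroring VPLLegacyDecodeEngine::on_frame_ready further down in this diff (variable names are illustrative):

// Sketch only; `ready_surface`, `session` and `pool_id` stand for values an engine session already owns.
VPLAccelerationPolicy::FrameConstructorArgs args{ready_surface, session};
cv::MediaFrame::AdapterPtr adapter = acceleration_policy->create_frame_adapter(pool_id, args);
// The adapter now also records which mfxSession produced the surface
// (presumably so the preproc engine can identify the originating session).
cv::MediaFrame frame(std::move(adapter));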
@@ -0,0 +1,70 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2022 Intel Corporation

#include "streaming/onevpl/accelerators/surface/base_frame_adapter.hpp"
#include "streaming/onevpl/accelerators/surface/surface.hpp"
#include "logger.hpp"

#ifdef HAVE_ONEVPL
#include "streaming/onevpl/onevpl_export.hpp"

namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {
BaseFrameAdapter::BaseFrameAdapter(std::shared_ptr<Surface> surface, SessionHandle assoc_handle):
parent_surface_ptr(surface), parent_handle(assoc_handle) {
GAPI_Assert(parent_surface_ptr && "Surface is nullptr");
GAPI_Assert(parent_handle && "mfxSession is nullptr");

const Surface::info_t& info = parent_surface_ptr->get_info();
GAPI_LOG_DEBUG(nullptr, "surface: " << parent_surface_ptr->get_handle() <<
", w: " << info.Width << ", h: " << info.Height <<
", p: " << parent_surface_ptr->get_data().Pitch <<
", frame id: " << reinterpret_cast<void*>(this));
switch(info.FourCC) {
case MFX_FOURCC_I420:
throw std::runtime_error("MediaFrame doesn't support I420 type");
break;
case MFX_FOURCC_NV12:
frame_desc.fmt = MediaFormat::NV12;
break;
default:
throw std::runtime_error("MediaFrame unknown 'fmt' type: " + std::to_string(info.FourCC));
}

frame_desc.size = cv::Size{info.Width, info.Height};
parent_surface_ptr->obtain_lock();
}

BaseFrameAdapter::~BaseFrameAdapter() {
// Each BaseFrameAdapter releases mfx surface counter
// The last BaseFrameAdapter releases shared Surface pointer
// The last surface pointer releases workspace memory
GAPI_LOG_DEBUG(nullptr, "destroy frame id: " << reinterpret_cast<void*>(this));
parent_surface_ptr->release_lock();
}

const std::shared_ptr<Surface>& BaseFrameAdapter::get_surface() const {
return parent_surface_ptr;
}

std::shared_ptr<Surface> BaseFrameAdapter::surface() {
return parent_surface_ptr;
}

const BaseFrameAdapter::SessionHandle BaseFrameAdapter::get_session_handle() const {
return parent_handle;
}

cv::GFrameDesc BaseFrameAdapter::meta() const {
return frame_desc;
}
} // namespace onevpl
} // namespace wip
} // namespace gapi
} // namespace cv
#endif // HAVE_ONEVPL
@@ -0,0 +1,43 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2022 Intel Corporation

#ifndef GAPI_STREAMING_ONEVPL_ACCELERATORS_SURFACE_BASE_FRAME_ADAPTER_HPP
#define GAPI_STREAMING_ONEVPL_ACCELERATORS_SURFACE_BASE_FRAME_ADAPTER_HPP
#include <memory>

#include <opencv2/gapi/media.hpp>
#include "streaming/onevpl/accelerators/surface/surface.hpp"

#ifdef HAVE_ONEVPL

namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {
class BaseFrameAdapter : public cv::MediaFrame::IAdapter {
public:
using SessionHandle = mfxSession;

const std::shared_ptr<Surface>& get_surface() const;
const SessionHandle get_session_handle() const;

cv::GFrameDesc meta() const override;
protected:
BaseFrameAdapter(std::shared_ptr<Surface> assoc_surface, SessionHandle assoc_handle);
~BaseFrameAdapter();
std::shared_ptr<Surface> surface();

std::shared_ptr<Surface> parent_surface_ptr;
SessionHandle parent_handle;
GFrameDesc frame_desc;
};
} // namespace onevpl
} // namespace wip
} // namespace gapi
} // namespace cv

#endif // HAVE_ONEVPL
#endif // GAPI_STREAMING_ONEVPL_ACCELERATORS_SURFACE_BASE_FRAME_ADAPTER_HPP
@@ -16,46 +16,16 @@ namespace gapi {
namespace wip {
namespace onevpl {

VPLMediaFrameCPUAdapter::VPLMediaFrameCPUAdapter(std::shared_ptr<Surface> surface):
parent_surface_ptr(surface) {

GAPI_Assert(parent_surface_ptr && "Surface is nullptr");
GAPI_LOG_DEBUG(nullptr, "surface: " << parent_surface_ptr->get_handle() <<
", w: " << parent_surface_ptr->get_info().Width <<
", h: " << parent_surface_ptr->get_info().Height <<
", p: " << parent_surface_ptr->get_data().Pitch);
const Surface::info_t& info = parent_surface_ptr->get_info();
switch(info.FourCC)
{
case MFX_FOURCC_I420:
throw std::runtime_error("MediaFrame doesn't support I420 type");
break;
case MFX_FOURCC_NV12:
frame_desc.fmt = MediaFormat::NV12;
break;
default:
throw std::runtime_error("MediaFrame unknown 'fmt' type: " + std::to_string(info.FourCC));
}

frame_desc.size = cv::Size{info.Width, info.Height};
parent_surface_ptr->obtain_lock();
VPLMediaFrameCPUAdapter::VPLMediaFrameCPUAdapter(std::shared_ptr<Surface> surface,
SessionHandle assoc_handle):
BaseFrameAdapter(surface, assoc_handle) {
}

VPLMediaFrameCPUAdapter::~VPLMediaFrameCPUAdapter() {

// Each VPLMediaFrameCPUAdapter releases mfx surface counter
// The last VPLMediaFrameCPUAdapter releases shared Surface pointer
// The last surface pointer releases workspace memory
parent_surface_ptr->release_lock();
}

cv::GFrameDesc VPLMediaFrameCPUAdapter::meta() const {
return frame_desc;
}
VPLMediaFrameCPUAdapter::~VPLMediaFrameCPUAdapter() = default;

MediaFrame::View VPLMediaFrameCPUAdapter::access(MediaFrame::Access) {
const Surface::data_t& data = parent_surface_ptr->get_data();
const Surface::info_t& info = parent_surface_ptr->get_info();
const Surface::data_t& data = get_surface()->get_data();
const Surface::info_t& info = get_surface()->get_info();
using stride_t = typename cv::MediaFrame::View::Strides::value_type;

stride_t pitch = static_cast<stride_t>(data.Pitch);
@@ -6,10 +6,8 @@

#ifndef GAPI_STREAMING_ONEVPL_ACCELERATORS_SURFACE_CPU_FRAME_ADAPTER_HPP
#define GAPI_STREAMING_ONEVPL_ACCELERATORS_SURFACE_CPU_FRAME_ADAPTER_HPP
#include <memory>

#include <opencv2/gapi/media.hpp>
#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS
#include "streaming/onevpl/accelerators/surface/base_frame_adapter.hpp"

#ifdef HAVE_ONEVPL

@@ -18,22 +16,20 @@ namespace gapi {
namespace wip {
namespace onevpl {

class Surface;
class VPLMediaFrameCPUAdapter : public cv::MediaFrame::IAdapter {
class VPLMediaFrameCPUAdapter : public BaseFrameAdapter {
public:
// GAPI_EXPORTS for tests
GAPI_EXPORTS explicit VPLMediaFrameCPUAdapter(std::shared_ptr<Surface> assoc_surface);
GAPI_EXPORTS explicit VPLMediaFrameCPUAdapter(std::shared_ptr<Surface> assoc_surface,
SessionHandle assoc_handle);
GAPI_EXPORTS ~VPLMediaFrameCPUAdapter();
cv::GFrameDesc meta() const override;

MediaFrame::View access(MediaFrame::Access) override;

// The default implementation does nothing
cv::util::any blobParams() const override;
void serialize(cv::gapi::s11n::IOStream&) override;
void deserialize(cv::gapi::s11n::IIStream&) override;
private:
std::shared_ptr<Surface> parent_surface_ptr;
GFrameDesc frame_desc;

};
} // namespace onevpl
} // namespace wip
@@ -40,117 +40,71 @@ void unlock_mid(mfxMemId mid, mfxFrameData &data, MediaFrame::Access mode) {
}
}

VPLMediaFrameDX11Adapter::VPLMediaFrameDX11Adapter(std::shared_ptr<Surface> surface):
parent_surface_ptr(surface) {
GAPI_Assert(parent_surface_ptr && "Surface is nullptr");

const Surface::info_t& info = parent_surface_ptr->get_info();
Surface::data_t& data = parent_surface_ptr->get_data();
GAPI_LOG_DEBUG(nullptr, "surface: " << parent_surface_ptr->get_handle() <<
", w: " << info.Width << ", h: " << info.Height <<
", p: " << data.Pitch <<
", frame id: " << reinterpret_cast<void*>(this));
switch(info.FourCC)
{
case MFX_FOURCC_I420:
throw std::runtime_error("MediaFrame doesn't support I420 type");
break;
case MFX_FOURCC_NV12:
frame_desc.fmt = MediaFormat::NV12;
break;
default:
throw std::runtime_error("MediaFrame unknown 'fmt' type: " + std::to_string(info.FourCC));
}
frame_desc.size = cv::Size{info.Width, info.Height};
VPLMediaFrameDX11Adapter::VPLMediaFrameDX11Adapter(std::shared_ptr<Surface> assoc_surface,
SessionHandle assoc_handle):
BaseFrameAdapter(assoc_surface, assoc_handle) {
Surface::data_t& data = assoc_surface->get_data();

LockAdapter* alloc_data = reinterpret_cast<LockAdapter*>(data.MemId);
alloc_data->set_adaptee(this);

parent_surface_ptr->obtain_lock();
}

VPLMediaFrameDX11Adapter::~VPLMediaFrameDX11Adapter() {
// Each VPLMediaFrameDX11Adapter releases mfx surface counter
// The last VPLMediaFrameDX11Adapter releases shared Surface pointer
// The last surface pointer releases workspace memory

GAPI_LOG_DEBUG(nullptr, "destroy frame id: " << reinterpret_cast<void*>(this));

Surface::data_t& data = parent_surface_ptr->get_data();
Surface::data_t& data = surface()->get_data();
LockAdapter* alloc_data = reinterpret_cast<LockAdapter*>(data.MemId);
alloc_data->set_adaptee(nullptr);

parent_surface_ptr->release_lock();
}

cv::GFrameDesc VPLMediaFrameDX11Adapter::meta() const {
return frame_desc;
}

MediaFrame::View VPLMediaFrameDX11Adapter::access(MediaFrame::Access mode) {
Surface::data_t& data = parent_surface_ptr->get_data();
const Surface::info_t& info = parent_surface_ptr->get_info();
// NB: make copy for some copyable object, because access release may be happened
// after source/pool destruction, so we need a copy
auto surface_ptr_copy = surface();
Surface::data_t& data = surface_ptr_copy->get_data();
const Surface::info_t& info = surface_ptr_copy->get_info();
void* frame_id = reinterpret_cast<void*>(this);

GAPI_LOG_DEBUG(nullptr, "START lock frame in surface: " << parent_surface_ptr->get_handle() <<
GAPI_LOG_DEBUG(nullptr, "START lock frame in surface: " << surface_ptr_copy->get_handle() <<
", frame id: " << frame_id);

// lock MT
lock_mid(data.MemId, data, mode);

GAPI_LOG_DEBUG(nullptr, "FINISH lock frame in surface: " << parent_surface_ptr->get_handle() <<
GAPI_LOG_DEBUG(nullptr, "FINISH lock frame in surface: " << surface_ptr_copy->get_handle() <<
", frame id: " << frame_id);
using stride_t = typename cv::MediaFrame::View::Strides::value_type;
stride_t pitch = static_cast<stride_t>(data.Pitch);

// NB: make copy for some copyable object, because access release may be happened
// after source/pool destruction, so we need a copy
auto parent_surface_ptr_copy = parent_surface_ptr;
auto release_guard = [surface_ptr_copy, frame_id, mode] () {
surface_ptr_copy->obtain_lock();

auto& data = surface_ptr_copy->get_data();
GAPI_LOG_DEBUG(nullptr, "START unlock frame in surface: " << surface_ptr_copy->get_handle() <<
", frame id: " << frame_id);
unlock_mid(data.MemId, data, mode);

GAPI_LOG_DEBUG(nullptr, "FINISH unlock frame in surface: " << surface_ptr_copy->get_handle() <<
", frame id: " << frame_id);
surface_ptr_copy->release_lock();
};

switch(info.FourCC) {
case MFX_FOURCC_I420:
{
GAPI_Assert(data.Y && data.U && data.V && "MFX_FOURCC_I420 frame data is nullptr");
cv::MediaFrame::View::Ptrs pp = { data.Y, data.U, data.V, nullptr };
cv::MediaFrame::View::Strides ss = { pitch, pitch / 2, pitch / 2, 0u };
return cv::MediaFrame::View(std::move(pp), std::move(ss),
[parent_surface_ptr_copy,
frame_id, mode] () {
parent_surface_ptr_copy->obtain_lock();

auto& data = parent_surface_ptr_copy->get_data();
GAPI_LOG_DEBUG(nullptr, "START unlock frame in surface: " << parent_surface_ptr_copy->get_handle() <<
", frame id: " << frame_id);
unlock_mid(data.MemId, data, mode);

GAPI_LOG_DEBUG(nullptr, "FINISH unlock frame in surface: " << parent_surface_ptr_copy->get_handle() <<
", frame id: " << frame_id);

parent_surface_ptr_copy->release_lock();
});
return cv::MediaFrame::View(std::move(pp), std::move(ss), release_guard);
}
case MFX_FOURCC_NV12:
{
if (!data.Y || !data.UV) {
GAPI_LOG_WARNING(nullptr, "Empty data detected!!! for surface: " << parent_surface_ptr->get_handle() <<
GAPI_LOG_WARNING(nullptr, "Empty data detected!!! for surface: " << surface_ptr_copy->get_handle() <<
", frame id: " << frame_id);
}
GAPI_Assert(data.Y && data.UV && "MFX_FOURCC_NV12 frame data is nullptr");
cv::MediaFrame::View::Ptrs pp = { data.Y, data.UV, nullptr, nullptr };
cv::MediaFrame::View::Strides ss = { pitch, pitch, 0u, 0u };
return cv::MediaFrame::View(std::move(pp), std::move(ss),
[parent_surface_ptr_copy,
frame_id, mode] () {
parent_surface_ptr_copy->obtain_lock();

auto& data = parent_surface_ptr_copy->get_data();
GAPI_LOG_DEBUG(nullptr, "START unlock frame in surface: " << parent_surface_ptr_copy->get_handle() <<
", frame id: " << frame_id);
unlock_mid(data.MemId, data, mode);

GAPI_LOG_DEBUG(nullptr, "FINISH unlock frame in surface: " << parent_surface_ptr_copy->get_handle() <<
", frame id: " << frame_id);
parent_surface_ptr_copy->release_lock();
});
return cv::MediaFrame::View(std::move(pp), std::move(ss), release_guard);
}
break;
default:

@@ -162,8 +116,9 @@ cv::util::any VPLMediaFrameDX11Adapter::blobParams() const {
/*GAPI_Assert(false && "VPLMediaFrameDX11Adapter::blobParams() is not fully integrated"
"in OpenVINO InferenceEngine and would be temporary disable.");*/
#ifdef HAVE_INF_ENGINE
Surface::data_t& data = parent_surface_ptr->get_data();
const Surface::info_t& info = parent_surface_ptr->get_info();
auto surface_ptr_copy = get_surface();
Surface::data_t& data = surface_ptr_copy->get_data();
const Surface::info_t& info = surface_ptr_copy->get_info();
NativeHandleAdapter* native_handle_getter = reinterpret_cast<NativeHandleAdapter*>(data.MemId);

mfxHDLPair handle{};
@@ -8,9 +8,7 @@
#define GAPI_STREAMING_ONEVPL_ACCELERATORS_SURFACE_DX11_FRAME_ADAPTER_HPP
#include <memory>

#include <opencv2/gapi/media.hpp>
#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS

#include "streaming/onevpl/accelerators/surface/base_frame_adapter.hpp"
#include "streaming/onevpl/accelerators/utils/shared_lock.hpp"
#ifdef HAVE_ONEVPL
#include "streaming/onevpl/onevpl_export.hpp"

@@ -30,15 +28,13 @@ namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {

class Surface;
class VPLMediaFrameDX11Adapter final: public cv::MediaFrame::IAdapter,
class VPLMediaFrameDX11Adapter final: public BaseFrameAdapter,
public SharedLock {
public:
// GAPI_EXPORTS for tests
GAPI_EXPORTS VPLMediaFrameDX11Adapter(std::shared_ptr<Surface> assoc_surface);
GAPI_EXPORTS VPLMediaFrameDX11Adapter(std::shared_ptr<Surface> assoc_surface,
SessionHandle assoc_handle);
GAPI_EXPORTS ~VPLMediaFrameDX11Adapter();
cv::GFrameDesc meta() const override;
MediaFrame::View access(MediaFrame::Access) override;

// The default implementation does nothing

@@ -48,9 +44,7 @@ public:

static DXGI_FORMAT get_dx11_color_format(uint32_t mfx_fourcc);
private:
std::shared_ptr<Surface> parent_surface_ptr;
mfxFrameAllocator allocator;
GFrameDesc frame_desc;
};
} // namespace onevpl
} // namespace wip
@@ -94,75 +94,22 @@ std::vector<ValueType> get_params_from_string(const std::string& str) {
ret.push_back(creator.create<mfxU32>(name, cstr_to_mfx_accel_mode(value.c_str())));
} else if (name == "mfxImplDescription.ApiVersion.Version") {
ret.push_back(creator.create<mfxU32>(name, cstr_to_mfx_version(value.c_str())));
} else if (name == CfgParam::frames_pool_size_name()) {
} else if ((name == CfgParam::frames_pool_size_name()) || (name == CfgParam::vpp_frames_pool_size_name())) {
ret.push_back(creator.create(name, strtoull_or_throw(value.c_str()), false));
} else if (name == CfgParam::vpp_frames_pool_size_name()) {
ret.push_back(creator.create(name, strtoull_or_throw(value.c_str()), false));
} else if (name == CfgParam::vpp_in_width_name()) {
} else if ((name == CfgParam::vpp_in_width_name()) || (name == CfgParam::vpp_in_height_name()) ||
(name == CfgParam::vpp_in_crop_w_name()) || (name == CfgParam::vpp_in_crop_h_name()) ||
(name == CfgParam::vpp_in_crop_x_name()) || (name == CfgParam::vpp_in_crop_y_name()) ||
(name == CfgParam::vpp_out_chroma_format_name()) ||
(name == CfgParam::vpp_out_width_name()) || (name == CfgParam::vpp_out_height_name()) ||
(name == CfgParam::vpp_out_crop_w_name()) || (name == CfgParam::vpp_out_crop_h_name()) ||
(name == CfgParam::vpp_out_crop_x_name()) || (name == CfgParam::vpp_out_crop_y_name()) ||
(name == CfgParam::vpp_out_pic_struct_name())) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_in_height_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_in_crop_w_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_in_crop_h_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_in_crop_x_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_in_crop_y_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_out_fourcc_name()) {
ret.push_back(creator.create(name,
static_cast<uint32_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_out_chroma_format_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_out_width_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_out_height_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_out_crop_w_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_out_crop_h_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_out_crop_x_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_out_crop_y_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_out_pic_struct_name()) {
ret.push_back(creator.create(name,
static_cast<uint16_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_out_framerate_n_name()) {
ret.push_back(creator.create(name,
static_cast<uint32_t>(strtoul_or_throw(value.c_str())),
false));
} else if (name == CfgParam::vpp_out_framerate_d_name()) {
} else if ((name == CfgParam::vpp_out_fourcc_name()) ||
(name == CfgParam::vpp_out_framerate_n_name()) ||
(name == CfgParam::vpp_out_framerate_d_name())) {
ret.push_back(creator.create(name,
static_cast<uint32_t>(strtoul_or_throw(value.c_str())),
false));
@@ -83,11 +83,8 @@ VPLLegacyDecodeEngine::VPLLegacyDecodeEngine(std::unique_ptr<VPLAccelerationPoli
// enqueue decode operation with current session surface
my_sess.last_status =
MFXVideoDECODE_DecodeFrameAsync(my_sess.session,
(my_sess.data_provider || (my_sess.stream && my_sess.stream->DataLength))
? my_sess.stream.get()

: nullptr, /* No more data to read, start decode draining mode*/
my_sess.procesing_surface_ptr.lock()->get_handle(),
my_sess.get_mfx_bitstream_ptr(),
my_sess.processing_surface_ptr.lock()->get_handle(),
&sync_pair.second,
&sync_pair.first);

@@ -98,12 +95,12 @@ VPLLegacyDecodeEngine::VPLLegacyDecodeEngine(std::unique_ptr<VPLAccelerationPoli
my_sess.last_status == MFX_WRN_DEVICE_BUSY) {
try {
if (my_sess.last_status == MFX_ERR_MORE_SURFACE) {
my_sess.swap_surface(*this);
my_sess.swap_decode_surface(*this);
}
my_sess.last_status =
MFXVideoDECODE_DecodeFrameAsync(my_sess.session,
my_sess.stream.get(),
my_sess.procesing_surface_ptr.lock()->get_handle(),
my_sess.get_mfx_bitstream_ptr(),
my_sess.processing_surface_ptr.lock()->get_handle(),
&sync_pair.second,
&sync_pair.first);

@@ -282,22 +279,19 @@ VPLLegacyDecodeEngine::initialize_session(mfxSession mfx_session,

sess_ptr->init_surface_pool(param.decode_pool_key);
// prepare working decode surface
sess_ptr->swap_surface(*this);
sess_ptr->swap_decode_surface(*this);
return sess_ptr;
}

ProcessingEngineBase::ExecutionStatus VPLLegacyDecodeEngine::execute_op(operation_t& op, EngineSession& sess) {
return op(sess);
}

void VPLLegacyDecodeEngine::on_frame_ready(LegacyDecodeSession& sess,
mfxFrameSurface1* ready_surface)
{
GAPI_LOG_DEBUG(nullptr, "[" << sess.session << "], frame ready");

// manage memory ownership rely on acceleration policy
VPLAccelerationPolicy::FrameConstructorArgs args{ready_surface, sess.session};
auto frame_adapter = acceleration_policy->create_frame_adapter(sess.decoder_pool_id,
ready_surface);
args);
ready_frames.emplace(cv::MediaFrame(std::move(frame_adapter)), sess.generate_frame_meta());

// pop away synced out object

@@ -313,7 +307,7 @@ ProcessingEngineBase::ExecutionStatus VPLLegacyDecodeEngine::process_error(mfxSt
{
// prepare sync object for new surface
try {
sess.swap_surface(*this);
sess.swap_decode_surface(*this);
return ExecutionStatus::Continue;
} catch (const std::runtime_error& ex) {
GAPI_LOG_WARNING(nullptr, "[" << sess.session << "] error: " << ex.what());

@@ -334,7 +328,7 @@ ProcessingEngineBase::ExecutionStatus VPLLegacyDecodeEngine::process_error(mfxSt
// This applies to external memory allocations and should not be expected for
// a simple internal allocation case like this
try {
sess.swap_surface(*this);
sess.swap_decode_surface(*this);
return ExecutionStatus::Continue;
} catch (const std::runtime_error& ex) {
GAPI_LOG_WARNING(nullptr, "[" << sess.session << "] error: " << ex.what());

@@ -358,9 +352,7 @@ ProcessingEngineBase::ExecutionStatus VPLLegacyDecodeEngine::process_error(mfxSt
// The decoder detected a new sequence header in the bitstream.
// Video parameters may have changed.
// In external memory allocation case, might need to reallocate the output surface
/*GAPI_DbgAssert(false && "VPLLegacyDecodeEngine::process_error - "
"MFX_WRN_VIDEO_PARAM_CHANGED is not processed");
*/
GAPI_LOG_WARNING(nullptr, "[" << sess.session << "] got MFX_WRN_VIDEO_PARAM_CHANGED");
return ExecutionStatus::Continue;
break;
case MFX_ERR_INCOMPATIBLE_VIDEO_PARAM:

@@ -380,7 +372,7 @@ ProcessingEngineBase::ExecutionStatus VPLLegacyDecodeEngine::process_error(mfxSt
break;
case MFX_WRN_IN_EXECUTION:
try {
sess.swap_surface(*this);
sess.swap_decode_surface(*this);
return ExecutionStatus::Continue;
} catch (const std::runtime_error& ex) {
GAPI_LOG_WARNING(nullptr, "[" << sess.session << "] error: " << ex.what());
@@ -41,7 +41,6 @@ protected:
const std::vector<CfgParam>& cfg_params,
std::shared_ptr<IDataProvider> provider);

ExecutionStatus execute_op(operation_t& op, EngineSession& sess) override;
ExecutionStatus process_error(mfxStatus status, LegacyDecodeSession& sess);

void on_frame_ready(LegacyDecodeSession& sess,
@@ -22,10 +22,11 @@ namespace onevpl {
LegacyDecodeSession::LegacyDecodeSession(mfxSession sess,
DecoderParams&& decoder_param,
std::shared_ptr<IDataProvider> provider) :
EngineSession(sess, std::move(decoder_param.stream)),
EngineSession(sess),
mfx_decoder_param(std::move(decoder_param.param)),
data_provider(std::move(provider)),
procesing_surface_ptr(),
stream(std::move(decoder_param.stream)),
processing_surface_ptr(),
sync_queue(),
decoded_frames_count()
{

@@ -37,25 +38,10 @@ LegacyDecodeSession::~LegacyDecodeSession()
MFXVideoDECODE_Close(session);
}

void LegacyDecodeSession::swap_surface(VPLLegacyDecodeEngine& engine) {
void LegacyDecodeSession::swap_decode_surface(VPLLegacyDecodeEngine& engine) {
VPLAccelerationPolicy* acceleration_policy = engine.get_accel();
GAPI_Assert(acceleration_policy && "Empty acceleration_policy");
try {
auto cand = acceleration_policy->get_free_surface(decoder_pool_id).lock();

GAPI_LOG_DEBUG(nullptr, "[" << session << "] swap surface"
", old: " << (!procesing_surface_ptr.expired()
? procesing_surface_ptr.lock()->get_handle()
: nullptr) <<
", new: "<< cand->get_handle());

procesing_surface_ptr = cand;
} catch (const std::runtime_error& ex) {
GAPI_LOG_WARNING(nullptr, "[" << session << "] error: " << ex.what());

// Delegate exception processing on caller
throw;
}
request_free_surface(session, decoder_pool_id, *acceleration_policy, processing_surface_ptr);
}

void LegacyDecodeSession::init_surface_pool(VPLAccelerationPolicy::pool_key_t key) {

@@ -77,6 +63,11 @@ Data::Meta LegacyDecodeSession::generate_frame_meta() {
const mfxFrameInfo& LegacyDecodeSession::get_video_param() const {
return mfx_decoder_param.mfx.FrameInfo;
}

IDataProvider::mfx_bitstream *LegacyDecodeSession::get_mfx_bitstream_ptr() {
return (data_provider || (stream && stream->DataLength)) ?
stream.get() : nullptr;
}
} // namespace onevpl
} // namespace wip
} // namespace gapi
@@ -13,7 +13,6 @@
#include <opencv2/gapi/streaming/meta.hpp>

#include "streaming/onevpl/engine/engine_session.hpp"
#include "streaming/onevpl/accelerators/accel_policy_interface.hpp"
#ifdef HAVE_ONEVPL
#include "streaming/onevpl/onevpl_export.hpp"

@@ -21,11 +20,7 @@ namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {

struct IDataProvider;
class Surface;
struct VPLAccelerationPolicy;

class GAPI_EXPORTS LegacyDecodeSession : public EngineSession {
public:
friend class VPLLegacyDecodeEngine;

@@ -35,19 +30,22 @@ public:
~LegacyDecodeSession();
using EngineSession::EngineSession;

void swap_surface(VPLLegacyDecodeEngine& engine);
void swap_decode_surface(VPLLegacyDecodeEngine& engine);
void init_surface_pool(VPLAccelerationPolicy::pool_key_t key);

Data::Meta generate_frame_meta();
virtual const mfxFrameInfo& get_video_param() const override;

IDataProvider::mfx_bitstream *get_mfx_bitstream_ptr();
private:
mfxVideoParam mfx_decoder_param;
std::shared_ptr<IDataProvider> data_provider;
VPLAccelerationPolicy::pool_key_t decoder_pool_id;
mfxFrameAllocRequest request;

std::shared_ptr<IDataProvider> data_provider;
std::shared_ptr<IDataProvider::mfx_bitstream> stream;

protected:
std::weak_ptr<Surface> procesing_surface_ptr;
std::weak_ptr<Surface> processing_surface_ptr;
using op_handle_t = std::pair<mfxSyncPoint, mfxFrameSurface1*>;
std::queue<op_handle_t> sync_queue;
@ -14,8 +14,10 @@ namespace gapi {
namespace wip {
namespace onevpl {

EngineSession::EngineSession(mfxSession sess, std::shared_ptr<IDataProvider::mfx_bitstream>&& str) :
    session(sess), stream(std::move(str)) {}
EngineSession::EngineSession(mfxSession sess) :
    session(sess) {
}

EngineSession::~EngineSession()
{
    GAPI_LOG_INFO(nullptr, "Close session: " << session);
@ -26,6 +28,31 @@ std::string EngineSession::error_code_to_str() const
{
    return mfxstatus_to_string(last_status);
}

void EngineSession::request_free_surface(mfxSession session,
                                         VPLAccelerationPolicy::pool_key_t key,
                                         VPLAccelerationPolicy &acceleration_policy,
                                         std::weak_ptr<Surface> &surface_to_exchange,
                                         bool reset_if_not_found) {
    try {
        auto cand = acceleration_policy.get_free_surface(key).lock();

        GAPI_LOG_DEBUG(nullptr, "[" << session << "] swap surface"
                                ", old: " << (!surface_to_exchange.expired()
                                              ? surface_to_exchange.lock()->get_handle()
                                              : nullptr) <<
                                ", new: " << cand->get_handle());

        surface_to_exchange = cand;
    } catch (const std::runtime_error& ex) {
        GAPI_LOG_WARNING(nullptr, "[" << session << "] error: " << ex.what());
        if (reset_if_not_found) {
            surface_to_exchange.reset();
        }
        // Delegate exception processing on caller side
        throw;
    }
}
} // namespace onevpl
} // namespace wip
} // namespace gapi
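request_free_surface() factors out the surface-swap logic that each session type previously reimplemented. A minimal sketch of the calling pattern used by the concrete sessions in this patch; the `MySession` name and its `pool_id` member are placeholders, the real callers are LegacyDecodeSession and VPPPreprocSession shown further below:

    void MySession::swap_surface(VPPPreprocEngine& engine) {
        VPLAccelerationPolicy* acceleration_policy = engine.get_accel();
        GAPI_Assert(acceleration_policy && "Empty acceleration_policy");
        // Throws if the pool has no free surface; with reset_if_not_found == true the
        // cached weak_ptr is cleared first so the caller can detect the starvation case.
        request_free_surface(session, pool_id, *acceleration_policy,
                             processing_surface_ptr, /*reset_if_not_found*/ true);
    }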
@ -16,6 +16,8 @@
|
||||
|
||||
#include "opencv2/gapi/own/exports.hpp" // GAPI_EXPORTS
|
||||
#include <opencv2/gapi/streaming/onevpl/data_provider_interface.hpp>
|
||||
#include "streaming/onevpl/data_provider_defines.hpp"
|
||||
#include "streaming/onevpl/accelerators/accel_policy_interface.hpp"
|
||||
|
||||
#ifdef HAVE_ONEVPL
|
||||
#include "streaming/onevpl/onevpl_export.hpp"
|
||||
@ -38,15 +40,19 @@ struct GAPI_EXPORTS TranscoderParams {
|
||||
|
||||
struct GAPI_EXPORTS EngineSession {
|
||||
mfxSession session;
|
||||
std::shared_ptr<IDataProvider::mfx_bitstream> stream;
|
||||
mfxSyncPoint sync;
|
||||
mfxStatus last_status;
|
||||
|
||||
EngineSession(mfxSession sess, std::shared_ptr<IDataProvider::mfx_bitstream>&& str);
|
||||
EngineSession(mfxSession sess);
|
||||
std::string error_code_to_str() const;
|
||||
virtual ~EngineSession();
|
||||
|
||||
virtual const mfxFrameInfo& get_video_param() const = 0;
|
||||
|
||||
static void request_free_surface(mfxSession session,
|
||||
VPLAccelerationPolicy::pool_key_t key,
|
||||
VPLAccelerationPolicy &acceleration_policy,
|
||||
std::weak_ptr<Surface> &surface_to_exchange,
|
||||
bool reset_if_not_found = false);
|
||||
};
|
||||
} // namespace onevpl
|
||||
} // namespace wip
|
||||
|
@ -0,0 +1,459 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2022 Intel Corporation

#ifdef HAVE_ONEVPL

#include <algorithm>
#include <exception>

#include <opencv2/gapi/streaming/onevpl/data_provider_interface.hpp>

#include "streaming/onevpl/engine/preproc/preproc_engine.hpp"
#include "streaming/onevpl/engine/preproc/preproc_session.hpp"

#include "streaming/onevpl/accelerators/accel_policy_interface.hpp"
#include "streaming/onevpl/accelerators/surface/surface.hpp"
#include "streaming/onevpl/cfg_params_parser.hpp"
#include "logger.hpp"

#define ALIGN16(value) (((value + 15) >> 4) << 4)
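ALIGN16 rounds a dimension up to the next multiple of 16, which the VPP output surface dimensions configured below must satisfy. A quick compile-time check of the arithmetic (illustrative only, not part of the patch):

    // Illustrative: ALIGN16 rounds up to the next multiple of 16.
    static_assert(ALIGN16(1080) == 1088, "1080 is rounded up to 1088");
    static_assert(ALIGN16(1920) == 1920, "exact multiples of 16 are unchanged");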

namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {

bool FrameInfoComparator::operator()(const mfxFrameInfo& lhs, const mfxFrameInfo& rhs) const {
    return lhs < rhs;
}

bool FrameInfoComparator::equal_to(const mfxFrameInfo& lhs, const mfxFrameInfo& rhs) {
    return lhs == rhs;
}

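FrameInfoComparator adapts the mfxFrameInfo operator< that this patch adds to streaming/onevpl/utils.cpp (a memcmp over the packed struct), so the engine can key its session cache on the decoded-surface layout. A sketch of the lookup it enables; `sessions`, `lookup` and `decoded_info` are illustrative names, not part of the patch:

    // Illustrative: one VPP session is reused per distinct decoded-surface layout.
    std::map<mfxFrameInfo, std::shared_ptr<VPPPreprocSession>, FrameInfoComparator> sessions;

    void lookup(const mfxFrameInfo &decoded_info) {
        auto it = sessions.find(decoded_info);
        if (it == sessions.end()) {
            // no session for this layout yet - initialize_preproc() below creates and caches one
        }
    }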
VPPPreprocEngine::VPPPreprocEngine(std::unique_ptr<VPLAccelerationPolicy>&& accel) :
|
||||
ProcessingEngineBase(std::move(accel)) {
|
||||
GAPI_LOG_INFO(nullptr, "Create VPP preprocessing engine");
|
||||
preprocessed_frames_count = 0;
|
||||
create_pipeline(
|
||||
// 0) preproc decoded surface with VPP params
|
||||
[this] (EngineSession& sess) -> ExecutionStatus
|
||||
{
|
||||
session_type &my_sess = static_cast<session_type&>(sess);
|
||||
while (!my_sess.sync_in_queue.empty()) {
|
||||
do {
|
||||
if (!my_sess.processing_surface_ptr.expired()) {
|
||||
session_type::incoming_task pending_op = my_sess.sync_in_queue.front();
|
||||
GAPI_LOG_DEBUG(nullptr, "pending IN operations count: " <<
|
||||
my_sess.sync_in_queue.size() <<
|
||||
", sync id: " <<
|
||||
pending_op.sync_handle <<
|
||||
", surface: " <<
|
||||
pending_op.decoded_surface_ptr);
|
||||
|
||||
my_sess.sync_in_queue.pop();
|
||||
auto *vpp_suface = my_sess.processing_surface_ptr.lock()->get_handle();
|
||||
|
||||
/* TODO: consider CROP/ROI here
|
||||
static int x_offset = 0;
|
||||
static int y_offset = 0;
|
||||
dec_surface->Info.CropX = x_offset;
|
||||
dec_surface->Info.CropY = y_offset;
|
||||
dec_surface->Info.CropW = 100 + x_offset++;
|
||||
dec_surface->Info.CropH = 100 + y_offset++;
|
||||
*/
|
||||
session_type::outgoing_task vpp_pending_op {pending_op.sync_handle, nullptr};
|
||||
my_sess.last_status = MFXVideoVPP_RunFrameVPPAsync(my_sess.session,
|
||||
pending_op.decoded_surface_ptr,
|
||||
vpp_suface,
|
||||
nullptr, &vpp_pending_op.sync_handle);
|
||||
vpp_pending_op.vpp_surface_ptr = vpp_suface;
|
||||
|
||||
GAPI_LOG_DEBUG(nullptr, "Got VPP async operation" <<
|
||||
", sync id: " <<
|
||||
vpp_pending_op.sync_handle <<
|
||||
", dec surface: " <<
|
||||
pending_op.decoded_surface_ptr <<
|
||||
", trans surface: " <<
|
||||
vpp_pending_op.vpp_surface_ptr <<
|
||||
", status: " <<
|
||||
mfxstatus_to_string(my_sess.last_status));
|
||||
|
||||
// NB: process status
|
||||
if (my_sess.last_status == MFX_ERR_MORE_SURFACE ||
|
||||
my_sess.last_status == MFX_ERR_NONE) {
|
||||
vpp_pending_op.vpp_surface_ptr->Data.Locked++; // TODO -S- workaround
|
||||
my_sess.vpp_out_queue.emplace(vpp_pending_op);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
my_sess.swap_surface(*this);
|
||||
} catch (const std::runtime_error& ex) {
|
||||
// NB: not an error, yield CPU ticks to check
|
||||
// surface availability at a next phase.
|
||||
// But print WARNING to notify user about pipeline stuck
|
||||
GAPI_LOG_WARNING(nullptr, "[" << my_sess.session <<
|
||||
"] has no VPP surface, reason: " <<
|
||||
ex.what());
|
||||
my_sess.processing_surface_ptr.reset();
|
||||
break;
|
||||
}
|
||||
} while(my_sess.last_status == MFX_ERR_MORE_SURFACE);
|
||||
|
||||
if (my_sess.processing_surface_ptr.expired()) {
|
||||
// TODO break main loop
|
||||
break;
|
||||
}
|
||||
}
|
||||
return ExecutionStatus::Continue;
|
||||
},
|
||||
// 1) Wait for ASYNC decode result
|
||||
[this] (EngineSession& sess) -> ExecutionStatus
|
||||
{
|
||||
session_type& my_sess = static_cast<session_type&>(sess);
|
||||
do {
|
||||
if (!my_sess.vpp_out_queue.empty()) { // FIFO: check the oldest async operation complete
|
||||
session_type::outgoing_task& pending_op = my_sess.vpp_out_queue.front();
|
||||
sess.last_status = MFXVideoCORE_SyncOperation(sess.session, pending_op.sync_handle, 0);
|
||||
|
||||
GAPI_LOG_DEBUG(nullptr, "pending VPP operations count: " <<
|
||||
my_sess.vpp_out_queue.size() <<
|
||||
", sync id: " <<
|
||||
pending_op.sync_handle <<
|
||||
", surface: " <<
|
||||
pending_op.vpp_surface_ptr <<
|
||||
", status: " <<
|
||||
mfxstatus_to_string(my_sess.last_status));
|
||||
|
||||
// put frames in ready queue on success
|
||||
if (MFX_ERR_NONE == sess.last_status) {
|
||||
on_frame_ready(my_sess, pending_op.vpp_surface_ptr);
|
||||
}
|
||||
}
|
||||
} while (MFX_ERR_NONE == sess.last_status && !my_sess.vpp_out_queue.empty());
|
||||
return ExecutionStatus::Continue;
|
||||
},
|
||||
// 2) Falls back on generic status procesing
|
||||
[this] (EngineSession& sess) -> ExecutionStatus
|
||||
{
|
||||
return this->process_error(sess.last_status, static_cast<session_type&>(sess));
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
cv::util::optional<pp_params> VPPPreprocEngine::is_applicable(const cv::MediaFrame& in_frame) {
|
||||
// TODO consider something smarter than RTI
|
||||
cv::util::optional<pp_params> ret;
|
||||
BaseFrameAdapter *vpl_adapter = in_frame.get<BaseFrameAdapter>();
|
||||
GAPI_LOG_DEBUG(nullptr, "validate VPP preprocessing is applicable for frame");
|
||||
if (vpl_adapter) {
|
||||
ret = cv::util::make_optional<pp_params>(
|
||||
pp_params::create<vpp_pp_params>(vpl_adapter->get_session_handle(),
|
||||
vpl_adapter->get_surface()->get_info()));
|
||||
GAPI_LOG_DEBUG(nullptr, "VPP preprocessing applicable, session [" <<
|
||||
vpl_adapter->get_session_handle() << "]");
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
pp_session VPPPreprocEngine::initialize_preproc(const pp_params& initial_frame_param,
|
||||
const GFrameDesc& required_frame_descr) {
|
||||
const vpp_pp_params ¶ms = initial_frame_param.get<vpp_pp_params>();
|
||||
|
||||
// adjust preprocessing settings
|
||||
mfxVideoParam mfxVPPParams{0};
|
||||
// NB: IN params for VPP session must be equal to decoded surface params
|
||||
mfxVPPParams.vpp.In = params.info;
|
||||
|
||||
// NB: OUT params must refer to IN params of a network
|
||||
GAPI_LOG_DEBUG(nullptr, "network input size: " << required_frame_descr.size.width <<
|
||||
"x" << required_frame_descr.size.height);
|
||||
mfxVPPParams.vpp.Out = mfxVPPParams.vpp.In;
|
||||
switch (required_frame_descr.fmt) {
|
||||
case MediaFormat::NV12:
|
||||
mfxVPPParams.vpp.Out.FourCC = MFX_FOURCC_NV12;
|
||||
break;
|
||||
default:
|
||||
GAPI_LOG_WARNING(nullptr, "Unsupported MediaFormat in preprocessing: " <<
|
||||
static_cast<int>(required_frame_descr.fmt) <<
|
||||
". Frame will be rejected");
|
||||
throw std::runtime_error("unsupported MediaFormat value in VPP preprocessing");
|
||||
}
|
||||
|
||||
mfxVPPParams.vpp.Out.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
|
||||
mfxVPPParams.vpp.Out.Width = static_cast<mfxU16>(required_frame_descr.size.width);
|
||||
mfxVPPParams.vpp.Out.Height = static_cast<mfxU16>(required_frame_descr.size.height);
|
||||
mfxVPPParams.vpp.Out.CropW = mfxVPPParams.vpp.Out.Width;
|
||||
mfxVPPParams.vpp.Out.CropH = mfxVPPParams.vpp.Out.Height;
|
||||
|
||||
// check In & Out equally to bypass preproc
|
||||
if (mfxVPPParams.vpp.Out == mfxVPPParams.vpp.In) {
|
||||
GAPI_LOG_DEBUG(nullptr, "no preproc required");
|
||||
return pp_session::create<EngineSession>(nullptr);
|
||||
}
|
||||
|
||||
// recalculate size param according to VPP alignment
|
||||
mfxVPPParams.vpp.Out.Width = ALIGN16(mfxVPPParams.vpp.Out.Width);
|
||||
mfxVPPParams.vpp.Out.Height = ALIGN16(mfxVPPParams.vpp.Out.Height);
|
||||
mfxVPPParams.vpp.Out.CropW = mfxVPPParams.vpp.Out.Width;
|
||||
mfxVPPParams.vpp.Out.CropH = mfxVPPParams.vpp.Out.Height;
|
||||
|
||||
GAPI_LOG_DEBUG(nullptr, "\nFrom:\n{\n" << mfx_frame_info_to_string(mfxVPPParams.vpp.In) <<
|
||||
"}\nTo:\n{\n" << mfx_frame_info_to_string(mfxVPPParams.vpp.Out) << "}");
|
||||
|
||||
// find existing session
|
||||
GAPI_LOG_DEBUG(nullptr, "Find existing VPPPreprocSession for requested frame params"
|
||||
", total sessions: " << preproc_session_map.size());
|
||||
auto it = preproc_session_map.find(mfxVPPParams.vpp.In);
|
||||
if (it != preproc_session_map.end()) {
|
||||
GAPI_LOG_DEBUG(nullptr, "[" << it->second->session << "] found");
|
||||
return pp_session::create(std::static_pointer_cast<EngineSession>(it->second));
|
||||
}
|
||||
|
||||
// NB: make some sanity checks
|
||||
IDeviceSelector::DeviceScoreTable devices = acceleration_policy->get_device_selector()->select_devices();
|
||||
GAPI_Assert(devices.size() == 1 && "Multiple(or zero) acceleration devices case is unsupported");
|
||||
AccelType accel_type = devices.begin()->second.get_type();
|
||||
// assign acceleration
|
||||
if (accel_type == AccelType::DX11) {
|
||||
mfxVPPParams.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY;
|
||||
} else {
|
||||
mfxVPPParams.IOPattern = MFX_IOPATTERN_IN_SYSTEM_MEMORY | MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
|
||||
}
|
||||
|
||||
// clone existing VPL session to inherit VPL loader configuration
|
||||
// and avoid refer to any global state
|
||||
// TODO no clone due to clone issue
|
||||
|
||||
mfxSession mfx_vpp_session = params.handle;
|
||||
mfxStatus sts = MFX_ERR_NONE;
|
||||
|
||||
// TODO: simply use clone after VPL bug fixing
|
||||
//sts = MFXCloneSession(params.handle, &mfx_vpp_session);
|
||||
sts = MFXCreateSession(mfx_handle, impl_number, &mfx_vpp_session);
|
||||
if (sts != MFX_ERR_NONE) {
|
||||
GAPI_LOG_WARNING(nullptr, "Cannot clone VPP session, error: " << mfxstatus_to_string(sts));
|
||||
GAPI_Assert(false && "Cannot continue VPP preprocessing");
|
||||
}
|
||||
|
||||
sts = MFXJoinSession(params.handle, mfx_vpp_session);
|
||||
if (sts != MFX_ERR_NONE) {
|
||||
GAPI_LOG_WARNING(nullptr, "Cannot join VPP sessions, error: " << mfxstatus_to_string(sts));
|
||||
GAPI_Assert(false && "Cannot continue VPP preprocessing");
|
||||
}
|
||||
|
||||
GAPI_LOG_INFO(nullptr, "[" << mfx_vpp_session << "] starting pool allocation");
|
||||
VPLAccelerationPolicy::pool_key_t vpp_out_pool_key {};
|
||||
try {
|
||||
// assign HW acceleration processor
|
||||
acceleration_policy->init(mfx_vpp_session);
|
||||
try {
|
||||
// ask to allocate external memory pool
|
||||
mfxFrameAllocRequest vppRequests[2];
|
||||
memset(&vppRequests, 0, sizeof(mfxFrameAllocRequest) * 2);
|
||||
sts = MFXVideoVPP_QueryIOSurf(mfx_vpp_session, &mfxVPPParams, vppRequests);
|
||||
if (MFX_ERR_NONE != sts) {
|
||||
GAPI_LOG_WARNING(nullptr, "cannot execute MFXVideoVPP_QueryIOSurf, error: " <<
|
||||
mfxstatus_to_string(sts));
|
||||
throw std::runtime_error("Cannot execute MFXVideoVPP_QueryIOSurf");
|
||||
}
|
||||
|
||||
// NB: Assing ID as upper limit descendant to distinguish specific VPP allocation
|
||||
// from decode allocations witch started from 0: by local module convention
|
||||
|
||||
static uint16_t request_id = 0;
|
||||
vppRequests[1].AllocId = std::numeric_limits<uint16_t>::max() - request_id++;
|
||||
GAPI_Assert(request_id != std::numeric_limits<uint16_t>::max() && "Something wrong");
|
||||
|
||||
vppRequests[1].Type |= MFX_MEMTYPE_FROM_VPPIN;
|
||||
vpp_out_pool_key = acceleration_policy->create_surface_pool(vppRequests[1],
|
||||
mfxVPPParams.vpp.Out);
|
||||
|
||||
sts = MFXVideoVPP_Init(mfx_vpp_session, &mfxVPPParams);
|
||||
if (MFX_ERR_NONE != sts) {
|
||||
GAPI_LOG_WARNING(nullptr, "cannot Init VPP, error: " <<
|
||||
mfxstatus_to_string(sts));
|
||||
// TODO consider deallocate pool
|
||||
// but not necessary now cause every fail processed as GAPI_Assert
|
||||
throw std::runtime_error("Cannot init VPP, error: " +
|
||||
mfxstatus_to_string(sts));
|
||||
}
|
||||
} catch (const std::exception&) {
|
||||
GAPI_LOG_WARNING(nullptr, "[" << mfx_vpp_session << "] allocation failed, rollback");
|
||||
acceleration_policy->deinit(mfx_vpp_session);
|
||||
throw;
|
||||
}
|
||||
} catch (const std::exception&) {
|
||||
MFXClose(mfx_vpp_session);
|
||||
GAPI_Assert(false && "Cannot init preproc resources");
|
||||
}
|
||||
|
||||
// create engine session after all
|
||||
session_ptr_type sess_ptr = register_session<session_type>(mfx_vpp_session,
|
||||
mfxVPPParams);
|
||||
sess_ptr->init_surface_pool(vpp_out_pool_key);
|
||||
sess_ptr->swap_surface(*this);
|
||||
|
||||
bool inserted = preproc_session_map.emplace(mfxVPPParams.vpp.In, sess_ptr).second;
|
||||
GAPI_Assert(inserted && "preproc session is exist");
|
||||
GAPI_LOG_INFO(nullptr, "VPPPreprocSession created, total sessions: " << preproc_session_map.size());
|
||||
return pp_session::create(std::static_pointer_cast<EngineSession>(sess_ptr));
|
||||
}
|
||||
|
||||
void VPPPreprocEngine::on_frame_ready(session_type& sess,
|
||||
mfxFrameSurface1* ready_surface)
|
||||
{
|
||||
GAPI_LOG_DEBUG(nullptr, "[" << sess.session << "], frame ready");
|
||||
|
||||
// manage memory ownership rely on acceleration policy
|
||||
ready_surface->Data.Locked--; // TODO -S- workaround
|
||||
VPLAccelerationPolicy::FrameConstructorArgs args{ready_surface, sess.session};
|
||||
auto frame_adapter = acceleration_policy->create_frame_adapter(sess.vpp_pool_id,
|
||||
args);
|
||||
ready_frames.emplace(cv::MediaFrame(std::move(frame_adapter)), sess.generate_frame_meta());
|
||||
|
||||
// pop away synced out object
|
||||
sess.vpp_out_queue.pop();
|
||||
}
|
||||
|
||||
VPPPreprocEngine::session_ptr
|
||||
VPPPreprocEngine::initialize_session(mfxSession,
|
||||
const std::vector<CfgParam>&,
|
||||
std::shared_ptr<IDataProvider>) {
|
||||
return {};
|
||||
}
|
||||
|
||||
cv::MediaFrame VPPPreprocEngine::run_sync(const pp_session& sess, const cv::MediaFrame& in_frame) {
|
||||
|
||||
std::shared_ptr<EngineSession> pp_sess_impl = sess.get<EngineSession>();
|
||||
if (!pp_sess_impl) {
|
||||
// bypass case
|
||||
return in_frame;
|
||||
}
|
||||
session_ptr_type s = std::static_pointer_cast<session_type>(pp_sess_impl);
|
||||
GAPI_DbgAssert(s && "Session is nullptr");
|
||||
GAPI_DbgAssert(is_applicable(in_frame) &&
|
||||
"VPP preproc is not applicable for the given frame");
|
||||
BaseFrameAdapter *vpl_adapter = in_frame.get<BaseFrameAdapter>();
|
||||
if (!vpl_adapter) {
|
||||
GAPI_LOG_WARNING(nullptr, "VPP preproc is inapplicable for a given frame. "
|
||||
"Make sure the frame is collected using onevpl::GSource");
|
||||
throw std::runtime_error("VPP preproc is inapplicable for given frame");
|
||||
}
|
||||
|
||||
// schedule decoded surface into preproc queue
|
||||
session_type::incoming_task in_preproc_request {nullptr,
|
||||
vpl_adapter->get_surface()->get_handle(),
|
||||
in_frame};
|
||||
s->sync_in_queue.emplace(in_preproc_request);
|
||||
|
||||
// invoke pipeline to transform decoded surface into preprocessed surface
|
||||
try
|
||||
{
|
||||
ExecutionStatus status = ExecutionStatus::Continue;
|
||||
while (0 == get_ready_frames_count() &&
|
||||
status == ExecutionStatus::Continue) {
|
||||
status = process(s->session);
|
||||
}
|
||||
|
||||
if (get_ready_frames_count() == 0) {
|
||||
GAPI_LOG_WARNING(nullptr, "failed: cannot obtain preprocessed frames, last status: " <<
|
||||
ProcessingEngineBase::status_to_string(status));
|
||||
throw std::runtime_error("cannot finalize VPP preprocessing operation");
|
||||
}
|
||||
} catch(const std::exception&) {
|
||||
throw;
|
||||
}
|
||||
// obtain new frame is available
|
||||
cv::gapi::wip::Data data;
|
||||
get_frame(data);
|
||||
preprocessed_frames_count++;
|
||||
GAPI_LOG_DEBUG(nullptr, "processed frames count: " << preprocessed_frames_count);
|
||||
return cv::util::get<cv::MediaFrame>(data);
|
||||
}
|
||||
|
||||
ProcessingEngineBase::ExecutionStatus VPPPreprocEngine::process_error(mfxStatus status, session_type& sess) {
|
||||
GAPI_LOG_DEBUG(nullptr, "status: " << mfxstatus_to_string(status));
|
||||
|
||||
switch (status) {
|
||||
case MFX_ERR_NONE:
|
||||
{
|
||||
// prepare sync object for new surface
|
||||
try {
|
||||
sess.swap_surface(*this);
|
||||
return ExecutionStatus::Continue;
|
||||
} catch (const std::runtime_error& ex) {
|
||||
GAPI_LOG_WARNING(nullptr, "[" << sess.session << "] error: " << ex.what());
|
||||
return ExecutionStatus::Continue; // read more data
|
||||
}
|
||||
}
|
||||
case MFX_ERR_MORE_DATA: // The function requires more bitstream at input before decoding can proceed
|
||||
return ExecutionStatus::Processed;
|
||||
case MFX_ERR_MORE_SURFACE:
|
||||
{
|
||||
// The function requires more frame surface at output before decoding can proceed.
|
||||
// This applies to external memory allocations and should not be expected for
|
||||
// a simple internal allocation case like this
|
||||
try {
|
||||
sess.swap_surface(*this);
|
||||
return ExecutionStatus::Continue;
|
||||
} catch (const std::runtime_error& ex) {
|
||||
GAPI_LOG_WARNING(nullptr, "[" << sess.session << "] error: " << ex.what());
|
||||
return ExecutionStatus::Continue; // read more data
|
||||
}
|
||||
break;
|
||||
}
|
||||
case MFX_ERR_DEVICE_LOST:
|
||||
// For non-CPU implementations,
|
||||
// Cleanup if device is lost
|
||||
GAPI_DbgAssert(false && "VPPPreprocEngine::process_error - "
|
||||
"MFX_ERR_DEVICE_LOST is not processed");
|
||||
break;
|
||||
case MFX_WRN_DEVICE_BUSY:
|
||||
// For non-CPU implementations,
|
||||
// Wait a few milliseconds then try again
|
||||
GAPI_DbgAssert(false && "VPPPreprocEngine::process_error - "
|
||||
"MFX_WRN_DEVICE_BUSY is not processed");
|
||||
break;
|
||||
case MFX_WRN_VIDEO_PARAM_CHANGED:
|
||||
// The decoder detected a new sequence header in the bitstream.
|
||||
// Video parameters may have changed.
|
||||
// In external memory allocation case, might need to reallocate the output surface
|
||||
GAPI_LOG_WARNING(nullptr, "[" << sess.session << "] got MFX_WRN_VIDEO_PARAM_CHANGED");
|
||||
return ExecutionStatus::Continue;
|
||||
break;
|
||||
case MFX_ERR_INCOMPATIBLE_VIDEO_PARAM:
|
||||
// The function detected that video parameters provided by the application
|
||||
// are incompatible with initialization parameters.
|
||||
// The application should close the component and then reinitialize it
|
||||
GAPI_DbgAssert(false && "VPPPreprocEngine::process_error - "
|
||||
"MFX_ERR_INCOMPATIBLE_VIDEO_PARAM is not processed");
|
||||
break;
|
||||
case MFX_ERR_REALLOC_SURFACE:
|
||||
// Bigger surface_work required. May be returned only if
|
||||
// mfxInfoMFX::EnableReallocRequest was set to ON during initialization.
|
||||
// This applies to external memory allocations and should not be expected for
|
||||
// a simple internal allocation case like this
|
||||
GAPI_DbgAssert(false && "VPPPreprocEngine::process_error - "
|
||||
"MFX_ERR_REALLOC_SURFACE is not processed");
|
||||
break;
|
||||
case MFX_WRN_IN_EXECUTION:
|
||||
GAPI_LOG_WARNING(nullptr, "[" << sess.session << "] got MFX_WRN_IN_EXECUTION");
|
||||
return ExecutionStatus::Continue;
|
||||
default:
|
||||
GAPI_LOG_WARNING(nullptr, "Unknown status code: " << mfxstatus_to_string(status) <<
|
||||
", decoded frames: " << sess.preprocessed_frames_count);
|
||||
break;
|
||||
}
|
||||
|
||||
return ExecutionStatus::Failed;
|
||||
}
|
||||
} // namespace onevpl
|
||||
} // namespace wip
|
||||
} // namespace gapi
|
||||
} // namespace cv
|
||||
#endif // HAVE_ONEVPL
|
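For reference, the engine is constructed on top of an acceleration policy in the same way the DX11 unit tests in this patch set it up; a minimal sketch, with the CfgParam list and the helper function name being illustrative:

    // Illustrative: wiring the VPP preprocessing engine to a DX11 acceleration policy.
    std::unique_ptr<VPPPreprocEngine> make_dx11_preproc_engine() {
        std::vector<CfgParam> cfg_params_w_dx11;
        cfg_params_w_dx11.push_back(CfgParam::create_acceleration_mode(MFX_ACCEL_MODE_VIA_D3D11));
        std::unique_ptr<VPLAccelerationPolicy> accel(
            new VPLDX11AccelerationPolicy(std::make_shared<CfgParamDeviceSelector>(cfg_params_w_dx11)));
        return std::unique_ptr<VPPPreprocEngine>(new VPPPreprocEngine(std::move(accel)));
    }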
@ -0,0 +1,67 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2022 Intel Corporation

#ifndef GAPI_STREAMING_ONVPL_PREPROC_ENGINE_HPP
#define GAPI_STREAMING_ONVPL_PREPROC_ENGINE_HPP
#include <stdio.h>
#include <memory>
#include <unordered_map>

#include "streaming/onevpl/engine/processing_engine_base.hpp"
#include "streaming/onevpl/accelerators/utils/shared_lock.hpp"

#include "streaming/onevpl/engine/preproc_engine_interface.hpp"

#ifdef HAVE_ONEVPL
#include "streaming/onevpl/onevpl_export.hpp"

namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {
// GAPI_EXPORTS for tests
struct GAPI_EXPORTS FrameInfoComparator {
    bool operator()(const mfxFrameInfo& lhs, const mfxFrameInfo& rhs) const;
    static bool equal_to(const mfxFrameInfo& lhs, const mfxFrameInfo& rhs);
};

class VPPPreprocSession;
struct IDataProvider;
struct VPLAccelerationPolicy;

// GAPI_EXPORTS for tests
class GAPI_EXPORTS VPPPreprocEngine final : public ProcessingEngineBase,
                                            public cv::gapi::wip::IPreprocEngine {
public:
    using session_type = VPPPreprocSession;
    using session_ptr_type = std::shared_ptr<session_type>;

    VPPPreprocEngine(std::unique_ptr<VPLAccelerationPolicy>&& accel);

    cv::util::optional<pp_params> is_applicable(const cv::MediaFrame& in_frame) override;

    pp_session initialize_preproc(const pp_params& initial_frame_param,
                                  const GFrameDesc& required_frame_descr) override;

    cv::MediaFrame run_sync(const pp_session &session_handle,
                            const cv::MediaFrame& in_frame) override;

private:
    std::map<mfxFrameInfo, session_ptr_type, FrameInfoComparator> preproc_session_map;
    void on_frame_ready(session_type& sess,
                        mfxFrameSurface1* ready_surface);
    ExecutionStatus process_error(mfxStatus status, session_type& sess);
    session_ptr initialize_session(mfxSession mfx_session,
                                   const std::vector<CfgParam>& cfg_params,
                                   std::shared_ptr<IDataProvider> provider) override;
    size_t preprocessed_frames_count;
};
} // namespace onevpl
} // namespace wip
} // namespace gapi
} // namespace cv
#endif // HAVE_ONEVPL
#endif // GAPI_STREAMING_ONVPL_PREPROC_ENGINE_HPP
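The is_applicable / initialize_preproc / run_sync contract is easiest to read from the caller's side. A minimal sketch of how a backend could drive the engine for one decoded frame; the function and variable names are illustrative, not part of this patch:

    cv::MediaFrame preprocess_one(VPPPreprocEngine &engine,
                                  const cv::MediaFrame &decoded_frame,
                                  const cv::GFrameDesc &net_input_descr) {
        cv::util::optional<cv::gapi::wip::pp_params> params = engine.is_applicable(decoded_frame);
        if (!params.has_value()) {
            return decoded_frame;                        // not a oneVPL frame - nothing to do
        }
        // Creates a VPP session for this surface layout, or reuses a cached one;
        // a "bypass" session is returned when input and output parameters already match.
        cv::gapi::wip::pp_session sess = engine.initialize_preproc(params.value(),
                                                                   net_input_descr);
        // Blocking resize/convert; returns the input frame untouched in the bypass case.
        return engine.run_sync(sess, decoded_frame);
    }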
@ -0,0 +1,67 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2022 Intel Corporation

#ifdef HAVE_ONEVPL

#include <chrono>
#include <exception>

#include "streaming/onevpl/engine/preproc/preproc_session.hpp"
#include "streaming/onevpl/engine/preproc/preproc_engine.hpp"
#include "streaming/onevpl/accelerators/surface/surface.hpp"
#include "streaming/onevpl/utils.hpp"
#include "logger.hpp"

namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {
VPPPreprocSession::VPPPreprocSession(mfxSession sess, const mfxVideoParam& vpp_out_param) :
    EngineSession(sess),
    mfx_vpp_out_param(vpp_out_param),
    processing_surface_ptr(),
    sync_in_queue(),
    vpp_out_queue(),
    preprocessed_frames_count()
{
}

VPPPreprocSession::~VPPPreprocSession() {
    GAPI_LOG_INFO(nullptr, "Close VPP for session: " << session);
    MFXVideoVPP_Close(session);
}

Data::Meta VPPPreprocSession::generate_frame_meta() {
    const auto now = std::chrono::system_clock::now();
    const auto dur = std::chrono::duration_cast<std::chrono::microseconds>
                     (now.time_since_epoch());
    Data::Meta meta {
        {cv::gapi::streaming::meta_tag::timestamp, int64_t{dur.count()} },
        {cv::gapi::streaming::meta_tag::seq_id, int64_t{preprocessed_frames_count++}}
    };
    return meta;
}

void VPPPreprocSession::swap_surface(VPPPreprocEngine& engine) {
    VPLAccelerationPolicy* acceleration_policy = engine.get_accel();
    GAPI_Assert(acceleration_policy && "Empty acceleration_policy");
    request_free_surface(session, vpp_pool_id, *acceleration_policy,
                         processing_surface_ptr, true);
}

void VPPPreprocSession::init_surface_pool(VPLAccelerationPolicy::pool_key_t key) {
    GAPI_Assert(key && "Init preproc pool with empty key");
    vpp_pool_id = key;
}

const mfxFrameInfo& VPPPreprocSession::get_video_param() const {
    return mfx_vpp_out_param.vpp.Out;
}
} // namespace onevpl
} // namespace wip
} // namespace gapi
} // namespace cv
#endif // HAVE_ONEVPL
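generate_frame_meta() tags every preprocessed frame with a wall-clock timestamp (microseconds) and a per-session sequence id under the standard streaming meta tags. A sketch of reading them back on the consumer side, assuming the usual cv::gapi::wip::Data meta map and cv::util::any_cast (illustrative only):

    #include <opencv2/gapi/streaming/meta.hpp>

    void log_frame_meta(const cv::gapi::wip::Data &data) {
        // Values were stored as int64_t by VPPPreprocSession::generate_frame_meta().
        const int64_t ts  = cv::util::any_cast<int64_t>(
            data.meta.at(cv::gapi::streaming::meta_tag::timestamp));
        const int64_t seq = cv::util::any_cast<int64_t>(
            data.meta.at(cv::gapi::streaming::meta_tag::seq_id));
        GAPI_LOG_DEBUG(nullptr, "preprocessed frame #" << seq << " at " << ts << " us");
    }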
@ -0,0 +1,61 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2022 Intel Corporation

#ifndef GAPI_STREAMING_ONVPL_PREPROC_SESSION_HPP
#define GAPI_STREAMING_ONVPL_PREPROC_SESSION_HPP
#include <memory>
#include <queue>

#include <opencv2/gapi/streaming/meta.hpp>
#include "streaming/onevpl/engine/engine_session.hpp"
#include "streaming/onevpl/accelerators/accel_policy_interface.hpp"
#include "streaming/onevpl/engine/preproc/vpp_preproc_defines.hpp"

#ifdef HAVE_ONEVPL

namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {
class VPPPreprocEngine;

class VPPPreprocSession : public EngineSession {
public:
    friend class VPPPreprocEngine;
    VPPPreprocSession(mfxSession sess, const mfxVideoParam &vpp_out_param);
    ~VPPPreprocSession();

    Data::Meta generate_frame_meta();
    void swap_surface(VPPPreprocEngine& engine);
    void init_surface_pool(VPLAccelerationPolicy::pool_key_t key);

    virtual const mfxFrameInfo& get_video_param() const override;
private:
    mfxVideoParam mfx_vpp_out_param;
    VPLAccelerationPolicy::pool_key_t vpp_pool_id;
    std::weak_ptr<Surface> processing_surface_ptr;

    struct incoming_task {
        mfxSyncPoint sync_handle;
        mfxFrameSurface1* decoded_surface_ptr;
        cv::MediaFrame decoded_frame_copy;
    };

    struct outgoing_task {
        mfxSyncPoint sync_handle;
        mfxFrameSurface1* vpp_surface_ptr;
    };

    std::queue<incoming_task> sync_in_queue;
    std::queue<outgoing_task> vpp_out_queue;
    int64_t preprocessed_frames_count;
};
} // namespace onevpl
} // namespace wip
} // namespace gapi
} // namespace cv
#endif // HAVE_ONEVPL
#endif // GAPI_STREAMING_ONVPL_PREPROC_SESSION_HPP
modules/gapi/src/streaming/onevpl/engine/preproc/utils.cpp
@ -0,0 +1,86 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2022 Intel Corporation

#include <type_traits>

#include "streaming/onevpl/engine/preproc/utils.hpp"

#ifdef HAVE_ONEVPL
#include "streaming/onevpl/onevpl_export.hpp"
#include "logger.hpp"

namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {
namespace utils {

cv::MediaFormat fourcc_to_MediaFormat(int value) {
    switch (value)
    {
        case MFX_FOURCC_BGRP:
            return cv::MediaFormat::BGR;
        case MFX_FOURCC_NV12:
            return cv::MediaFormat::NV12;
        default:
            GAPI_LOG_WARNING(nullptr, "Unsupported FourCC format requested: " << value <<
                                      ". Cannot cast to cv::MediaFrame");
            GAPI_Assert(false && "Unsupported FOURCC");
    }
}

int MediaFormat_to_fourcc(cv::MediaFormat value) {
    switch (value)
    {
        case cv::MediaFormat::BGR:
            return MFX_FOURCC_BGRP;
        case cv::MediaFormat::NV12:
            return MFX_FOURCC_NV12;
        default:
            GAPI_LOG_WARNING(nullptr, "Unsupported cv::MediaFormat format requested: " <<
                                      static_cast<typename std::underlying_type<cv::MediaFormat>::type>(value) <<
                                      ". Cannot cast to FourCC");
            GAPI_Assert(false && "Unsupported cv::MediaFormat");
    }
}
int MediaFormat_to_chroma(cv::MediaFormat value) {
    switch (value)
    {
        case cv::MediaFormat::BGR:
            return MFX_CHROMAFORMAT_MONOCHROME;
        case cv::MediaFormat::NV12:
            return MFX_CHROMAFORMAT_YUV420;
        default:
            GAPI_LOG_WARNING(nullptr, "Unsupported cv::MediaFormat format requested: " <<
                                      static_cast<typename std::underlying_type<cv::MediaFormat>::type>(value) <<
                                      ". Cannot cast to ChromaFormatIdc");
            GAPI_Assert(false && "Unsupported cv::MediaFormat");
    }
}

mfxFrameInfo to_mfxFrameInfo(const cv::GFrameDesc& frame_info) {
    mfxFrameInfo ret {0};
    ret.FourCC = MediaFormat_to_fourcc(frame_info.fmt);
    ret.ChromaFormat = MediaFormat_to_chroma(frame_info.fmt);
    ret.Width = frame_info.size.width;
    ret.Height = frame_info.size.height;
    ret.CropX = 0;
    ret.CropY = 0;
    ret.CropW = 0;
    ret.CropH = 0;
    ret.PicStruct = MFX_PICSTRUCT_UNKNOWN;
    ret.FrameRateExtN = 0;
    ret.FrameRateExtD = 0;
    return ret;
}
} // namespace utils
} // namespace onevpl
} // namespace wip
} // namespace gapi
} // namespace cv

#endif // HAVE_ONEVPL
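to_mfxFrameInfo() is the bridge from a G-API frame descriptor to the oneVPL representation; only the pixel format and size are filled in, everything else is left to the VPP configuration step. A short illustration:

    // Illustrative: describing a 640x480 NV12 network input as a oneVPL frame info.
    cv::GFrameDesc descr { cv::MediaFormat::NV12, cv::Size{640, 480} };
    mfxFrameInfo info = cv::gapi::wip::onevpl::utils::to_mfxFrameInfo(descr);
    // info.FourCC == MFX_FOURCC_NV12, info.ChromaFormat == MFX_CHROMAFORMAT_YUV420,
    // info.Width == 640, info.Height == 480; crop, picture structure and frame rate stay zeroed.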
modules/gapi/src/streaming/onevpl/engine/preproc/utils.hpp
@ -0,0 +1,32 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2022 Intel Corporation

#ifndef GAPI_STREAMING_ONEVPL_PREPROC_UTILS_HPP
#define GAPI_STREAMING_ONEVPL_PREPROC_UTILS_HPP

#ifdef HAVE_ONEVPL
#include "streaming/onevpl/onevpl_export.hpp"

#include <opencv2/gapi/gframe.hpp>

namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {
namespace utils {

cv::MediaFormat fourcc_to_MediaFormat(int value);
int MediaFormat_to_fourcc(cv::MediaFormat value);
int MediaFormat_to_chroma(cv::MediaFormat value);

mfxFrameInfo to_mfxFrameInfo(const cv::GFrameDesc& frame_info);
} // namespace utils
} // namespace onevpl
} // namespace wip
} // namespace gapi
} // namespace cv
#endif // #ifdef HAVE_ONEVPL
#endif // GAPI_STREAMING_ONEVPL_PREPROC_UTILS_HPP
@ -0,0 +1,29 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2022 Intel Corporation

#ifdef HAVE_ONEVPL

#ifndef VPP_PREPROC_ENGINE
#define VPP_PREPROC_ENGINE
#include "streaming/onevpl/onevpl_export.hpp"
#include "streaming/onevpl/engine/engine_session.hpp"

namespace cv {
namespace gapi {
namespace wip {
namespace onevpl {
struct vpp_pp_params {
    mfxSession handle;
    mfxFrameInfo info;
};

using vpp_pp_session_ptr = std::shared_ptr<EngineSession>;
} // namespace onevpl
} // namespace wip
} // namespace gapi
} // namespace cv
#endif // VPP_PREPROC_ENGINE
#endif // HAVE_ONEVPL
modules/gapi/src/streaming/onevpl/engine/preproc_defines.hpp
@ -0,0 +1,89 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2022 Intel Corporation

#ifndef GAPI_STREAMING_ONEVPL_ENGINE_PREPROC_DEFINES_HPP
#define GAPI_STREAMING_ONEVPL_ENGINE_PREPROC_DEFINES_HPP

#ifdef HAVE_ONEVPL
#include "streaming/onevpl/utils.hpp"
#include "streaming/onevpl/engine/preproc/vpp_preproc_defines.hpp"
#endif // HAVE_ONEVPL


namespace cv {
namespace gapi {
namespace wip {

#ifdef VPP_PREPROC_ENGINE
#define GAPI_BACKEND_PP_PARAMS cv::gapi::wip::onevpl::vpp_pp_params
#define GAPI_BACKEND_PP_SESSIONS cv::gapi::wip::onevpl::vpp_pp_session_ptr
#else // VPP_PREPROC_ENGINE
struct empty_pp_params {};
struct empty_pp_session {};
#define GAPI_BACKEND_PP_PARAMS cv::gapi::wip::empty_pp_params;
#define GAPI_BACKEND_PP_SESSIONS std::shared_ptr<cv::gapi::wip::empty_pp_session>;
#endif // VPP_PREPROC_ENGINE

struct pp_params {
    using value_type = cv::util::variant<GAPI_BACKEND_PP_PARAMS>;

    template<typename BackendSpecificParamType, typename ...Args>
    static pp_params create(Args&& ...args) {
        static_assert(cv::detail::contains<BackendSpecificParamType, GAPI_BACKEND_PP_PARAMS>::value,
                      "Invalid BackendSpecificParamType requested");
        pp_params ret;
        ret.value = BackendSpecificParamType{std::forward<Args>(args)...};
        return ret;
    }

    template<typename BackendSpecificParamType>
    BackendSpecificParamType& get() {
        static_assert(cv::detail::contains<BackendSpecificParamType, GAPI_BACKEND_PP_PARAMS>::value,
                      "Invalid BackendSpecificParamType requested");
        return cv::util::get<BackendSpecificParamType>(value);
    }

    template<typename BackendSpecificParamType>
    const BackendSpecificParamType& get() const {
        return static_cast<const BackendSpecificParamType&>(const_cast<pp_params*>(this)->get<BackendSpecificParamType>());
    }
private:
    value_type value;
};

struct pp_session {
    using value_type = cv::util::variant<GAPI_BACKEND_PP_SESSIONS>;

    template<typename BackendSpecificSesionType>
    static pp_session create(std::shared_ptr<BackendSpecificSesionType> session) {
        static_assert(cv::detail::contains<std::shared_ptr<BackendSpecificSesionType>,
                                           GAPI_BACKEND_PP_SESSIONS>::value,
                      "Invalid BackendSpecificSesionType requested");
        pp_session ret;
        ret.value = session;
        return ret;
    }

    template<typename BackendSpecificSesionType>
    std::shared_ptr<BackendSpecificSesionType> get() {
        using ptr_type = std::shared_ptr<BackendSpecificSesionType>;
        static_assert(cv::detail::contains<ptr_type, GAPI_BACKEND_PP_SESSIONS>::value,
                      "Invalid BackendSpecificSesionType requested");
        return cv::util::get<ptr_type>(value);
    }

    template<typename BackendSpecificSesionType>
    std::shared_ptr<BackendSpecificSesionType> get() const {
        return const_cast<pp_session*>(this)->get<BackendSpecificSesionType>();
    }
private:
    value_type value;
};
} // namespace wip
} // namespace gapi
} // namespace cv

#endif // GAPI_STREAMING_ONEVPL_ENGINE_PREPROC_DEFINES_HPP
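pp_params and pp_session are thin variant wrappers that let backend-agnostic code carry VPP-specific data around without including oneVPL headers. A sketch of the round trip, where the function name and the `session`/`info` values are placeholders:

    void pack_and_unpack(mfxSession session, const mfxFrameInfo &info) {
        using cv::gapi::wip::pp_params;
        using cv::gapi::wip::onevpl::vpp_pp_params;

        // create<>() aggregates the arguments into the backend-specific struct...
        pp_params generic = pp_params::create<vpp_pp_params>(session, info);
        // ...and get<>() recovers it with a compile-time check of the requested type.
        const vpp_pp_params &vpp = generic.get<vpp_pp_params>();
        GAPI_Assert(vpp.handle == session);
    }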
@ -0,0 +1,35 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2022 Intel Corporation

#ifndef GAPI_STREAMING_ONEVPL_ENGINE_PROCESSING_ENGINE_INTERFACE_HPP
#define GAPI_STREAMING_ONEVPL_ENGINE_PROCESSING_ENGINE_INTERFACE_HPP

#include "precomp.hpp"
#include <opencv2/gapi/media.hpp>
#include <opencv2/gapi/util/optional.hpp>

#include "streaming/onevpl/engine/preproc_defines.hpp"

namespace cv {
namespace gapi {
namespace wip {

struct IPreprocEngine {
    virtual ~IPreprocEngine() = default;

    virtual cv::util::optional<pp_params>
        is_applicable(const cv::MediaFrame& in_frame) = 0;

    virtual pp_session
        initialize_preproc(const pp_params& initial_frame_param,
                           const GFrameDesc& required_frame_descr) = 0;
    virtual cv::MediaFrame
        run_sync(const pp_session &sess, const cv::MediaFrame& in_frame) = 0;
};
} // namespace wip
} // namespace gapi
} // namespace cv
#endif // GAPI_STREAMING_ONEVPL_ENGINE_PROCESSING_ENGINE_INTERFACE_HPP
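IPreprocEngine is what the inference backend programs against; VPPPreprocEngine above is its only real implementation in this patch. A trivial pass-through implementation, e.g. as a test double, could look like the sketch below (illustrative only, not part of the patch):

    struct BypassPreprocEngine final : public cv::gapi::wip::IPreprocEngine {
        cv::util::optional<cv::gapi::wip::pp_params>
        is_applicable(const cv::MediaFrame&) override {
            return {};                                   // never applicable
        }
        cv::gapi::wip::pp_session
        initialize_preproc(const cv::gapi::wip::pp_params&,
                           const cv::GFrameDesc&) override {
            return {};                                   // unreachable for a bypass engine
        }
        cv::MediaFrame run_sync(const cv::gapi::wip::pp_session&,
                                const cv::MediaFrame& in_frame) override {
            return in_frame;                             // hand the frame back untouched
        }
    };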
@ -26,34 +26,29 @@ namespace cv {
|
||||
namespace gapi {
|
||||
namespace wip {
|
||||
namespace onevpl {
|
||||
using vpp_param_storage = const std::map<std::string, mfxVariant>;
|
||||
using vpp_param_storage_cit = typename vpp_param_storage::const_iterator;
|
||||
|
||||
template<typename Type>
|
||||
bool set_vpp_param(const char* name, Type& out_vpp_param,
|
||||
const std::map<std::string, mfxVariant> ¶ms_storage,
|
||||
mfxSession session);
|
||||
Type get_mfx_value(const vpp_param_storage_cit &cit);
|
||||
|
||||
template<>
|
||||
bool set_vpp_param<uint32_t>(const char* name, uint32_t& out_vpp_param,
|
||||
const std::map<std::string, mfxVariant> ¶ms_storage,
|
||||
mfxSession session) {
|
||||
auto it = params_storage.find(name);
|
||||
if (it != params_storage.end()) {
|
||||
auto value = it->second.Data.U32;
|
||||
GAPI_LOG_INFO(nullptr, "[" << session << "] set \"" << name <<
|
||||
"\": " << value);
|
||||
out_vpp_param = value;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
uint16_t get_mfx_value<uint16_t>(const vpp_param_storage_cit& cit) {
|
||||
return cit->second.Data.U16;
|
||||
}
|
||||
|
||||
template<>
|
||||
bool set_vpp_param<uint16_t>(const char* name, uint16_t& out_vpp_param,
|
||||
const std::map<std::string, mfxVariant> ¶ms_storage,
|
||||
mfxSession session) {
|
||||
uint32_t get_mfx_value<uint32_t>(const vpp_param_storage_cit& cit) {
|
||||
return cit->second.Data.U32;
|
||||
}
|
||||
|
||||
template<typename Type>
|
||||
bool set_vpp_param(const char* name, Type& out_vpp_param,
|
||||
const vpp_param_storage ¶ms_storage,
|
||||
mfxSession session) {
|
||||
auto it = params_storage.find(name);
|
||||
if (it != params_storage.end()) {
|
||||
auto value = it->second.Data.U16;
|
||||
auto value = get_mfx_value<Type>(it);
|
||||
GAPI_LOG_INFO(nullptr, "[" << session << "] set \"" << name <<
|
||||
"\": " << value);
|
||||
out_vpp_param = value;
|
||||
@ -81,7 +76,6 @@ VPLLegacyTranscodeEngine::VPLLegacyTranscodeEngine(std::unique_ptr<VPLAccelerati
|
||||
: VPLLegacyDecodeEngine(std::move(accel)) {
|
||||
|
||||
GAPI_LOG_INFO(nullptr, "Create Legacy Transcode Engine");
|
||||
//inject_pipeline_operations(2,
|
||||
create_pipeline(
|
||||
// 1) Read File
|
||||
[this] (EngineSession& sess) -> ExecutionStatus
|
||||
@ -110,11 +104,8 @@ VPLLegacyTranscodeEngine::VPLLegacyTranscodeEngine(std::unique_ptr<VPLAccelerati
|
||||
// enqueue decode operation with current session surface
|
||||
my_sess.last_status =
|
||||
MFXVideoDECODE_DecodeFrameAsync(my_sess.session,
|
||||
(my_sess.data_provider || (my_sess.stream && my_sess.stream->DataLength))
|
||||
? my_sess.stream.get()
|
||||
|
||||
: nullptr, /* No more data to read, start decode draining mode*/
|
||||
my_sess.procesing_surface_ptr.lock()->get_handle(),
|
||||
my_sess.get_mfx_bitstream_ptr(),
|
||||
my_sess.processing_surface_ptr.lock()->get_handle(),
|
||||
&sync_pair.second,
|
||||
&sync_pair.first);
|
||||
|
||||
@ -122,7 +113,7 @@ VPLLegacyTranscodeEngine::VPLLegacyTranscodeEngine(std::unique_ptr<VPLAccelerati
|
||||
", sync id: " <<
|
||||
sync_pair.first <<
|
||||
", dec in surface: " <<
|
||||
my_sess.procesing_surface_ptr.lock()->get_handle() <<
|
||||
my_sess.processing_surface_ptr.lock()->get_handle() <<
|
||||
", dec out surface: " << sync_pair.second <<
|
||||
", status: " <<
|
||||
mfxstatus_to_string(my_sess.last_status));
|
||||
@ -134,12 +125,12 @@ VPLLegacyTranscodeEngine::VPLLegacyTranscodeEngine(std::unique_ptr<VPLAccelerati
|
||||
my_sess.last_status == MFX_WRN_DEVICE_BUSY) {
|
||||
try {
|
||||
if (my_sess.last_status == MFX_ERR_MORE_SURFACE) {
|
||||
my_sess.swap_surface(*this);
|
||||
my_sess.swap_decode_surface(*this);
|
||||
}
|
||||
my_sess.last_status =
|
||||
MFXVideoDECODE_DecodeFrameAsync(my_sess.session,
|
||||
my_sess.stream.get(),
|
||||
my_sess.procesing_surface_ptr.lock()->get_handle(),
|
||||
my_sess.get_mfx_bitstream_ptr(),
|
||||
my_sess.processing_surface_ptr.lock()->get_handle(),
|
||||
&sync_pair.second,
|
||||
&sync_pair.first);
|
||||
|
||||
@ -287,11 +278,11 @@ VPLLegacyTranscodeEngine::initialize_session(mfxSession mfx_session,
|
||||
|
||||
// override some in-params
|
||||
if (set_vpp_param(CfgParam::vpp_in_width_name(), mfxVPPParams.vpp.In.Width,
|
||||
cfg_vpp_params, mfx_session)) {
|
||||
cfg_vpp_params, mfx_session)) {
|
||||
mfxVPPParams.vpp.In.Width = ALIGN16(mfxVPPParams.vpp.In.Width);
|
||||
}
|
||||
if (set_vpp_param(CfgParam::vpp_in_height_name(), mfxVPPParams.vpp.In.Height,
|
||||
cfg_vpp_params, mfx_session)) {
|
||||
cfg_vpp_params, mfx_session)) {
|
||||
mfxVPPParams.vpp.In.Height = ALIGN16(mfxVPPParams.vpp.In.Height);
|
||||
}
|
||||
set_vpp_param(CfgParam::vpp_in_crop_x_name(), mfxVPPParams.vpp.In.CropX,
|
||||
@ -309,11 +300,11 @@ VPLLegacyTranscodeEngine::initialize_session(mfxSession mfx_session,
|
||||
set_vpp_param(CfgParam::vpp_out_chroma_format_name(), mfxVPPParams.vpp.Out.ChromaFormat,
|
||||
cfg_vpp_params, mfx_session);
|
||||
if (set_vpp_param(CfgParam::vpp_out_width_name(), mfxVPPParams.vpp.Out.Width,
|
||||
cfg_vpp_params, mfx_session)) {
|
||||
cfg_vpp_params, mfx_session)) {
|
||||
mfxVPPParams.vpp.Out.Width = ALIGN16(mfxVPPParams.vpp.Out.Width);
|
||||
}
|
||||
if (set_vpp_param(CfgParam::vpp_out_height_name(), mfxVPPParams.vpp.Out.Height,
|
||||
cfg_vpp_params, mfx_session)) {
|
||||
cfg_vpp_params, mfx_session)) {
|
||||
mfxVPPParams.vpp.Out.Height = ALIGN16(mfxVPPParams.vpp.Out.Height);
|
||||
}
|
||||
set_vpp_param(CfgParam::vpp_out_crop_x_name(), mfxVPPParams.vpp.Out.CropX,
|
||||
@ -394,7 +385,7 @@ VPLLegacyTranscodeEngine::initialize_session(mfxSession mfx_session,
|
||||
sess_ptr->init_transcode_surface_pool(vpp_out_pool_key);
|
||||
|
||||
// prepare working surfaces
|
||||
sess_ptr->swap_surface(*this);
|
||||
sess_ptr->swap_decode_surface(*this);
|
||||
sess_ptr->swap_transcode_surface(*this);
|
||||
return sess_ptr;
|
||||
}
|
||||
@ -452,10 +443,6 @@ void VPLLegacyTranscodeEngine::validate_vpp_param(const mfxVideoParam& mfxVPPPar
|
||||
GAPI_LOG_INFO(nullptr, "Finished VPP param validation");
|
||||
}
|
||||
|
||||
ProcessingEngineBase::ExecutionStatus VPLLegacyTranscodeEngine::execute_op(operation_t& op, EngineSession& sess) {
|
||||
return op(sess);
|
||||
}
|
||||
|
||||
void VPLLegacyTranscodeEngine::on_frame_ready(LegacyTranscodeSession& sess,
|
||||
mfxFrameSurface1* ready_surface)
|
||||
{
|
||||
@ -463,8 +450,10 @@ void VPLLegacyTranscodeEngine::on_frame_ready(LegacyTranscodeSession& sess,
|
||||
|
||||
// manage memory ownership rely on acceleration policy
|
||||
ready_surface->Data.Locked--; // TODO -S- workaround
|
||||
|
||||
VPLAccelerationPolicy::FrameConstructorArgs args{ready_surface, sess.session};
|
||||
auto frame_adapter = acceleration_policy->create_frame_adapter(sess.vpp_out_pool_id,
|
||||
ready_surface);
|
||||
args);
|
||||
ready_frames.emplace(cv::MediaFrame(std::move(frame_adapter)), sess.generate_frame_meta());
|
||||
|
||||
// pop away synced out object
|
||||
|
@ -33,8 +33,6 @@ public:
|
||||
|
||||
static std::map<std::string, mfxVariant> get_vpp_params(const std::vector<CfgParam> &cfg_params);
|
||||
private:
|
||||
ExecutionStatus execute_op(operation_t& op, EngineSession& sess) override;
|
||||
|
||||
void on_frame_ready(LegacyTranscodeSession& sess,
|
||||
mfxFrameSurface1* ready_surface);
|
||||
void validate_vpp_param(const mfxVideoParam& mfxVPPParams);
|
||||
|
@ -42,22 +42,7 @@ void LegacyTranscodeSession::init_transcode_surface_pool(VPLAccelerationPolicy::
|
||||
void LegacyTranscodeSession::swap_transcode_surface(VPLLegacyTranscodeEngine& engine) {
|
||||
VPLAccelerationPolicy* acceleration_policy = engine.get_accel();
|
||||
GAPI_Assert(acceleration_policy && "Empty acceleration_policy");
|
||||
try {
|
||||
auto cand = acceleration_policy->get_free_surface(vpp_out_pool_id).lock();
|
||||
|
||||
GAPI_LOG_DEBUG(nullptr, "[" << session << "] swap surface"
|
||||
", old: " << (!vpp_surface_ptr.expired()
|
||||
? vpp_surface_ptr.lock()->get_handle()
|
||||
: nullptr) <<
|
||||
", new: "<< cand->get_handle());
|
||||
|
||||
vpp_surface_ptr = cand;
|
||||
} catch (const std::runtime_error& ex) {
|
||||
GAPI_LOG_WARNING(nullptr, "[" << session << "] error: " << ex.what());
|
||||
|
||||
// Delegate exception processing on caller
|
||||
throw;
|
||||
}
|
||||
request_free_surface(session, vpp_out_pool_id, *acceleration_policy, vpp_surface_ptr);
|
||||
}
|
||||
|
||||
const mfxFrameInfo& LegacyTranscodeSession::get_video_param() const {
|
||||
|
@ -14,10 +14,7 @@ namespace cv {
|
||||
namespace gapi {
|
||||
namespace wip {
|
||||
namespace onevpl {
|
||||
|
||||
struct IDataProvider;
|
||||
class Surface;
|
||||
struct VPLAccelerationPolicy;
|
||||
|
||||
class GAPI_EXPORTS LegacyTranscodeSession : public LegacyDecodeSession {
|
||||
public:
|
||||
@ -33,8 +30,8 @@ public:
|
||||
const mfxFrameInfo& get_video_param() const override;
|
||||
private:
|
||||
mfxVideoParam mfx_transcoder_param;
|
||||
|
||||
VPLAccelerationPolicy::pool_key_t vpp_out_pool_id;
|
||||
|
||||
std::weak_ptr<Surface> vpp_surface_ptr;
|
||||
std::queue<op_handle_t> vpp_queue;
|
||||
};
|
||||
|
@ -16,6 +16,9 @@
|
||||
|
||||
#include <vpl/mfx.h>
|
||||
#include <vpl/mfxvideo.h>
|
||||
|
||||
extern mfxLoader mfx_handle;
|
||||
extern int impl_number;
|
||||
#endif // HAVE_ONEVPL
|
||||
|
||||
#if defined(_MSC_VER)
|
||||
|
@ -36,6 +36,10 @@ GMetaArg GSource::Priv::descr_of() const {
|
||||
|
||||
#else // HAVE_ONEVPL
|
||||
|
||||
// TODO global variable move it into Source after CloneSession issue resolving
|
||||
mfxLoader mfx_handle = MFXLoad();
|
||||
int impl_number = 0;
|
||||
|
||||
namespace cv {
|
||||
namespace gapi {
|
||||
namespace wip {
|
||||
@ -47,7 +51,7 @@ enum {
|
||||
};
|
||||
|
||||
GSource::Priv::Priv() :
|
||||
mfx_handle(MFXLoad()),
|
||||
// mfx_handle(MFXLoad()),
|
||||
mfx_impl_description(),
|
||||
mfx_handle_configs(),
|
||||
cfg_params(),
|
||||
@ -187,7 +191,8 @@ GSource::Priv::Priv(std::shared_ptr<IDataProvider> provider,
|
||||
GAPI_Assert(max_match_it != matches_count.rend() &&
|
||||
"Cannot find matched MFX implementation for requested configuration");
|
||||
|
||||
int impl_number = max_match_it->second;
|
||||
// TODO impl_number is global for now
|
||||
impl_number = max_match_it->second;
|
||||
GAPI_LOG_INFO(nullptr, "Chosen implementation index: " << impl_number);
|
||||
|
||||
// release unusable impl available_impl_descriptions
|
||||
@ -261,7 +266,7 @@ GSource::Priv::~Priv() {
|
||||
GAPI_LOG_INFO(nullptr, "Unload MFX implementation description: " << mfx_impl_description);
|
||||
MFXDispReleaseImplDescription(mfx_handle, mfx_impl_description);
|
||||
GAPI_LOG_INFO(nullptr, "Unload MFX handle: " << mfx_handle);
|
||||
MFXUnload(mfx_handle);
|
||||
//MFXUnload(mfx_handle);
|
||||
}
|
||||
|
||||
std::unique_ptr<VPLAccelerationPolicy> GSource::Priv::initializeHWAccel(std::shared_ptr<IDeviceSelector> selector)
|
||||
|
@ -44,7 +44,8 @@ private:
|
||||
Priv();
|
||||
std::unique_ptr<VPLAccelerationPolicy> initializeHWAccel(std::shared_ptr<IDeviceSelector> selector);
|
||||
|
||||
mfxLoader mfx_handle;
|
||||
// TODO not it is global variable. Waiting for FIX issue with CloneSession
|
||||
// mfxLoader mfx_handle;
|
||||
mfxImplDescription *mfx_impl_description;
|
||||
std::vector<mfxConfig> mfx_handle_configs;
|
||||
std::vector<CfgParam> cfg_params;
|
||||
|
@ -25,6 +25,8 @@
|
||||
#define APPEND_STRINGIFY_MASK_N_ERASE(value, pref, mask) \
|
||||
if (value & mask) { ss << pref << #mask; value ^= mask; }
|
||||
|
||||
#define DUMP_MEMBER(stream, object, member) \
|
||||
stream << #member << ": " << object.member << "\n";
|
||||
|
||||
namespace cv {
|
||||
namespace gapi {
|
||||
@ -359,6 +361,42 @@ std::string mfxstatus_to_string(mfxStatus err) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::string mfx_frame_info_to_string(const mfxFrameInfo &info) {
|
||||
std::stringstream ss;
|
||||
DUMP_MEMBER(ss, info, FrameRateExtN)
|
||||
DUMP_MEMBER(ss, info, FrameRateExtD)
|
||||
DUMP_MEMBER(ss, info, AspectRatioW)
|
||||
DUMP_MEMBER(ss, info, AspectRatioH)
|
||||
DUMP_MEMBER(ss, info, CropX)
|
||||
DUMP_MEMBER(ss, info, CropY)
|
||||
DUMP_MEMBER(ss, info, CropW)
|
||||
DUMP_MEMBER(ss, info, CropH)
|
||||
DUMP_MEMBER(ss, info, ChannelId)
|
||||
DUMP_MEMBER(ss, info, BitDepthLuma)
|
||||
DUMP_MEMBER(ss, info, BitDepthChroma)
|
||||
DUMP_MEMBER(ss, info, Shift)
|
||||
DUMP_MEMBER(ss, info, FourCC)
|
||||
DUMP_MEMBER(ss, info, Width)
|
||||
DUMP_MEMBER(ss, info, Height)
|
||||
DUMP_MEMBER(ss, info, BufferSize)
|
||||
DUMP_MEMBER(ss, info, PicStruct)
|
||||
DUMP_MEMBER(ss, info, ChromaFormat);
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
int compare(const mfxFrameInfo &lhs, const mfxFrameInfo &rhs) {
|
||||
//NB: mfxFrameInfo is a `packed` struct declared in VPL
|
||||
return memcmp(&lhs, &rhs, sizeof(mfxFrameInfo));
|
||||
}
|
||||
|
||||
bool operator< (const mfxFrameInfo &lhs, const mfxFrameInfo &rhs) {
|
||||
return (compare(lhs, rhs) < 0);
|
||||
}
|
||||
|
||||
bool operator== (const mfxFrameInfo &lhs, const mfxFrameInfo &rhs) {
|
||||
return (compare(lhs, rhs) == 0);
|
||||
}
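Because the ordering is a raw memcmp over the packed struct, two infos that differ in any field, even one irrelevant to preprocessing, compare as distinct; that is exactly the property the per-layout VPP session map relies on. A tiny illustration, assuming it is compiled inside this namespace so the operators are visible:

    static void check_frame_info_ordering() {
        mfxFrameInfo a{}, b{};
        b.Width = 1920;
        GAPI_Assert(!(a == b));             // differ in one field -> not equal
        GAPI_Assert((a < b) || (b < a));    // and strictly ordered one way or the other
    }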
|
||||
|
||||
std::string ext_mem_frame_type_to_cstr(int type) {
|
||||
std::stringstream ss;
|
||||
APPEND_STRINGIFY_MASK_N_ERASE(type, "|", MFX_MEMTYPE_DXVA2_DECODER_TARGET);
|
||||
|
@ -76,6 +76,10 @@ mfxU32 cstr_to_mfx_version(const char* cstr);
|
||||
std::string GAPI_EXPORTS mfxstatus_to_string(int64_t err);
|
||||
std::string GAPI_EXPORTS mfxstatus_to_string(mfxStatus err);
|
||||
|
||||
std::string mfx_frame_info_to_string(const mfxFrameInfo &info);
|
||||
bool operator< (const mfxFrameInfo &lhs, const mfxFrameInfo &rhs);
|
||||
bool operator== (const mfxFrameInfo &lhs, const mfxFrameInfo &rhs);
|
||||
|
||||
std::ostream& operator<< (std::ostream& out, const mfxImplDescription& idesc);
|
||||
|
||||
std::string ext_mem_frame_type_to_cstr(int type);
|
||||
|
@ -68,7 +68,7 @@ struct EmptyDataProvider : public cv::gapi::wip::onevpl::IDataProvider {
|
||||
|
||||
struct TestProcessingSession : public cv::gapi::wip::onevpl::EngineSession {
|
||||
TestProcessingSession(mfxSession mfx_session) :
|
||||
EngineSession(mfx_session, {}) {
|
||||
EngineSession(mfx_session) {
|
||||
}
|
||||
|
||||
const mfxFrameInfo& get_video_param() const override {
|
||||
@ -319,7 +319,8 @@ TEST(OneVPL_Source_CPU_FrameAdapter, InitFrameAdapter)
|
||||
EXPECT_EQ(0, surf->get_locks_count());
|
||||
|
||||
{
|
||||
VPLMediaFrameCPUAdapter adapter(surf);
|
||||
mfxSession stub_session = reinterpret_cast<mfxSession>(0x1);
|
||||
VPLMediaFrameCPUAdapter adapter(surf, stub_session);
|
||||
EXPECT_EQ(1, surf->get_locks_count());
|
||||
}
|
||||
EXPECT_EQ(0, surf->get_locks_count());
|
||||
@ -528,9 +529,9 @@ TEST(OneVPL_Source_DX11_Accel, Init)
|
||||
cfg_params_w_dx11.push_back(CfgParam::create_acceleration_mode(MFX_ACCEL_MODE_VIA_D3D11));
|
||||
VPLDX11AccelerationPolicy accel(std::make_shared<CfgParamDeviceSelector>(cfg_params_w_dx11));
|
||||
|
||||
mfxLoader mfx_handle = MFXLoad();
|
||||
mfxLoader test_mfx_handle = MFXLoad();
|
||||
|
||||
mfxConfig cfg_inst_0 = MFXCreateConfig(mfx_handle);
|
||||
mfxConfig cfg_inst_0 = MFXCreateConfig(test_mfx_handle);
|
||||
EXPECT_TRUE(cfg_inst_0);
|
||||
mfxVariant mfx_param_0;
|
||||
mfx_param_0.Type = MFX_VARIANT_TYPE_U32;
|
||||
@ -538,7 +539,7 @@ TEST(OneVPL_Source_DX11_Accel, Init)
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_0,(mfxU8 *)CfgParam::implementation_name(),
|
||||
mfx_param_0), MFX_ERR_NONE);
|
||||
|
||||
mfxConfig cfg_inst_1 = MFXCreateConfig(mfx_handle);
|
||||
mfxConfig cfg_inst_1 = MFXCreateConfig(test_mfx_handle);
|
||||
EXPECT_TRUE(cfg_inst_1);
|
||||
mfxVariant mfx_param_1;
|
||||
mfx_param_1.Type = MFX_VARIANT_TYPE_U32;
|
||||
@ -546,7 +547,7 @@ TEST(OneVPL_Source_DX11_Accel, Init)
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_1,(mfxU8 *)CfgParam::acceleration_mode_name(),
|
||||
mfx_param_1), MFX_ERR_NONE);
|
||||
|
||||
mfxConfig cfg_inst_2 = MFXCreateConfig(mfx_handle);
|
||||
mfxConfig cfg_inst_2 = MFXCreateConfig(test_mfx_handle);
|
||||
EXPECT_TRUE(cfg_inst_2);
|
||||
mfxVariant mfx_param_2;
|
||||
mfx_param_2.Type = MFX_VARIANT_TYPE_U32;
|
||||
@ -556,7 +557,7 @@ TEST(OneVPL_Source_DX11_Accel, Init)
|
||||
|
||||
// create session
|
||||
mfxSession mfx_session{};
|
||||
mfxStatus sts = MFXCreateSession(mfx_handle, 0, &mfx_session);
|
||||
mfxStatus sts = MFXCreateSession(test_mfx_handle, 0, &mfx_session);
|
||||
EXPECT_EQ(MFX_ERR_NONE, sts);
|
||||
|
||||
// assign acceleration
|
||||
@ -600,7 +601,7 @@ TEST(OneVPL_Source_DX11_Accel, Init)
|
||||
|
||||
EXPECT_NO_THROW(accel.deinit(mfx_session));
|
||||
MFXClose(mfx_session);
|
||||
MFXUnload(mfx_handle);
|
||||
MFXUnload(test_mfx_handle);
|
||||
}
|
||||
|
||||
TEST(OneVPL_Source_DX11_Accel_VPL, Init)
|
||||
@ -611,9 +612,9 @@ TEST(OneVPL_Source_DX11_Accel_VPL, Init)
|
||||
cfg_params_w_dx11.push_back(CfgParam::create_acceleration_mode(MFX_ACCEL_MODE_VIA_D3D11));
|
||||
std::unique_ptr<VPLAccelerationPolicy> acceleration_policy (new VPLDX11AccelerationPolicy(std::make_shared<CfgParamDeviceSelector>(cfg_params_w_dx11)));
|
||||
|
||||
mfxLoader mfx_handle = MFXLoad();
|
||||
mfxLoader test_mfx_handle = MFXLoad();
|
||||
|
||||
mfxConfig cfg_inst_0 = MFXCreateConfig(mfx_handle);
|
||||
mfxConfig cfg_inst_0 = MFXCreateConfig(test_mfx_handle);
|
||||
EXPECT_TRUE(cfg_inst_0);
|
||||
mfxVariant mfx_param_0;
|
||||
mfx_param_0.Type = MFX_VARIANT_TYPE_U32;
|
||||
@ -621,7 +622,7 @@ TEST(OneVPL_Source_DX11_Accel_VPL, Init)
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_0,(mfxU8 *)CfgParam::implementation_name(),
|
||||
mfx_param_0), MFX_ERR_NONE);
|
||||
|
||||
mfxConfig cfg_inst_1 = MFXCreateConfig(mfx_handle);
|
||||
mfxConfig cfg_inst_1 = MFXCreateConfig(test_mfx_handle);
|
||||
EXPECT_TRUE(cfg_inst_1);
|
||||
mfxVariant mfx_param_1;
|
||||
mfx_param_1.Type = MFX_VARIANT_TYPE_U32;
|
||||
@ -629,7 +630,7 @@ TEST(OneVPL_Source_DX11_Accel_VPL, Init)
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_1,(mfxU8 *)CfgParam::acceleration_mode_name(),
|
||||
mfx_param_1), MFX_ERR_NONE);
|
||||
|
||||
mfxConfig cfg_inst_2 = MFXCreateConfig(mfx_handle);
|
||||
mfxConfig cfg_inst_2 = MFXCreateConfig(test_mfx_handle);
|
||||
EXPECT_TRUE(cfg_inst_2);
|
||||
mfxVariant mfx_param_2;
|
||||
mfx_param_2.Type = MFX_VARIANT_TYPE_U32;
|
||||
@ -637,7 +638,7 @@ TEST(OneVPL_Source_DX11_Accel_VPL, Init)
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_2,(mfxU8 *)CfgParam::decoder_id_name(),
|
||||
mfx_param_2), MFX_ERR_NONE);
|
||||
|
||||
mfxConfig cfg_inst_3 = MFXCreateConfig(mfx_handle);
|
||||
mfxConfig cfg_inst_3 = MFXCreateConfig(test_mfx_handle);
|
||||
EXPECT_TRUE(cfg_inst_3);
|
||||
mfxVariant mfx_param_3;
|
||||
mfx_param_3.Type = MFX_VARIANT_TYPE_U32;
|
||||
@ -647,7 +648,7 @@ TEST(OneVPL_Source_DX11_Accel_VPL, Init)
|
||||
mfx_param_3), MFX_ERR_NONE);
|
||||
// create session
|
||||
mfxSession mfx_session{};
|
||||
mfxStatus sts = MFXCreateSession(mfx_handle, 0, &mfx_session);
|
||||
mfxStatus sts = MFXCreateSession(test_mfx_handle, 0, &mfx_session);
|
||||
EXPECT_EQ(MFX_ERR_NONE, sts);
|
||||
|
||||
// assign acceleration
|
||||
@ -732,7 +733,7 @@ TEST(OneVPL_Source_DX11_Accel_VPL, Init)
|
||||
sess_ptr->init_transcode_surface_pool(vpp_out_pool_key);
|
||||
|
||||
// prepare working surfaces
|
||||
sess_ptr->swap_surface(engine);
|
||||
sess_ptr->swap_decode_surface(engine);
|
||||
sess_ptr->swap_transcode_surface(engine);
|
||||
|
||||
// launch pipeline
|
||||
@ -756,11 +757,8 @@ TEST(OneVPL_Source_DX11_Accel_VPL, Init)
|
||||
{
|
||||
my_sess.last_status =
|
||||
MFXVideoDECODE_DecodeFrameAsync(my_sess.session,
|
||||
(my_sess.data_provider || (my_sess.stream && my_sess.stream->DataLength))
|
||||
? my_sess.stream.get()
|
||||
|
||||
: nullptr, /* No more data to read, start decode draining mode*/
|
||||
my_sess.procesing_surface_ptr.lock()->get_handle(),
|
||||
my_sess.get_mfx_bitstream_ptr(),
|
||||
my_sess.processing_surface_ptr.lock()->get_handle(),
|
||||
&sync_pair.second,
|
||||
&sync_pair.first);
|
||||
|
||||
@ -771,12 +769,12 @@ TEST(OneVPL_Source_DX11_Accel_VPL, Init)
|
||||
my_sess.last_status == MFX_WRN_DEVICE_BUSY) {
|
||||
try {
|
||||
if (my_sess.last_status == MFX_ERR_MORE_SURFACE) {
|
||||
my_sess.swap_surface(engine);
|
||||
my_sess.swap_decode_surface(engine);
|
||||
}
|
||||
my_sess.last_status =
|
||||
MFXVideoDECODE_DecodeFrameAsync(my_sess.session,
|
||||
my_sess.stream.get(),
|
||||
my_sess.procesing_surface_ptr.lock()->get_handle(),
|
||||
my_sess.get_mfx_bitstream_ptr(),
|
||||
my_sess.processing_surface_ptr.lock()->get_handle(),
|
||||
&sync_pair.second,
|
||||
&sync_pair.first);
|
||||
|
||||
@ -808,6 +806,224 @@ TEST(OneVPL_Source_DX11_Accel_VPL, Init)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
TEST(OneVPL_Source_DX11_Accel_VPL, preproc)
|
||||
{
|
||||
using namespace cv::gapi::wip::onevpl;
|
||||
|
||||
std::vector<CfgParam> cfg_params_w_dx11;
|
||||
cfg_params_w_dx11.push_back(CfgParam::create_acceleration_mode(MFX_ACCEL_MODE_VIA_D3D11));
|
||||
std::unique_ptr<VPLAccelerationPolicy> acceleration_policy (new VPLDX11AccelerationPolicy(std::make_shared<CfgParamDeviceSelector>(cfg_params_w_dx11)));
|
||||
|
||||
mfxLoader test_mfx_handle = MFXLoad();
|
||||
|
||||
mfxConfig cfg_inst_0 = MFXCreateConfig(test_mfx_handle);
|
||||
EXPECT_TRUE(cfg_inst_0);
|
||||
mfxVariant mfx_param_0;
|
||||
mfx_param_0.Type = MFX_VARIANT_TYPE_U32;
|
||||
mfx_param_0.Data.U32 = MFX_IMPL_TYPE_HARDWARE;
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_0,(mfxU8 *)CfgParam::implementation_name(),
|
||||
mfx_param_0), MFX_ERR_NONE);
|
||||
|
||||
mfxConfig cfg_inst_1 = MFXCreateConfig(test_mfx_handle);
|
||||
EXPECT_TRUE(cfg_inst_1);
|
||||
mfxVariant mfx_param_1;
|
||||
mfx_param_1.Type = MFX_VARIANT_TYPE_U32;
|
||||
mfx_param_1.Data.U32 = MFX_ACCEL_MODE_VIA_D3D11;
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_1,(mfxU8 *)CfgParam::acceleration_mode_name(),
|
||||
mfx_param_1), MFX_ERR_NONE);
|
||||
|
||||
mfxConfig cfg_inst_2 = MFXCreateConfig(test_mfx_handle);
|
||||
EXPECT_TRUE(cfg_inst_2);
|
||||
mfxVariant mfx_param_2;
|
||||
mfx_param_2.Type = MFX_VARIANT_TYPE_U32;
|
||||
mfx_param_2.Data.U32 = MFX_CODEC_HEVC;
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_2,(mfxU8 *)CfgParam::decoder_id_name(),
|
||||
mfx_param_2), MFX_ERR_NONE);
|
||||
|
||||
mfxConfig cfg_inst_3 = MFXCreateConfig(test_mfx_handle);
|
||||
EXPECT_TRUE(cfg_inst_3);
|
||||
mfxVariant mfx_param_3;
|
||||
mfx_param_3.Type = MFX_VARIANT_TYPE_U32;
|
||||
mfx_param_3.Data.U32 = MFX_EXTBUFF_VPP_SCALING;
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_3,
|
||||
(mfxU8 *)"mfxImplDescription.mfxVPPDescription.filter.FilterFourCC",
|
||||
mfx_param_3), MFX_ERR_NONE);
|
||||
// create session
|
||||
mfxSession mfx_decode_session{};
|
||||
mfxStatus sts = MFXCreateSession(test_mfx_handle, 0, &mfx_decode_session);
|
||||
EXPECT_EQ(MFX_ERR_NONE, sts);
|
||||
|
||||
// assign acceleration
|
||||
EXPECT_NO_THROW(acceleration_policy->init(mfx_decode_session));
|
||||
|
||||
// create proper bitstream
|
||||
std::string file_path = findDataFile("highgui/video/big_buck_bunny.h265");
|
||||
std::shared_ptr<IDataProvider> data_provider(new FileDataProvider(file_path,
|
||||
{CfgParam::create_decoder_id(MFX_CODEC_HEVC)}));
|
||||
IDataProvider::mfx_codec_id_type decoder_id_name = data_provider->get_mfx_codec_id();
|
||||
|
||||
// Prepare video param
|
||||
mfxVideoParam mfxDecParams {};
|
||||
mfxDecParams.mfx.CodecId = decoder_id_name;
|
||||
mfxDecParams.IOPattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
|
||||
|
||||
// try fetch & decode input data
|
||||
sts = MFX_ERR_NONE;
|
||||
std::shared_ptr<IDataProvider::mfx_bitstream> bitstream{};
|
||||
do {
|
||||
EXPECT_TRUE(data_provider->fetch_bitstream_data(bitstream));
|
||||
sts = MFXVideoDECODE_DecodeHeader(mfx_decode_session, bitstream.get(), &mfxDecParams);
|
||||
EXPECT_TRUE(MFX_ERR_NONE == sts || MFX_ERR_MORE_DATA == sts);
|
||||
} while (sts == MFX_ERR_MORE_DATA && !data_provider->empty());
|
||||
|
||||
EXPECT_EQ(MFX_ERR_NONE, sts);
|
||||
|
||||
mfxFrameAllocRequest request{};
|
||||
memset(&request, 0, sizeof(request));
|
||||
sts = MFXVideoDECODE_QueryIOSurf(mfx_decode_session, &mfxDecParams, &request);
|
||||
EXPECT_EQ(MFX_ERR_NONE, sts);
|
||||
|
||||
// Allocate surfaces for decoder
|
||||
request.Type |= MFX_MEMTYPE_EXTERNAL_FRAME | MFX_MEMTYPE_FROM_DECODE | MFX_MEMTYPE_FROM_VPPIN;
|
||||
VPLAccelerationPolicy::pool_key_t decode_pool_key = acceleration_policy->create_surface_pool(request,
|
||||
mfxDecParams.mfx.FrameInfo);
|
||||
sts = MFXVideoDECODE_Init(mfx_decode_session, &mfxDecParams);
|
||||
EXPECT_EQ(MFX_ERR_NONE, sts);
|
||||
|
||||
// initialize VPL session
|
||||
mfxSession mfx_vpl_session{};
|
||||
sts = MFXCreateSession(test_mfx_handle, 0, &mfx_vpl_session);
|
||||
// assign acceleration
|
||||
EXPECT_NO_THROW(acceleration_policy->init(mfx_vpl_session));
|
||||
EXPECT_EQ(MFX_ERR_NONE, sts);
|
||||
|
||||
// request VPL surface
|
||||
mfxU16 vppOutImgWidth = 672;
|
||||
mfxU16 vppOutImgHeight = 382;
|
||||
|
||||
mfxVideoParam mfxVPPParams{0};
|
||||
mfxVPPParams.vpp.In = mfxDecParams.mfx.FrameInfo;
|
||||
|
||||
mfxVPPParams.vpp.Out.FourCC = MFX_FOURCC_NV12;
|
||||
mfxVPPParams.vpp.Out.ChromaFormat = MFX_CHROMAFORMAT_YUV420;
|
||||
mfxVPPParams.vpp.Out.Width = ALIGN16(vppOutImgWidth);
|
||||
mfxVPPParams.vpp.Out.Height = ALIGN16(vppOutImgHeight);
|
||||
mfxVPPParams.vpp.Out.CropX = 0;
|
||||
mfxVPPParams.vpp.Out.CropY = 0;
|
||||
mfxVPPParams.vpp.Out.CropW = vppOutImgWidth;
|
||||
mfxVPPParams.vpp.Out.CropH = vppOutImgHeight;
|
||||
mfxVPPParams.vpp.Out.PicStruct = MFX_PICSTRUCT_PROGRESSIVE;
|
||||
mfxVPPParams.vpp.Out.FrameRateExtN = 30;
|
||||
mfxVPPParams.vpp.Out.FrameRateExtD = 1;
|
||||
|
||||
mfxVPPParams.IOPattern = MFX_IOPATTERN_IN_VIDEO_MEMORY | MFX_IOPATTERN_OUT_VIDEO_MEMORY;
|
||||
|
||||
mfxFrameAllocRequest vppRequests[2];
|
||||
memset(&vppRequests, 0, sizeof(mfxFrameAllocRequest) * 2);
|
||||
EXPECT_EQ(MFXVideoVPP_QueryIOSurf(mfx_vpl_session, &mfxVPPParams, vppRequests), MFX_ERR_NONE);
|
||||
|
||||
vppRequests[1].AllocId = 666;
|
||||
VPLAccelerationPolicy::pool_key_t vpp_out_pool_key =
|
||||
acceleration_policy->create_surface_pool(vppRequests[1], mfxVPPParams.vpp.Out);
|
||||
EXPECT_EQ(MFXVideoVPP_Init(mfx_vpl_session, &mfxVPPParams), MFX_ERR_NONE);
|
||||
|
||||
// finalize session creation
|
||||
DecoderParams d_param{bitstream, mfxDecParams};
|
||||
TranscoderParams t_param{mfxVPPParams};
|
||||
VPLLegacyDecodeEngine engine(std::move(acceleration_policy));
|
||||
std::shared_ptr<LegacyDecodeSession> sess_ptr =
|
||||
engine.register_session<LegacyDecodeSession>(
|
||||
mfx_decode_session,
|
||||
std::move(d_param),
|
||||
data_provider);
|
||||
|
||||
sess_ptr->init_surface_pool(decode_pool_key);
|
||||
|
||||
// prepare working surfaces
|
||||
sess_ptr->swap_decode_surface(engine);
|
||||
|
||||
// launch pipeline
|
||||
LegacyDecodeSession &my_sess = *sess_ptr;
|
||||
|
||||
size_t min_available_frames_count =
|
||||
std::min(engine.get_accel()->get_surface_count(decode_pool_key),
|
||||
engine.get_accel()->get_surface_count(vpp_out_pool_key));
|
||||
size_t frame_num = 0;
|
||||
do {
|
||||
if (!my_sess.data_provider) {
|
||||
my_sess.last_status = MFX_ERR_MORE_DATA;
|
||||
} else {
|
||||
my_sess.last_status = MFX_ERR_NONE;
|
||||
if (!my_sess.data_provider->fetch_bitstream_data(my_sess.stream)) {
|
||||
my_sess.last_status = MFX_ERR_MORE_DATA;
|
||||
my_sess.data_provider.reset(); //close source
|
||||
}
|
||||
}
|
||||
|
||||
// 2) enqueue ASYNC decode operation
|
||||
// prepare sync object for new surface
|
||||
LegacyTranscodeSession::op_handle_t sync_pair{};
|
||||
|
||||
// enqueue decode operation with current session surface
|
||||
{
|
||||
my_sess.last_status =
|
||||
MFXVideoDECODE_DecodeFrameAsync(my_sess.session,
|
||||
my_sess.get_mfx_bitstream_ptr(),
|
||||
my_sess.processing_surface_ptr.lock()->get_handle(),
|
||||
&sync_pair.second,
|
||||
&sync_pair.first);
|
||||
|
||||
// process wait-like statuses in-place:
|
||||
// It is better to use up all VPL decoding resources in the pipeline
// as soon as possible, so keep waiting for a free surface or a free device
|
||||
while (my_sess.last_status == MFX_ERR_MORE_SURFACE ||
|
||||
my_sess.last_status == MFX_WRN_DEVICE_BUSY) {
|
||||
try {
|
||||
if (my_sess.last_status == MFX_ERR_MORE_SURFACE) {
|
||||
my_sess.swap_decode_surface(engine);
|
||||
}
|
||||
my_sess.last_status =
|
||||
MFXVideoDECODE_DecodeFrameAsync(my_sess.session,
|
||||
my_sess.get_mfx_bitstream_ptr(),
|
||||
my_sess.processing_surface_ptr.lock()->get_handle(),
|
||||
&sync_pair.second,
|
||||
&sync_pair.first);
|
||||
|
||||
} catch (const std::runtime_error&) {
|
||||
// NB: not an error; yield CPU ticks to check
// surface availability at the next phase.
|
||||
EXPECT_TRUE(false);
|
||||
}
|
||||
}
|
||||
}
|
||||
{
|
||||
do {
|
||||
my_sess.last_status = MFXVideoCORE_SyncOperation(my_sess.session, sync_pair.first, 0);
|
||||
// put frames in ready queue on success
|
||||
if (MFX_ERR_NONE == my_sess.last_status) {
|
||||
break;
|
||||
}
|
||||
} while (MFX_WRN_IN_EXECUTION == my_sess.last_status);
|
||||
EXPECT_EQ(my_sess.last_status, MFX_ERR_NONE);
|
||||
}
|
||||
|
||||
// perform VPP operation on decoder synchronized surface
|
||||
|
||||
auto vpp_out = engine.get_accel()->get_free_surface(vpp_out_pool_key).lock();
|
||||
EXPECT_TRUE(vpp_out.get());
|
||||
my_sess.last_status = MFXVideoVPP_RunFrameVPPAsync(mfx_vpl_session,
|
||||
sync_pair.second,
|
||||
vpp_out->get_handle(),
|
||||
nullptr, &sync_pair.first);
|
||||
if (my_sess.last_status == MFX_ERR_MORE_SURFACE ||
|
||||
my_sess.last_status == MFX_ERR_NONE) {
|
||||
my_sess.last_status = MFXVideoCORE_SyncOperation(mfx_vpl_session, sync_pair.first, INFINITE);
|
||||
EXPECT_EQ(my_sess.last_status, MFX_ERR_NONE);
|
||||
frame_num++;
|
||||
}
|
||||
} while(frame_num < min_available_frames_count);
|
||||
}
|
||||
#endif // HAVE_DIRECTX
|
||||
#endif // HAVE_D3D11
|
||||
|
||||
|
@ -73,9 +73,9 @@ TEST_P(OneVPL_Source_MFPAsyncDispatcherTest, open_and_decode_file)
|
||||
EXPECT_TRUE(dd_result);
|
||||
|
||||
// initialize MFX
|
||||
mfxLoader mfx_handle = MFXLoad();
|
||||
mfxLoader mfx = MFXLoad();
|
||||
|
||||
mfxConfig cfg_inst_0 = MFXCreateConfig(mfx_handle);
|
||||
mfxConfig cfg_inst_0 = MFXCreateConfig(mfx);
|
||||
EXPECT_TRUE(cfg_inst_0);
|
||||
mfxVariant mfx_param_0;
|
||||
mfx_param_0.Type = MFX_VARIANT_TYPE_U32;
|
||||
@ -85,7 +85,7 @@ TEST_P(OneVPL_Source_MFPAsyncDispatcherTest, open_and_decode_file)
|
||||
|
||||
// create MFX session
|
||||
mfxSession mfx_session{};
|
||||
mfxStatus sts = MFXCreateSession(mfx_handle, 0, &mfx_session);
|
||||
mfxStatus sts = MFXCreateSession(mfx, 0, &mfx_session);
|
||||
EXPECT_EQ(MFX_ERR_NONE, sts);
|
||||
|
||||
// create proper bitstream
|
||||
@ -112,7 +112,7 @@ TEST_P(OneVPL_Source_MFPAsyncDispatcherTest, open_and_decode_file)
|
||||
|
||||
MFXVideoDECODE_Close(mfx_session);
|
||||
MFXClose(mfx_session);
|
||||
MFXUnload(mfx_handle);
|
||||
MFXUnload(mfx);
|
||||
}
|
||||
|
||||
|
||||
|
495
modules/gapi/test/streaming/gapi_streaming_vpp_preproc_test.cpp
Normal file
@ -0,0 +1,495 @@
|
||||
// This file is part of OpenCV project.
|
||||
// It is subject to the license terms in the LICENSE file found in the top-level directory
|
||||
// of this distribution and at http://opencv.org/license.html.
|
||||
//
|
||||
// Copyright (C) 2022 Intel Corporation
|
||||
|
||||
|
||||
#include "../test_precomp.hpp"
|
||||
|
||||
#include "../common/gapi_tests_common.hpp"
|
||||
#include "../common/gapi_streaming_tests_common.hpp"
|
||||
|
||||
#include <chrono>
|
||||
#include <future>
|
||||
#include <tuple>
|
||||
|
||||
#include <opencv2/gapi/media.hpp>
|
||||
#include <opencv2/gapi/cpu/core.hpp>
|
||||
#include <opencv2/gapi/cpu/imgproc.hpp>
|
||||
|
||||
#include <opencv2/gapi/fluid/core.hpp>
|
||||
#include <opencv2/gapi/fluid/imgproc.hpp>
|
||||
#include <opencv2/gapi/fluid/gfluidkernel.hpp>
|
||||
|
||||
#include <opencv2/gapi/ocl/core.hpp>
|
||||
#include <opencv2/gapi/ocl/imgproc.hpp>
|
||||
|
||||
#include <opencv2/gapi/streaming/cap.hpp>
|
||||
#include <opencv2/gapi/streaming/desync.hpp>
|
||||
#include <opencv2/gapi/streaming/format.hpp>
|
||||
|
||||
#ifdef HAVE_ONEVPL
|
||||
|
||||
#include <opencv2/gapi/streaming/onevpl/data_provider_interface.hpp>
|
||||
#include "streaming/onevpl/file_data_provider.hpp"
|
||||
#include "streaming/onevpl/cfg_param_device_selector.hpp"
|
||||
|
||||
#include "streaming/onevpl/accelerators/surface/surface.hpp"
|
||||
#include "streaming/onevpl/accelerators/surface/cpu_frame_adapter.hpp"
|
||||
#include "streaming/onevpl/accelerators/surface/dx11_frame_adapter.hpp"
|
||||
#include "streaming/onevpl/accelerators/accel_policy_cpu.hpp"
|
||||
#include "streaming/onevpl/accelerators/accel_policy_dx11.hpp"
|
||||
#include "streaming/onevpl/accelerators/dx11_alloc_resource.hpp"
|
||||
#include "streaming/onevpl/accelerators/utils/shared_lock.hpp"
|
||||
#define private public
|
||||
#define protected public
|
||||
#include "streaming/onevpl/engine/decode/decode_engine_legacy.hpp"
|
||||
#include "streaming/onevpl/engine/decode/decode_session.hpp"
|
||||
|
||||
#include "streaming/onevpl/engine/preproc/preproc_engine.hpp"
|
||||
#include "streaming/onevpl/engine/preproc/preproc_session.hpp"
|
||||
|
||||
#include "streaming/onevpl/engine/transcode/transcode_engine_legacy.hpp"
|
||||
#include "streaming/onevpl/engine/transcode/transcode_session.hpp"
|
||||
#undef protected
|
||||
#undef private
|
||||
#include "logger.hpp"
|
||||
|
||||
#define ALIGN16(value) (((value + 15) >> 4) << 4)
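As a quick reference (illustrative only, not part of the patch): ALIGN16 rounds a dimension up to the next multiple of 16, which is why the inner-preproc test below compares decoded sizes against the aligned values of the requested resolution.
// e.g. 1080 -> 1088, 672 -> 672, 382 -> 384
static_assert(ALIGN16(1080) == 1088 && ALIGN16(672) == 672 && ALIGN16(382) == 384,
              "ALIGN16 rounds up to the next multiple of 16");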
|
||||
|
||||
namespace opencv_test
|
||||
{
|
||||
namespace
|
||||
{
|
||||
template<class ProcessingEngine>
|
||||
cv::MediaFrame extract_decoded_frame(mfxSession sessId, ProcessingEngine& engine) {
|
||||
using namespace cv::gapi::wip::onevpl;
|
||||
ProcessingEngineBase::ExecutionStatus status = ProcessingEngineBase::ExecutionStatus::Continue;
|
||||
while (0 == engine.get_ready_frames_count() &&
|
||||
status == ProcessingEngineBase::ExecutionStatus::Continue) {
|
||||
status = engine.process(sessId);
|
||||
}
|
||||
|
||||
if (engine.get_ready_frames_count() == 0) {
|
||||
GAPI_LOG_WARNING(nullptr, "failed: cannot obtain preprocessed frames, last status: " <<
|
||||
ProcessingEngineBase::status_to_string(status));
|
||||
throw std::runtime_error("cannot finalize VPP preprocessing operation");
|
||||
}
|
||||
cv::gapi::wip::Data data;
|
||||
engine.get_frame(data);
|
||||
return cv::util::get<cv::MediaFrame>(data);
|
||||
}
|
||||
|
||||
std::tuple<mfxLoader, mfxConfig> prepare_mfx(int mfx_codec, int mfx_accel_mode) {
|
||||
using namespace cv::gapi::wip::onevpl;
|
||||
mfxLoader mfx = MFXLoad();
|
||||
mfxConfig cfg_inst_0 = MFXCreateConfig(mfx);
|
||||
EXPECT_TRUE(cfg_inst_0);
|
||||
mfxVariant mfx_param_0;
|
||||
mfx_param_0.Type = MFX_VARIANT_TYPE_U32;
|
||||
mfx_param_0.Data.U32 = MFX_IMPL_TYPE_HARDWARE;
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_0,(mfxU8 *)CfgParam::implementation_name(),
|
||||
mfx_param_0), MFX_ERR_NONE);
|
||||
|
||||
mfxConfig cfg_inst_1 = MFXCreateConfig(mfx);
|
||||
EXPECT_TRUE(cfg_inst_1);
|
||||
mfxVariant mfx_param_1;
|
||||
mfx_param_1.Type = MFX_VARIANT_TYPE_U32;
|
||||
mfx_param_1.Data.U32 = mfx_accel_mode;
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_1,(mfxU8 *)CfgParam::acceleration_mode_name(),
|
||||
mfx_param_1), MFX_ERR_NONE);
|
||||
|
||||
mfxConfig cfg_inst_2 = MFXCreateConfig(mfx);
|
||||
EXPECT_TRUE(cfg_inst_2);
|
||||
mfxVariant mfx_param_2;
|
||||
mfx_param_2.Type = MFX_VARIANT_TYPE_U32;
|
||||
mfx_param_2.Data.U32 = mfx_codec;
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_2,(mfxU8 *)CfgParam::decoder_id_name(),
|
||||
mfx_param_2), MFX_ERR_NONE);
|
||||
|
||||
mfxConfig cfg_inst_3 = MFXCreateConfig(mfx);
|
||||
EXPECT_TRUE(cfg_inst_3);
|
||||
mfxVariant mfx_param_3;
|
||||
mfx_param_3.Type = MFX_VARIANT_TYPE_U32;
|
||||
mfx_param_3.Data.U32 = MFX_EXTBUFF_VPP_SCALING;
|
||||
EXPECT_EQ(MFXSetConfigFilterProperty(cfg_inst_3,
|
||||
(mfxU8 *)"mfxImplDescription.mfxVPPDescription.filter.FilterFourCC",
|
||||
mfx_param_3), MFX_ERR_NONE);
|
||||
return std::make_tuple(mfx, cfg_inst_3);
|
||||
}
|
||||
|
||||
class SafeQueue {
|
||||
public:
|
||||
void push(cv::MediaFrame&& f) {
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
queue.push(std::move(f));
|
||||
cv.notify_all();
|
||||
}
|
||||
|
||||
cv::MediaFrame pop() {
|
||||
cv::MediaFrame ret;
|
||||
std::unique_lock<std::mutex> lock(mutex);
|
||||
cv.wait(lock, [this] () {
|
||||
return !queue.empty();
|
||||
});
|
||||
ret = queue.front();
|
||||
queue.pop();
|
||||
return ret;
|
||||
}
|
||||
|
||||
void push_stop() {
|
||||
push(cv::MediaFrame::Create<IStopAdapter>());
|
||||
}
|
||||
|
||||
static bool is_stop(const cv::MediaFrame &f) {
|
||||
try {
|
||||
return f.get<IStopAdapter>();
|
||||
} catch(...) {}
|
||||
return false;
|
||||
}
|
||||
|
||||
private:
|
||||
struct IStopAdapter final : public cv::MediaFrame::IAdapter {
|
||||
~IStopAdapter() {}
|
||||
cv::GFrameDesc meta() const { return {}; };
|
||||
MediaFrame::View access(MediaFrame::Access) { return {{}, {}}; };
|
||||
};
|
||||
private:
|
||||
std::condition_variable cv;
|
||||
std::mutex mutex;
|
||||
std::queue<cv::MediaFrame> queue;
|
||||
};
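A brief, hypothetical usage sketch of the SafeQueue helper above (not part of the patch), mirroring the decode/preproc thread handshake used further below; produce_frame() is a placeholder for any decoded-frame source and <thread> is assumed to be available.
// Illustrative only: single producer / single consumer with a stop sentinel.
SafeQueue q;
std::thread producer([&q] () {
    q.push(produce_frame());   // placeholder: push decoded frames here
    q.push_stop();             // then signal the consumer to finish
});
cv::MediaFrame f = q.pop();    // blocks until a frame (or the sentinel) arrives
while (!SafeQueue::is_stop(f)) {
    // ... run VPP preprocessing on f here ...
    f = q.pop();
}
producer.join();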
|
||||
|
||||
struct EmptyDataProvider : public cv::gapi::wip::onevpl::IDataProvider {
|
||||
|
||||
bool empty() const override {
|
||||
return true;
|
||||
}
|
||||
mfx_codec_id_type get_mfx_codec_id() const override {
|
||||
return std::numeric_limits<uint32_t>::max();
|
||||
}
|
||||
bool fetch_bitstream_data(std::shared_ptr<mfx_bitstream> &) override {
|
||||
return false;
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
using source_t = std::string;
|
||||
using decoder_t = int;
|
||||
using acceleration_t = int;
|
||||
using out_frame_info_t = cv::GFrameDesc;
|
||||
using preproc_args_t = std::tuple<source_t, decoder_t, acceleration_t, out_frame_info_t>;
|
||||
|
||||
class VPPPreprocParams : public ::testing::TestWithParam<preproc_args_t> {};
|
||||
|
||||
preproc_args_t files[] = {
|
||||
preproc_args_t {"highgui/video/big_buck_bunny.h264",
|
||||
MFX_CODEC_AVC, MFX_ACCEL_MODE_VIA_D3D11,
|
||||
cv::GFrameDesc {cv::MediaFormat::NV12, {1920, 1080}}},
|
||||
preproc_args_t {"highgui/video/big_buck_bunny.h265",
|
||||
MFX_CODEC_HEVC, MFX_ACCEL_MODE_VIA_D3D11,
|
||||
cv::GFrameDesc {cv::MediaFormat::NV12, {1920, 1280}}}
|
||||
};
|
||||
|
||||
#ifdef HAVE_DIRECTX
|
||||
#ifdef HAVE_D3D11
|
||||
TEST(OneVPL_Source_PreprocEngine, functional_single_thread)
|
||||
{
|
||||
using namespace cv::gapi::wip::onevpl;
|
||||
using namespace cv::gapi::wip;
|
||||
|
||||
std::vector<CfgParam> cfg_params_w_dx11;
|
||||
cfg_params_w_dx11.push_back(CfgParam::create_acceleration_mode(MFX_ACCEL_MODE_VIA_D3D11));
|
||||
std::unique_ptr<VPLAccelerationPolicy> decode_accel_policy (
|
||||
new VPLDX11AccelerationPolicy(std::make_shared<CfgParamDeviceSelector>(cfg_params_w_dx11)));
|
||||
|
||||
// create file data provider
|
||||
std::string file_path = findDataFile("highgui/video/big_buck_bunny.h265");
|
||||
std::shared_ptr<IDataProvider> data_provider(new FileDataProvider(file_path,
|
||||
{CfgParam::create_decoder_id(MFX_CODEC_HEVC)}));
|
||||
|
||||
mfxLoader mfx{};
|
||||
mfxConfig mfx_cfg{};
|
||||
std::tie(mfx, mfx_cfg) = prepare_mfx(MFX_CODEC_HEVC, MFX_ACCEL_MODE_VIA_D3D11);
|
||||
|
||||
// create decode session
|
||||
mfxSession mfx_decode_session{};
|
||||
mfxStatus sts = MFXCreateSession(mfx, 0, &mfx_decode_session);
|
||||
EXPECT_EQ(MFX_ERR_NONE, sts);
|
||||
|
||||
// create decode engine
|
||||
auto device_selector = decode_accel_policy->get_device_selector();
|
||||
VPLLegacyDecodeEngine decode_engine(std::move(decode_accel_policy));
|
||||
auto sess_ptr = decode_engine.initialize_session(mfx_decode_session,
|
||||
cfg_params_w_dx11,
|
||||
data_provider);
|
||||
|
||||
// simulate net info
|
||||
cv::GFrameDesc required_frame_param {cv::MediaFormat::NV12,
|
||||
{1920, 1080}};
|
||||
|
||||
// create VPP preproc engine
|
||||
VPPPreprocEngine preproc_engine(std::unique_ptr<VPLAccelerationPolicy>{
|
||||
new VPLDX11AccelerationPolicy(device_selector)});
|
||||
|
||||
// launch pipeline
|
||||
// 1) decode frame
|
||||
cv::MediaFrame first_decoded_frame;
|
||||
ASSERT_NO_THROW(first_decoded_frame = extract_decoded_frame(sess_ptr->session, decode_engine));
|
||||
cv::GFrameDesc first_frame_decoded_desc = first_decoded_frame.desc();
|
||||
|
||||
// 1.5) create preproc session based on frame description & network info
|
||||
cv::util::optional<pp_params> first_pp_params = preproc_engine.is_applicable(first_decoded_frame);
|
||||
ASSERT_TRUE(first_pp_params.has_value());
|
||||
pp_session first_pp_sess = preproc_engine.initialize_preproc(first_pp_params.value(),
|
||||
required_frame_param);
|
||||
|
||||
// 2) make preproc using incoming decoded frame & preproc session
|
||||
cv::MediaFrame first_pp_frame = preproc_engine.run_sync(first_pp_sess, first_decoded_frame);
|
||||
cv::GFrameDesc first_outcome_pp_desc = first_pp_frame.desc();
|
||||
ASSERT_FALSE(first_frame_decoded_desc == first_outcome_pp_desc);
|
||||
|
||||
// do not hold media frames because they share limited DX11 surface pool resources
|
||||
first_decoded_frame = cv::MediaFrame();
|
||||
first_pp_frame = cv::MediaFrame();
|
||||
|
||||
// make test in loop
|
||||
bool in_progress = false;
|
||||
size_t frames_processed_count = 1;
|
||||
const auto &first_pp_param_value_impl =
|
||||
cv::util::get<cv::gapi::wip::onevpl::vpp_pp_params>(first_pp_params.value().value);
|
||||
try {
|
||||
while(true) {
|
||||
cv::MediaFrame decoded_frame = extract_decoded_frame(sess_ptr->session, decode_engine);
|
||||
in_progress = true;
|
||||
ASSERT_EQ(decoded_frame.desc(), first_frame_decoded_desc);
|
||||
|
||||
cv::util::optional<pp_params> params = preproc_engine.is_applicable(decoded_frame);
|
||||
ASSERT_TRUE(params.has_value());
|
||||
const auto &cur_pp_param_value_impl =
|
||||
cv::util::get<cv::gapi::wip::onevpl::vpp_pp_params>(params.value().value);
|
||||
|
||||
ASSERT_EQ(first_pp_param_value_impl.handle, cur_pp_param_value_impl.handle);
|
||||
ASSERT_TRUE(FrameInfoComparator::equal_to(first_pp_param_value_impl.info, cur_pp_param_value_impl.info));
|
||||
|
||||
pp_session pp_sess = preproc_engine.initialize_preproc(params.value(),
|
||||
required_frame_param);
|
||||
ASSERT_EQ(pp_sess.get<EngineSession>().get(),
|
||||
first_pp_sess.get<EngineSession>().get());
|
||||
|
||||
cv::MediaFrame pp_frame = preproc_engine.run_sync(pp_sess, decoded_frame);
|
||||
cv::GFrameDesc pp_desc = pp_frame.desc();
|
||||
ASSERT_TRUE(pp_desc == first_outcome_pp_desc);
|
||||
in_progress = false;
|
||||
frames_processed_count++;
|
||||
}
|
||||
} catch (...) {}
|
||||
|
||||
// test if interruption has happened
|
||||
ASSERT_FALSE(in_progress);
|
||||
ASSERT_NE(frames_processed_count, 1);
|
||||
}
|
||||
|
||||
|
||||
TEST_P(VPPPreprocParams, functional_different_threads)
|
||||
{
|
||||
using namespace cv::gapi::wip;
|
||||
using namespace cv::gapi::wip::onevpl;
|
||||
source_t file_path;
|
||||
decoder_t decoder_id;
|
||||
acceleration_t accel;
|
||||
out_frame_info_t required_frame_param;
|
||||
std::tie(file_path, decoder_id, accel, required_frame_param) = GetParam();
|
||||
|
||||
file_path = findDataFile(file_path);
|
||||
|
||||
std::vector<CfgParam> cfg_params_w_dx11;
|
||||
cfg_params_w_dx11.push_back(CfgParam::create_acceleration_mode(accel));
|
||||
std::unique_ptr<VPLAccelerationPolicy> decode_accel_policy (
|
||||
new VPLDX11AccelerationPolicy(std::make_shared<CfgParamDeviceSelector>(cfg_params_w_dx11)));
|
||||
|
||||
// create file data provider
|
||||
std::shared_ptr<IDataProvider> data_provider(new FileDataProvider(file_path,
|
||||
{CfgParam::create_decoder_id(decoder_id)}));
|
||||
|
||||
mfxLoader mfx{};
|
||||
mfxConfig mfx_cfg{};
|
||||
std::tie(mfx, mfx_cfg) = prepare_mfx(decoder_id, accel);
|
||||
|
||||
// create decode session
|
||||
mfxSession mfx_decode_session{};
|
||||
mfxStatus sts = MFXCreateSession(mfx, 0, &mfx_decode_session);
|
||||
EXPECT_EQ(MFX_ERR_NONE, sts);
|
||||
|
||||
// create decode engine
|
||||
auto device_selector = decode_accel_policy->get_device_selector();
|
||||
VPLLegacyDecodeEngine decode_engine(std::move(decode_accel_policy));
|
||||
auto sess_ptr = decode_engine.initialize_session(mfx_decode_session,
|
||||
cfg_params_w_dx11,
|
||||
data_provider);
|
||||
|
||||
// create VPP preproc engine
|
||||
VPPPreprocEngine preproc_engine(std::unique_ptr<VPLAccelerationPolicy>{
|
||||
new VPLDX11AccelerationPolicy(device_selector)});
|
||||
|
||||
// launch threads
|
||||
SafeQueue queue;
|
||||
size_t decoded_number = 1;
|
||||
size_t preproc_number = 0;
|
||||
|
||||
std::thread decode_thread([&decode_engine, sess_ptr,
|
||||
&queue, &decoded_number] () {
|
||||
// decode first frame
|
||||
{
|
||||
cv::MediaFrame decoded_frame;
|
||||
ASSERT_NO_THROW(decoded_frame = extract_decoded_frame(sess_ptr->session, decode_engine));
|
||||
queue.push(std::move(decoded_frame));
|
||||
}
|
||||
|
||||
// launch pipeline
|
||||
try {
|
||||
while(true) {
|
||||
queue.push(extract_decoded_frame(sess_ptr->session, decode_engine));
|
||||
decoded_number++;
|
||||
}
|
||||
} catch (...) {}
|
||||
|
||||
// send stop
|
||||
queue.push_stop();
|
||||
});
|
||||
|
||||
std::thread preproc_thread([&preproc_engine, &queue, &preproc_number, required_frame_param] () {
|
||||
// create preproc session based on frame description & network info
|
||||
cv::MediaFrame first_decoded_frame = queue.pop();
|
||||
cv::util::optional<pp_params> first_pp_params = preproc_engine.is_applicable(first_decoded_frame);
|
||||
ASSERT_TRUE(first_pp_params.has_value());
|
||||
pp_session first_pp_sess =
|
||||
preproc_engine.initialize_preproc(first_pp_params.value(), required_frame_param);
|
||||
|
||||
// make preproc using incoming decoded frame & preproc session
|
||||
cv::MediaFrame first_pp_frame = preproc_engine.run_sync(first_pp_sess, first_decoded_frame);
|
||||
cv::GFrameDesc first_outcome_pp_desc = first_pp_frame.desc();
|
||||
|
||||
// do not hold media frames because they share limited DX11 surface pool resources
|
||||
first_decoded_frame = cv::MediaFrame();
|
||||
first_pp_frame = cv::MediaFrame();
|
||||
|
||||
// launch pipeline
|
||||
bool in_progress = false;
|
||||
// count preprocessed frames so that the value can be checked later:
// Currently it looks redundant to implement any kind of graceful shutdown logic
// in this test - so let's agree that the media source has been processed
// successfully when preproc_number != 1 at the end
|
||||
preproc_number = 1;
|
||||
try {
|
||||
while(true) {
|
||||
cv::MediaFrame decoded_frame = queue.pop();
|
||||
if (SafeQueue::is_stop(decoded_frame)) {
|
||||
break;
|
||||
}
|
||||
in_progress = true;
|
||||
|
||||
cv::util::optional<pp_params> params = preproc_engine.is_applicable(decoded_frame);
|
||||
ASSERT_TRUE(params.has_value());
|
||||
ASSERT_TRUE(0 == memcmp(&params.value(), &first_pp_params.value(), sizeof(pp_params)));
|
||||
|
||||
pp_session pp_sess = preproc_engine.initialize_preproc(params.value(),
|
||||
required_frame_param);
|
||||
ASSERT_EQ(pp_sess.get<EngineSession>().get(),
|
||||
first_pp_sess.get<EngineSession>().get());
|
||||
|
||||
cv::MediaFrame pp_frame = preproc_engine.run_sync(pp_sess, decoded_frame);
|
||||
cv::GFrameDesc pp_desc = pp_frame.desc();
|
||||
ASSERT_TRUE(pp_desc == first_outcome_pp_desc);
|
||||
in_progress = false;
|
||||
preproc_number++;
|
||||
}
|
||||
} catch (...) {}
|
||||
|
||||
// test if interruption has happened
|
||||
ASSERT_FALSE(in_progress);
|
||||
ASSERT_NE(preproc_number, 1);
|
||||
});
|
||||
|
||||
decode_thread.join();
|
||||
preproc_thread.join();
|
||||
ASSERT_EQ(preproc_number, decoded_number);
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(OneVPL_Source_PreprocEngine, VPPPreprocParams,
|
||||
testing::ValuesIn(files));
|
||||
|
||||
using VPPInnerPreprocParams = VPPPreprocParams;
|
||||
TEST_P(VPPInnerPreprocParams, functional_inner_preproc_size)
|
||||
{
|
||||
using namespace cv::gapi::wip;
|
||||
using namespace cv::gapi::wip::onevpl;
|
||||
source_t file_path;
|
||||
decoder_t decoder_id;
|
||||
acceleration_t accel;
|
||||
out_frame_info_t required_frame_param;
|
||||
std::tie(file_path, decoder_id, accel, required_frame_param) = GetParam();
|
||||
|
||||
file_path = findDataFile(file_path);
|
||||
|
||||
std::vector<CfgParam> cfg_params_w_dx11_vpp;
|
||||
|
||||
// create accel policy
|
||||
cfg_params_w_dx11_vpp.push_back(CfgParam::create_acceleration_mode(accel));
|
||||
std::unique_ptr<VPLAccelerationPolicy> accel_policy (
|
||||
new VPLDX11AccelerationPolicy(std::make_shared<CfgParamDeviceSelector>(cfg_params_w_dx11_vpp)));
|
||||
|
||||
// create file data provider
|
||||
std::shared_ptr<IDataProvider> data_provider(new FileDataProvider(file_path,
|
||||
{CfgParam::create_decoder_id(decoder_id)}));
|
||||
|
||||
// create decode session
|
||||
mfxLoader mfx{};
|
||||
mfxConfig mfx_cfg{};
|
||||
std::tie(mfx, mfx_cfg) = prepare_mfx(decoder_id, accel);
|
||||
|
||||
mfxSession mfx_decode_session{};
|
||||
mfxStatus sts = MFXCreateSession(mfx, 0, &mfx_decode_session);
|
||||
EXPECT_EQ(MFX_ERR_NONE, sts);
|
||||
|
||||
// fill vpp params beforehand: resolution
|
||||
cfg_params_w_dx11_vpp.push_back(CfgParam::create_vpp_out_width(
|
||||
static_cast<uint16_t>(required_frame_param.size.width)));
|
||||
cfg_params_w_dx11_vpp.push_back(CfgParam::create_vpp_out_height(
|
||||
static_cast<uint16_t>(required_frame_param.size.height)));
|
||||
|
||||
// create transcode engine
|
||||
auto device_selector = accel_policy->get_device_selector();
|
||||
VPLLegacyTranscodeEngine engine(std::move(accel_policy));
|
||||
auto sess_ptr = engine.initialize_session(mfx_decode_session,
|
||||
cfg_params_w_dx11_vpp,
|
||||
data_provider);
|
||||
// make test in loop
|
||||
bool in_progress = false;
|
||||
size_t frames_processed_count = 1;
|
||||
try {
|
||||
while(true) {
|
||||
cv::MediaFrame decoded_frame = extract_decoded_frame(sess_ptr->session, engine);
|
||||
in_progress = true;
|
||||
ASSERT_EQ(decoded_frame.desc().size.width,
|
||||
ALIGN16(required_frame_param.size.width));
|
||||
ASSERT_EQ(decoded_frame.desc().size.height,
|
||||
ALIGN16(required_frame_param.size.height));
|
||||
ASSERT_EQ(decoded_frame.desc().fmt, required_frame_param.fmt);
|
||||
frames_processed_count++;
|
||||
in_progress = false;
|
||||
}
|
||||
} catch (...) {}
|
||||
|
||||
// test if interruption has happened
|
||||
ASSERT_FALSE(in_progress);
|
||||
ASSERT_NE(frames_processed_count, 1);
|
||||
}
|
||||
|
||||
INSTANTIATE_TEST_CASE_P(OneVPL_Source_PreprocInner, VPPInnerPreprocParams,
|
||||
testing::ValuesIn(files));
|
||||
#endif // HAVE_DIRECTX
|
||||
#endif // HAVE_D3D11
|
||||
} // namespace opencv_test
|
||||
#endif // HAVE_ONEVPL