Mirror of https://github.com/opencv/opencv.git
Merge pull request #21636 from TolyaTalamanov:at/gapi_modeling_tool_drop_frames
[G-API] Pipeline modeling tool - support frame dropping for source

* Implement drop frames functionality for dummy src
* Reconsider frame dropping
* Fix comments
This commit is contained in:
parent 54733eba6f
commit e5f2a8ebf2

@@ -224,6 +224,7 @@ int main(int argc, char* argv[]) {
     " if set to 0. If it's specified will be"
     " applied for every pipeline. }"
     "{ app_mode | realtime | Application mode (realtime/benchmark). }"
+    "{ drop_frames | false | Drop frames if they come earlier than pipeline is completed. }"
     "{ exec_list | | A comma-separated list of pipelines that"
     " will be executed. Spaces around commas"
     " are prohibited. }";

@@ -238,10 +239,11 @@ int main(int argc, char* argv[]) {
     const auto load_config = cmd.get<std::string>("load_config");
     const auto cached_dir = cmd.get<std::string>("cache_dir");
     const auto log_file = cmd.get<std::string>("log_file");
-    const auto pl_mode = strToPLMode(cmd.get<std::string>("pl_mode"));
+    const auto cmd_pl_mode = strToPLMode(cmd.get<std::string>("pl_mode"));
     const auto qc = cmd.get<int>("qc");
     const auto app_mode = strToAppMode(cmd.get<std::string>("app_mode"));
     const auto exec_str = cmd.get<std::string>("exec_list");
+    const auto drop_frames = cmd.get<bool>("drop_frames");

     cv::FileStorage fs;
     if (cfg.empty()) {

@@ -306,7 +308,8 @@ int main(int argc, char* argv[]) {
         if (app_mode == AppMode::BENCHMARK) {
             latency = 0.0;
         }
-        builder.setSource(src_name, latency, output);
+        auto src = std::make_shared<DummySource>(latency, output, drop_frames);
+        builder.setSource(src_name, src);
     }

     const auto& nodes_fn = check_and_get_fn(pl_fn, "nodes", name);

@@ -352,9 +355,18 @@ int main(int argc, char* argv[]) {
         builder.addEdge(edge);
     }

+    auto cfg_pl_mode = readOpt<std::string>(pl_fn["mode"]);
     // NB: Pipeline mode from config takes priority over cmd.
-    auto mode = readOpt<std::string>(pl_fn["mode"]);
-    builder.setMode(mode.has_value() ? strToPLMode(mode.value()) : pl_mode);
+    auto pl_mode = cfg_pl_mode.has_value()
+        ? strToPLMode(cfg_pl_mode.value()) : cmd_pl_mode;
+    // NB: Using drop_frames with streaming pipelines will lead to
+    // incorrect performance results.
+    if (drop_frames && pl_mode == PLMode::STREAMING) {
+        throw std::logic_error(
+            "--drop_frames option is supported only for pipelines in \"regular\" mode");
+    }
+
+    builder.setMode(pl_mode);

     // NB: Queue capacity from config takes priority over cmd.
     auto config_qc = readOpt<int>(pl_fn["queue_capacity"]);
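
The hunk above encodes two rules: a per-pipeline "mode" taken from the config overrides the command-line --pl_mode, and --drop_frames is rejected whenever the effective mode ends up being streaming. Below is a minimal standalone sketch of that resolution logic; it uses std::optional instead of the tool's readOpt/cv::optional helpers and a simplified stand-in for strToPLMode, so only the error text of the drop_frames check is taken from the patch itself.

#include <optional>
#include <stdexcept>
#include <string>

enum class PLMode { REGULAR, STREAMING };

// Simplified stand-in for the tool's strToPLMode().
static PLMode strToPLMode(const std::string& s) {
    if (s == "regular")   return PLMode::REGULAR;
    if (s == "streaming") return PLMode::STREAMING;
    throw std::logic_error("Unsupported PLMode: " + s);
}

// Config value (if present) wins over the command-line value; combining
// drop_frames with a streaming pipeline is rejected up front.
static PLMode resolveMode(const std::optional<std::string>& cfg_pl_mode,
                          PLMode cmd_pl_mode,
                          bool drop_frames) {
    const PLMode pl_mode = cfg_pl_mode ? strToPLMode(*cfg_pl_mode) : cmd_pl_mode;
    if (drop_frames && pl_mode == PLMode::STREAMING) {
        throw std::logic_error(
            "--drop_frames option is supported only for pipelines in \"regular\" mode");
    }
    return pl_mode;
}

int main() {
    // No per-pipeline mode in the config: the command-line value is used.
    resolveMode(std::nullopt, PLMode::REGULAR, /*drop_frames=*/true);        // OK
    // Config forces streaming: combining it with drop_frames is an error.
    try {
        resolveMode(std::optional<std::string>("streaming"), PLMode::REGULAR, true);
    } catch (const std::logic_error&) { /* expected */ }
    return 0;
}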

@@ -14,21 +14,23 @@ class DummySource final: public cv::gapi::wip::IStreamSource {
 public:
     using Ptr = std::shared_ptr<DummySource>;
     DummySource(const double latency,
-                const OutputDescr& output);
+                const OutputDescr& output,
+                const bool drop_frames);
     bool pull(cv::gapi::wip::Data& data) override;
     cv::GMetaArg descr_of() const override;

 private:
     double m_latency;
     cv::Mat m_mat;
-    using TimePoint =
-        std::chrono::time_point<std::chrono::high_resolution_clock>;
-    cv::optional<TimePoint> m_prev_pull_tp;
+    bool m_drop_frames;
+    double m_next_tick_ts = -1;
     int64_t m_curr_seq_id = 0;
 };

 DummySource::DummySource(const double latency,
-                         const OutputDescr& output)
-    : m_latency(latency) {
+                         const OutputDescr& output,
+                         const bool drop_frames)
+    : m_latency(latency), m_drop_frames(drop_frames) {
     utils::createNDMat(m_mat, output.dims, output.precision);
     utils::generateRandom(m_mat);
 }

@@ -36,23 +38,60 @@ DummySource::DummySource(const double latency,
 bool DummySource::pull(cv::gapi::wip::Data& data) {
     using namespace std::chrono;
     using namespace cv::gapi::streaming;
-    // NB: In case it's the first pull.
-    if (!m_prev_pull_tp) {
-        m_prev_pull_tp = cv::util::make_optional(high_resolution_clock::now());
+
+    // NB: Wait m_latency before returning the first frame.
+    if (m_next_tick_ts == -1) {
+        m_next_tick_ts = utils::timestamp<milliseconds>() + m_latency;
     }
+
+    int64_t curr_ts = utils::timestamp<milliseconds>();
+    if (curr_ts < m_next_tick_ts) {
+        /*
+         *             curr_ts
+         *                |
+         *    ------|----*-----|------->
+         *                     ^
+         *              m_next_tick_ts
+         *
+         *
+         * NB: New frame will be produced at the m_next_tick_ts point.
+         */
+        utils::sleep(m_next_tick_ts - curr_ts);
+    } else {
+        /*
+         *                                       curr_ts
+         *                         +1         +2    |
+         *    |----------|----------|----------|----*-----|------->
+         *               ^                     ^
+         *               m_next_tick_ts ------------->
+         *
+         *
+         * NB: Shift m_next_tick_ts to the nearest tick before curr_ts and
+         * update current seq_id correspondingly.
+         *
+         * If drop_frames is enabled, wait for the next tick, otherwise
+         * return the last written frame (+2 in the picture above) immediately.
+         */
+        int64_t num_frames =
+            static_cast<int64_t>((curr_ts - m_next_tick_ts) / m_latency);
+        m_curr_seq_id += num_frames;
+        m_next_tick_ts += num_frames * m_latency;
+        if (m_drop_frames) {
+            m_next_tick_ts += m_latency;
+            ++m_curr_seq_id;
+            utils::sleep(m_next_tick_ts - curr_ts);
+        }
+    }
+
     // NB: Just increase reference counter not to release mat memory
     // after assigning it to the data.
     cv::Mat mat = m_mat;
-    auto end = high_resolution_clock::now();
-    auto elapsed =
-        duration_cast<duration<double, std::milli>>(end - *m_prev_pull_tp).count();
-    auto delta = m_latency - elapsed;
-    if (delta > 0) {
-        utils::sleep(delta);
-    }
-    data.meta[meta_tag::timestamp] = int64_t{utils::timestamp<milliseconds>()};
+
+    data.meta[meta_tag::timestamp] = utils::timestamp<milliseconds>();
     data.meta[meta_tag::seq_id] = m_curr_seq_id++;
     data = mat;
-    m_prev_pull_tp = cv::util::make_optional(high_resolution_clock::now());
+    m_next_tick_ts += m_latency;

     return true;
 }
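
To make the tick arithmetic above easier to follow, here is a small self-contained C++ sketch of the late-consumer branch (TickState and pull_once are illustrative names, not part of the tool; timestamps are plain milliseconds so the example stays deterministic). With drop_frames enabled the source skips the missed ticks and waits for a fresh one; with it disabled the last produced frame is handed out immediately.

#include <cstdint>
#include <iostream>

struct TickState {
    double  latency_ms;     // period between frames
    bool    drop_frames;
    double  next_tick_ts;   // timestamp of the next frame tick
    int64_t seq_id = 0;
};

// Returns how long the source would sleep before handing out a frame at
// curr_ts, advancing seq_id/next_tick_ts the same way the patch does.
static double pull_once(TickState& s, double curr_ts) {
    double sleep_ms = 0.0;
    if (curr_ts < s.next_tick_ts) {
        // Early: wait until the tick.
        sleep_ms = s.next_tick_ts - curr_ts;
    } else {
        // Late: move next_tick_ts to the nearest tick before curr_ts and
        // advance seq_id by the number of missed ticks.
        const int64_t num_frames =
            static_cast<int64_t>((curr_ts - s.next_tick_ts) / s.latency_ms);
        s.seq_id       += num_frames;
        s.next_tick_ts += num_frames * s.latency_ms;
        if (s.drop_frames) {
            // Skip the frame that is already late and wait for a fresh tick.
            s.next_tick_ts += s.latency_ms;
            ++s.seq_id;
            sleep_ms = s.next_tick_ts - curr_ts;
        }
        // Otherwise the last produced frame is returned immediately.
    }
    ++s.seq_id;                      // the returned frame consumes one id
    s.next_tick_ts += s.latency_ms;  // schedule the following tick
    return sleep_ms;
}

int main() {
    // The source ticks every 20 ms; the consumer comes back only at t = 75 ms.
    TickState keep{20.0, /*drop_frames=*/false, /*next_tick_ts=*/20.0};
    TickState drop{20.0, /*drop_frames=*/true,  /*next_tick_ts=*/20.0};

    const double keep_sleep = pull_once(keep, 75.0);
    const double drop_sleep = pull_once(drop, 75.0);
    std::cout << "keep frames: sleep " << keep_sleep << " ms\n";  // 0 ms, frame reused
    std::cout << "drop frames: sleep " << drop_sleep << " ms\n";  // 5 ms, next tick at 80 ms
    return 0;
}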

@@ -184,9 +184,8 @@ public:

     void addInfer(const std::string& name, const InferParams& params);

-    void setSource(const std::string& name,
-                   double latency,
-                   const OutputDescr& output);
+    void setSource(const std::string& name,
+                   std::shared_ptr<DummySource> src);

     void addEdge(const Edge& edge);
     void setMode(PLMode mode);

@@ -315,11 +314,10 @@ void PipelineBuilder::addEdge(const Edge& edge) {
     out_data->out_nodes.push_back(dst_node);
 }

-void PipelineBuilder::setSource(const std::string& name,
-                                double latency,
-                                const OutputDescr& output) {
-    GAPI_Assert(!m_state->src);
-    m_state->src = std::make_shared<DummySource>(latency, output);
+void PipelineBuilder::setSource(const std::string& name,
+                                std::shared_ptr<DummySource> src) {
+    GAPI_Assert(!m_state->src && "Only single source pipelines are supported!");
+    m_state->src = src;
     addCall(name, SourceCall{});
 }
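
The builder change above is purely structural: DummySource construction moves out to main() (see the earlier hunk) so that drop_frames can be passed in, while setSource() now only stores the ready-made pointer and enforces the single-source restriction. A minimal sketch of that pattern follows; MiniBuilder and FakeSource are illustrative stand-ins, not the tool's types.

#include <cassert>
#include <memory>
#include <string>
#include <utility>

struct FakeSource {          // stand-in for DummySource
    double latency;
    bool   drop_frames;
};

class MiniBuilder {
public:
    void setSource(const std::string& name, std::shared_ptr<FakeSource> src) {
        // Mirrors GAPI_Assert(!m_state->src && "Only single source pipelines are supported!").
        assert(!m_src && "Only single source pipelines are supported!");
        m_src      = std::move(src);
        m_src_name = name;
    }

private:
    std::shared_ptr<FakeSource> m_src;
    std::string                 m_src_name;
};

int main() {
    // The caller decides about frame dropping and hands the source over.
    auto src = std::make_shared<FakeSource>(FakeSource{/*latency=*/20.0, /*drop_frames=*/true});
    MiniBuilder builder;
    builder.setSource("Src", src);   // a second setSource() call would trip the assert
    return 0;
}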

@@ -907,25 +907,52 @@ def test_error_invalid_pl_mode():
     cfg_file = """\"%YAML:1.0
 work_time: 1000
 Pipelines:
   PL1:
     source:
       name: 'Src'
       latency: 20
       output:
         dims: [1,2,3,4]
         precision: 'U8'
     nodes:
       - name: 'Node0'
         type: 'Dummy'
         time: 0.2
+        output:
+          dims: [1,2,3,4]
+          precision: 'U8'
     edges:
       - from: 'Src'
         to: 'Node0'\" """

     exec_str = '{} --cfg={} --app_mode=unknown'.format(pipeline_modeling_tool, cfg_file)
     out = get_output(exec_str)
     assert out.startswith('Unsupported AppMode: unknown\n'
                           'Please chose between: realtime and benchmark')


+def test_error_drop_frames_with_streaming():
+    cfg_file = """\"%YAML:1.0
+work_time: 1000
+Pipelines:
+  PL1:
+    source:
+      name: 'Src'
+      latency: 20
+      output:
+        dims: [1,2,3,4]
+        precision: 'U8'
+    nodes:
+      - name: 'Node0'
+        type: 'Dummy'
+        time: 0.2
+        output:
+          dims: [1,2,3,4]
+          precision: 'U8'
+    edges:
+      - from: 'Src'
+        to: 'Node0'\" """
+
+    exec_str = '{} --cfg={} --pl_mode=streaming --drop_frames'.format(pipeline_modeling_tool, cfg_file)
+    out = get_output(exec_str)
+    assert out.startswith('--drop_frames option is supported only for pipelines in "regular" mode')