diff --git a/modules/gapi/samples/pipeline_modeling_tool.cpp b/modules/gapi/samples/pipeline_modeling_tool.cpp
index 4ff2cbd82c..2ed9642256 100644
--- a/modules/gapi/samples/pipeline_modeling_tool.cpp
+++ b/modules/gapi/samples/pipeline_modeling_tool.cpp
@@ -173,6 +173,50 @@ static PLMode strToPLMode(const std::string& mode_str) {
     }
 }
 
+template <>
+CallParams read<CallParams>(const cv::FileNode& fn) {
+    auto name =
+        check_and_read<std::string>(fn, "name", "node");
+    // FIXME: Impossible to read size_t due to OpenCV limitations.
+    auto call_every_nth_opt = readOpt<int>(fn["call_every_nth"]);
+    auto call_every_nth = call_every_nth_opt.value_or(1);
+    if (call_every_nth <= 0) {
+        throw std::logic_error(
+            name + " call_every_nth must be greater than zero\n"
+            "Current call_every_nth: " + std::to_string(call_every_nth));
+    }
+    return CallParams{std::move(name), static_cast<size_t>(call_every_nth)};
+}
+
+template <>
+InferParams read<InferParams>(const cv::FileNode& fn) {
+    auto name =
+        check_and_read<std::string>(fn, "name", "node");
+
+    InferParams params;
+    params.path = read<ModelPath>(fn);
+    params.device = check_and_read<std::string>(fn, "device", name);
+    params.input_layers = readList<std::string>(fn, "input_layers", name);
+    params.output_layers = readList<std::string>(fn, "output_layers", name);
+
+    return params;
+}
+
+template <>
+DummyParams read<DummyParams>(const cv::FileNode& fn) {
+    auto name =
+        check_and_read<std::string>(fn, "name", "node");
+
+    DummyParams params;
+    params.time = check_and_read<double>(fn, "time", name);
+    if (params.time < 0) {
+        throw std::logic_error(name + " time must be positive");
+    }
+    params.output = check_and_read<OutputDescr>(fn, "output", name);
+
+    return params;
+}
+
 static std::vector<std::string> parseExecList(const std::string& exec_list) {
     std::vector<std::string> pl_types;
     std::stringstream ss(exec_list);
@@ -316,31 +360,17 @@ int main(int argc, char* argv[]) {
         if (!nodes_fn.isSeq()) {
             throw std::logic_error("nodes in " + name + " must be a sequence");
         }
+
         for (auto node_fn : nodes_fn) {
-            auto node_name =
-                check_and_read<std::string>(node_fn, "name", "node");
+            auto call_params = read<CallParams>(node_fn);
             auto node_type =
                 check_and_read<std::string>(node_fn, "type", "node");
             if (node_type == "Dummy") {
-                auto time =
-                    check_and_read<double>(node_fn, "time", node_name);
-                if (time < 0) {
-                    throw std::logic_error(node_name + " time must be positive");
-                }
-                auto output =
-                    check_and_read<OutputDescr>(node_fn, "output", node_name);
-                builder.addDummy(node_name, time, output);
+                builder.addDummy(call_params, read<DummyParams>(node_fn));
             } else if (node_type == "Infer") {
-                InferParams params;
-                params.path = read<ModelPath>(node_fn);
-                params.device =
-                    check_and_read<std::string>(node_fn, "device", node_name);
-                params.input_layers =
-                    readList<std::string>(node_fn, "input_layers", node_name);
-                params.output_layers =
-                    readList<std::string>(node_fn, "output_layers", node_name);
-                params.config = config;
-                builder.addInfer(node_name, params);
+                auto infer_params = read<InferParams>(node_fn);
+                infer_params.config = config;
+                builder.addInfer(call_params, infer_params);
             } else {
                 throw std::logic_error("Unsupported node type: " + node_type);
             }
diff --git a/modules/gapi/samples/pipeline_modeling_tool/pipeline_builder.hpp b/modules/gapi/samples/pipeline_modeling_tool/pipeline_builder.hpp
index a4f69b60ad..3906ae4f4c 100644
--- a/modules/gapi/samples/pipeline_modeling_tool/pipeline_builder.hpp
+++ b/modules/gapi/samples/pipeline_modeling_tool/pipeline_builder.hpp
@@ -23,11 +23,16 @@ struct Edge {
     P dst;
 };
 
+struct CallParams {
+    std::string name;
+    size_t      call_every_nth;
+};
+
 struct CallNode {
     using F = std::function<void(const cv::GProtoArgs&, cv::GProtoArgs&)>;
 
-    std::string name;
-    F           run;
+    CallParams params;
+    F          run;
 };
 
 struct DataNode {
@@ -44,6 +49,80 @@ struct Node {
     Kind kind;
 };
 
+struct SubGraphCall {
+    G_API_OP(GSubGraph,
+             <cv::GMat(cv::GMat, cv::GComputation, cv::GCompileArgs, size_t)>,
+             "custom.subgraph") {
+        static cv::GMatDesc outMeta(const cv::GMatDesc& in,
+                                    cv::GComputation    comp,
+                                    cv::GCompileArgs    compile_args,
+                                    const size_t        call_every_nth) {
+            GAPI_Assert(call_every_nth > 0);
+            auto out_metas =
+                comp.compile(in, std::move(compile_args)).outMetas();
+            GAPI_Assert(out_metas.size() == 1u);
+            GAPI_Assert(cv::util::holds_alternative<cv::GMatDesc>(out_metas[0]));
+            return cv::util::get<cv::GMatDesc>(out_metas[0]);
+        }
+
+    };
+
+    struct SubGraphState {
+        cv::Mat       last_result;
+        cv::GCompiled cc;
+        int           call_counter = 0;
+    };
+
+    GAPI_OCV_KERNEL_ST(SubGraphImpl, GSubGraph, SubGraphState) {
+        static void setup(const cv::GMatDesc& in,
+                          cv::GComputation    comp,
+                          cv::GCompileArgs    compile_args,
+                          const size_t        /*call_every_nth*/,
+                          std::shared_ptr<SubGraphState>& state,
+                          const cv::GCompileArgs& /*args*/) {
+            state.reset(new SubGraphState{});
+            state->cc = comp.compile(in, std::move(compile_args));
+            auto out_desc =
+                cv::util::get<cv::GMatDesc>(state->cc.outMetas()[0]);
+            utils::createNDMat(state->last_result,
+                               out_desc.dims,
+                               out_desc.depth);
+        }
+
+        static void run(const cv::Mat&   in,
+                        cv::GComputation /*comp*/,
+                        cv::GCompileArgs /*compile_args*/,
+                        const size_t     call_every_nth,
+                        cv::Mat&         out,
+                        SubGraphState&   state) {
+            // NB: Make a call on the first iteration and skip the following ones.
+            if (state.call_counter == 0) {
+                state.cc(in, state.last_result);
+            }
+            state.last_result.copyTo(out);
+            state.call_counter = (state.call_counter + 1) % call_every_nth;
+        }
+    };
+
+    void operator()(const cv::GProtoArgs& inputs, cv::GProtoArgs& outputs);
+
+    size_t numInputs()  const { return 1; }
+    size_t numOutputs() const { return 1; }
+
+    cv::GComputation comp;
+    cv::GCompileArgs compile_args;
+    size_t           call_every_nth;
+};
+
+void SubGraphCall::operator()(const cv::GProtoArgs& inputs,
+                              cv::GProtoArgs& outputs) {
+    GAPI_Assert(inputs.size() == 1u);
+    GAPI_Assert(cv::util::holds_alternative<cv::GMat>(inputs[0]));
+    GAPI_Assert(outputs.empty());
+    auto in = cv::util::get<cv::GMat>(inputs[0]);
+    outputs.emplace_back(GSubGraph::on(in, comp, compile_args, call_every_nth));
+}
+
 struct DummyCall {
     G_API_OP(GDummy,
              <cv::GMat(cv::GMat, double, OutputDescr)>,
@@ -166,6 +245,11 @@ struct ImportPath {
 
 using ModelPath = cv::util::variant<ImportPath, LoadPath>;
 
+struct DummyParams {
+    double      time;
+    OutputDescr output;
+};
+
 struct InferParams {
     std::string name;
     ModelPath   path;
@@ -178,11 +262,11 @@ class PipelineBuilder {
 public:
     PipelineBuilder();
 
-    void addDummy(const std::string& name,
-                  const double       time,
-                  const OutputDescr& output);
+    void addDummy(const CallParams&  call_params,
+                  const DummyParams& dummy_params);
 
-    void addInfer(const std::string& name, const InferParams& params);
+    void addInfer(const CallParams&  call_params,
+                  const InferParams& infer_params);
 
     void setSource(const std::string& name,
                    std::shared_ptr<DummySource> src);
@@ -197,8 +281,8 @@ public:
 
 private:
     template <typename CallT>
-    void addCall(const std::string& name,
-                 CallT&&            call);
+    void addCall(const CallParams& call_params,
+                 CallT&&           call);
 
     Pipeline::Ptr construct();
 
@@ -226,20 +310,21 @@ private:
 
 PipelineBuilder::PipelineBuilder() : m_state(new State{}) { };
 
-void PipelineBuilder::addDummy(const std::string& name,
-                               const double       time,
-                               const OutputDescr& output) {
+void PipelineBuilder::addDummy(const CallParams&  call_params,
+                               const DummyParams& dummy_params) {
     m_state->kernels.include<DummyCall::DummyImpl>();
-    addCall(name, DummyCall{time, output});
+    addCall(call_params,
+            DummyCall{dummy_params.time, dummy_params.output});
 }
 
 template <typename CallT>
-void PipelineBuilder::addCall(const std::string& name,
-                              CallT&&            call) {
+void PipelineBuilder::addCall(const CallParams& call_params,
+                              CallT&&           call) {
     size_t num_inputs  = call.numInputs();
     size_t num_outputs = call.numOutputs();
-    Node::Ptr call_node(new Node{{},{},Node::Kind{CallNode{name, std::move(call)}}});
+    Node::Ptr call_node(new Node{{},{},Node::Kind{CallNode{call_params,
+                                                           std::move(call)}}});
     // NB: Create placeholders for inputs.
     call_node->in_nodes.resize(num_inputs);
     // NB: Create outputs with empty data.
@@ -249,36 +334,39 @@ void PipelineBuilder::addCall(const std::string& name,
                                                   Node::Kind{DataNode{}}});
     }
 
-    auto it = m_state->calls_map.find(name);
+    auto it = m_state->calls_map.find(call_params.name);
     if (it != m_state->calls_map.end()) {
-        throw std::logic_error("Node: " + name + " already exists!");
+        throw std::logic_error("Node: " + call_params.name + " already exists!");
     }
-    m_state->calls_map.emplace(name, call_node);
+    m_state->calls_map.emplace(call_params.name, call_node);
     m_state->all_calls.emplace_back(call_node);
 }
 
-void PipelineBuilder::addInfer(const std::string& name,
-                               const InferParams& params) {
+void PipelineBuilder::addInfer(const CallParams& call_params,
+                               const InferParams& infer_params) {
     // NB: No default ctor for Params.
     std::unique_ptr<cv::gapi::ie::Params<cv::gapi::Generic>> pp;
-    if (cv::util::holds_alternative<LoadPath>(params.path)) {
-        auto load_path = cv::util::get<LoadPath>(params.path);
-        pp.reset(new cv::gapi::ie::Params<cv::gapi::Generic>(name,
-                                                             load_path.xml,
-                                                             load_path.bin,
-                                                             params.device));
+    if (cv::util::holds_alternative<LoadPath>(infer_params.path)) {
+        auto load_path = cv::util::get<LoadPath>(infer_params.path);
+        pp.reset(new cv::gapi::ie::Params<cv::gapi::Generic>(call_params.name,
+                                                             load_path.xml,
+                                                             load_path.bin,
+                                                             infer_params.device));
     } else {
-        GAPI_Assert(cv::util::holds_alternative<ImportPath>(params.path));
-        auto import_path = cv::util::get<ImportPath>(params.path);
-        pp.reset(new cv::gapi::ie::Params<cv::gapi::Generic>(name,
-                                                             import_path.blob,
-                                                             params.device));
+        GAPI_Assert(cv::util::holds_alternative<ImportPath>(infer_params.path));
+        auto import_path = cv::util::get<ImportPath>(infer_params.path);
+        pp.reset(new cv::gapi::ie::Params<cv::gapi::Generic>(call_params.name,
+                                                             import_path.blob,
+                                                             infer_params.device));
     }
 
-    pp->pluginConfig(params.config);
+    pp->pluginConfig(infer_params.config);
     m_state->networks += cv::gapi::networks(*pp);
 
-    addCall(name, InferCall{name, params.input_layers, params.output_layers});
+    addCall(call_params,
+            InferCall{call_params.name,
+                      infer_params.input_layers,
+                      infer_params.output_layers});
 }
 
 void PipelineBuilder::addEdge(const Edge& edge) {
@@ -318,7 +406,7 @@ void PipelineBuilder::setSource(const std::string& name,
                                 std::shared_ptr<DummySource> src) {
     GAPI_Assert(!m_state->src && "Only single source pipelines are supported!");
     m_state->src = src;
-    addCall(name, SourceCall{});
+    addCall(CallParams{name, 1u/*call_every_nth*/}, SourceCall{});
 }
 
 void PipelineBuilder::setMode(PLMode mode) {
@@ -405,7 +493,7 @@ Pipeline::Ptr PipelineBuilder::construct() {
             if (in_data_node.expired()) {
                 const auto& call = cv::util::get<CallNode>(call_node->kind);
                 throw std::logic_error(
-                        "Node: " + call.name + " in Pipeline: " + m_state->name +
+                        "Node: " + call.params.name + " in Pipeline: " + m_state->name +
                         " has dangling input by in port: " + std::to_string(i));
             }
         }
@@ -424,8 +512,14 @@ Pipeline::Ptr PipelineBuilder::construct() {
             sorted_calls.push_back(n);
         }
     }
+
+    m_state->kernels.include<SubGraphCall::SubGraphImpl>();
+    m_state->compile_args.emplace_back(m_state->networks);
+    m_state->compile_args.emplace_back(m_state->kernels);
+
     // (2). Go through every call node.
     for (auto call_node : sorted_calls) {
+        auto& call = cv::util::get<CallNode>(call_node->kind);
         cv::GProtoArgs outputs;
         cv::GProtoArgs inputs;
         for (size_t i = 0; i < call_node->in_nodes.size(); ++i) {
@@ -437,8 +531,37 @@ Pipeline::Ptr PipelineBuilder::construct() {
             // (3). Extract proto input from every input node.
             inputs.push_back(in_data.arg.value());
         }
+        // NB: If the node shouldn't be called on every iteration,
+        // wrap it into a subgraph which is able to skip calls.
+        if (call.params.call_every_nth != 1u) {
+            // FIXME: Limitation of the subgraph operation ().
+            // G-API doesn't support a dynamic number of inputs/outputs.
+            if (inputs.size() > 1u) {
+                throw std::logic_error(
+                    "call_every_nth is supported only for single input subgraphs\n"
+                    "Current subgraph has " + std::to_string(inputs.size()) + " inputs");
+            }
+
+            if (outputs.size() > 1u) {
+                throw std::logic_error(
+                    "call_every_nth is supported only for single output subgraphs\n"
+                    "Current subgraph has " + std::to_string(outputs.size()) + " outputs");
+            }
+            // FIXME: Should be generalized.
+            // Now every subgraph contains only a single node
+            // which has a single input/output.
+            GAPI_Assert(cv::util::holds_alternative<cv::GMat>(inputs[0]));
+            cv::GProtoArgs subgr_inputs{cv::GProtoArg{cv::GMat()}};
+            cv::GProtoArgs subgr_outputs;
+            call.run(subgr_inputs, subgr_outputs);
+            auto comp = cv::GComputation(cv::GProtoInputArgs{subgr_inputs},
+                                         cv::GProtoOutputArgs{subgr_outputs});
+            call = CallNode{CallParams{call.params.name, 1u/*call_every_nth*/},
+                            SubGraphCall{std::move(comp),
+                                         m_state->compile_args,
+                                         call.params.call_every_nth}};
+        }
         // (4). Run call and get outputs.
-        auto call = cv::util::get<CallNode>(call_node->kind);
         call.run(inputs, outputs);
         // (5) If call node doesn't have inputs
         // it means that it's input producer node (Source).
@@ -460,9 +583,6 @@ Pipeline::Ptr PipelineBuilder::construct() {
         }
     }
 
-    m_state->compile_args.emplace_back(m_state->networks);
-    m_state->compile_args.emplace_back(m_state->kernels);
-
     if (m_state->mode == PLMode::STREAMING) {
         GAPI_Assert(graph_inputs.size() == 1);
         GAPI_Assert(cv::util::holds_alternative<cv::GMat>(graph_inputs[0]));
diff --git a/modules/gapi/samples/pipeline_modeling_tool/test_pipeline_modeling_tool.py b/modules/gapi/samples/pipeline_modeling_tool/test_pipeline_modeling_tool.py
index f36d0efc3b..d56a0399e9 100644
--- a/modules/gapi/samples/pipeline_modeling_tool/test_pipeline_modeling_tool.py
+++ b/modules/gapi/samples/pipeline_modeling_tool/test_pipeline_modeling_tool.py
@@ -956,3 +956,29 @@ Pipelines:
     exec_str = '{} --cfg={} --pl_mode=streaming --drop_frames'.format(pipeline_modeling_tool, cfg_file)
     out = get_output(exec_str)
     assert out.startswith('--drop_frames option is supported only for pipelines in "regular" mode')
+
+
+def test_incorrect_call_every_nth():
+    cfg_file = """\"%YAML:1.0
+work_time: 1000
+Pipelines:
+  PL1:
+    source:
+      name: 'Src'
+      latency: 20
+      output:
+        dims: [1,2,3,4]
+        precision: 'U8'
+    nodes:
+      - name: 'Node0'
+        type: 'Dummy'
+        call_every_nth: {}\" """
+
+    error = 'Node0 call_every_nth must be greater than zero\nCurrent call_every_nth: {}'
+
+    def check(cfg_file, call_every_nth):
+        out = get_output('{} --cfg={}'.format(pipeline_modeling_tool, cfg_file.format(call_every_nth)))
+        assert out.startswith(error.format(call_every_nth))
+
+    check(cfg_file, -3)
+    check(cfg_file, 0)
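
Not part of the patch, but for orientation: a minimal sketch of how the new call_every_nth plumbing is intended to be driven through the PipelineBuilder API declared above. It assumes a std::shared_ptr<DummySource> src and an OutputDescr out_descr already exist, the time values are placeholders, and edge wiring plus final pipeline construction are omitted.

    // Sketch only: exercises the new addDummy(CallParams, DummyParams) overload.
    // 'src' and 'out_descr' are assumed to be created elsewhere.
    PipelineBuilder builder;
    builder.setSource("Src", src);            // the source is always called (call_every_nth fixed to 1)

    builder.addDummy(CallParams{"Pre", 1u},   // runs on every iteration
                     DummyParams{2.0, out_descr});

    builder.addDummy(CallParams{"Heavy", 3u}, // really executes only every 3rd call; in between,
                     DummyParams{15.0, out_descr}); // the SubGraphCall wrapper replays the cached result

    // ... edges are then added via addEdge() and the pipeline is constructed as in the tool.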