mirror of
https://github.com/opencv/opencv.git
synced 2024-11-27 20:50:25 +08:00
Merge pull request #21477 from TolyaTalamanov:at/pipeline-builder-tool
[G-API] Implement G-API pipeline modeling tool * Implement G-API pipeline builder tool * Remove whitespaces from config * Remove unused unittest stuff * Fix comments to review * Fix MAC warning * Rename to pipeline_modeling_tool * Move to opencv apps * Move config to gapi/samples/data * gapi: samples sources are installed automatically
This commit is contained in:
parent
f4a7754cc0
commit
a92cba8484
@ -346,7 +346,7 @@ ocv_add_samples()
|
||||
|
||||
|
||||
# Required for sample with inference on host
|
||||
if (TARGET example_gapi_onevpl_infer_single_roi)
|
||||
if(TARGET example_gapi_onevpl_infer_single_roi)
|
||||
if(OPENCV_GAPI_INF_ENGINE)
|
||||
ocv_target_link_libraries(example_gapi_onevpl_infer_single_roi PRIVATE ${INF_ENGINE_TARGET})
|
||||
ocv_target_compile_definitions(example_gapi_onevpl_infer_single_roi PRIVATE -DHAVE_INF_ENGINE)
|
||||
@ -356,6 +356,12 @@ if (TARGET example_gapi_onevpl_infer_single_roi)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
if(TARGET example_gapi_pipeline_modeling_tool)
|
||||
if(WIN32)
|
||||
ocv_target_link_libraries(example_gapi_pipeline_modeling_tool winmm.lib)
|
||||
endif()
|
||||
endif()
|
||||
|
||||
# perf test dependencies postprocessing
|
||||
if(HAVE_GAPI_ONEVPL)
|
||||
# NB: TARGET opencv_perf_gapi doesn't exist before `ocv_add_perf_tests`
|
||||
|
192
modules/gapi/samples/data/config_template.yml
Normal file
192
modules/gapi/samples/data/config_template.yml
Normal file
@ -0,0 +1,192 @@
|
||||
%YAML:1.0
|
||||
|
||||
# Application running time in milliseconds: integer.
|
||||
work_time: 2000
|
||||
|
||||
Pipelines:
|
||||
PL1:
|
||||
source:
|
||||
name: 'Src'
|
||||
latency: 33.0
|
||||
output:
|
||||
dims: [1, 3, 1280, 720]
|
||||
precision: 'U8'
|
||||
|
||||
nodes:
|
||||
- name: 'PP'
|
||||
type: 'Dummy'
|
||||
time: 0.2
|
||||
output:
|
||||
dims: [1, 3, 300, 300]
|
||||
precision: 'U8'
|
||||
|
||||
- name: 'Infer'
|
||||
type: 'Infer'
|
||||
xml: 'face-detection-retail-0004.xml'
|
||||
bin: 'face-detection-retail-0004.bin'
|
||||
device: 'CPU'
|
||||
input_layers:
|
||||
- 'data'
|
||||
output_layers:
|
||||
- 'detection_out'
|
||||
|
||||
edges:
|
||||
- from: 'Src'
|
||||
to: 'PP'
|
||||
- from: 'PP'
|
||||
to: 'Infer'
|
||||
|
||||
# Path to the dump file (*.dot)
|
||||
dump: 'pl1.dot'
|
||||
|
||||
PL2:
|
||||
source:
|
||||
name: 'Src'
|
||||
latency: 50.0
|
||||
output:
|
||||
dims: [1, 3, 1280, 720]
|
||||
precision: 'U8'
|
||||
|
||||
nodes:
|
||||
- name: 'M1_PP'
|
||||
type: 'Dummy'
|
||||
time: 0.2
|
||||
output:
|
||||
dims: [1, 3, 300, 300]
|
||||
precision: 'U8'
|
||||
|
||||
- name: 'M1_Infer'
|
||||
type: 'Infer'
|
||||
xml: 'face-detection-retail-0004.xml'
|
||||
bin: 'face-detection-retail-0004.bin'
|
||||
device: 'CPU'
|
||||
input_layers:
|
||||
- 'data'
|
||||
output_layers:
|
||||
- 'detection_out'
|
||||
|
||||
- name: 'M2_PP'
|
||||
type: 'Dummy'
|
||||
time: 0.2
|
||||
output:
|
||||
dims: [1, 3, 300, 300]
|
||||
precision: 'U8'
|
||||
|
||||
- name: 'M2_Infer'
|
||||
type: 'Infer'
|
||||
xml: 'face-detection-retail-0004.xml'
|
||||
bin: 'face-detection-retail-0004.bin'
|
||||
device: 'CPU'
|
||||
input_layers:
|
||||
- 'data'
|
||||
output_layers:
|
||||
- 'detection_out'
|
||||
|
||||
- name: 'M3_PP'
|
||||
type: 'Dummy'
|
||||
time: 0.2
|
||||
output:
|
||||
dims: [1, 3, 300, 300]
|
||||
precision: 'U8'
|
||||
|
||||
- name: 'M3_Infer'
|
||||
type: 'Infer'
|
||||
xml: 'face-detection-retail-0004.xml'
|
||||
bin: 'face-detection-retail-0004.bin'
|
||||
device: 'CPU'
|
||||
input_layers:
|
||||
- 'data'
|
||||
output_layers:
|
||||
- 'detection_out'
|
||||
|
||||
- name: 'M4_PP'
|
||||
type: 'Dummy'
|
||||
time: 0.2
|
||||
output:
|
||||
dims: [1, 3, 300, 300]
|
||||
precision: 'U8'
|
||||
|
||||
- name: 'M4_Infer'
|
||||
type: 'Infer'
|
||||
xml: 'face-detection-retail-0004.xml'
|
||||
bin: 'face-detection-retail-0004.bin'
|
||||
device: 'CPU'
|
||||
input_layers:
|
||||
- 'data'
|
||||
output_layers:
|
||||
- 'detection_out'
|
||||
|
||||
- name: 'M5_PP'
|
||||
type: 'Dummy'
|
||||
time: 0.2
|
||||
output:
|
||||
dims: [1, 3, 300, 300]
|
||||
precision: 'U8'
|
||||
|
||||
- name: 'M5_Infer'
|
||||
type: 'Infer'
|
||||
xml: 'face-detection-retail-0004.xml'
|
||||
bin: 'face-detection-retail-0004.bin'
|
||||
device: 'CPU'
|
||||
input_layers:
|
||||
- 'data'
|
||||
output_layers:
|
||||
- 'detection_out'
|
||||
|
||||
edges:
|
||||
- from: 'Src'
|
||||
to: 'M1_PP'
|
||||
- from: 'M1_PP'
|
||||
to: 'M1_Infer'
|
||||
- from: 'M1_Infer'
|
||||
to: 'M2_PP'
|
||||
- from: 'M2_PP'
|
||||
to: 'M2_Infer'
|
||||
- from: 'M2_Infer'
|
||||
to: 'M3_PP'
|
||||
- from: 'M3_PP'
|
||||
to: 'M3_Infer'
|
||||
- from: 'M3_Infer'
|
||||
to: 'M4_PP'
|
||||
- from: 'M4_PP'
|
||||
to: 'M4_Infer'
|
||||
- from: 'M4_Infer'
|
||||
to: 'M5_PP'
|
||||
- from: 'M5_PP'
|
||||
to: 'M5_Infer'
|
||||
|
||||
dump: 'pl2.dot'
|
||||
|
||||
PL3:
|
||||
source:
|
||||
name: 'Src'
|
||||
latency: 33.0
|
||||
output:
|
||||
dims: [1, 3, 1280, 720]
|
||||
precision: 'U8'
|
||||
|
||||
nodes:
|
||||
- name: 'PP'
|
||||
type: 'Dummy'
|
||||
time: 0.2
|
||||
output:
|
||||
dims: [1, 3, 300, 300]
|
||||
precision: 'U8'
|
||||
|
||||
- name: 'Infer'
|
||||
type: 'Infer'
|
||||
xml: 'face-detection-retail-0004.xml'
|
||||
bin: 'face-detection-retail-0004.bin'
|
||||
device: 'CPU'
|
||||
input_layers:
|
||||
- 'data'
|
||||
output_layers:
|
||||
- 'detection_out'
|
||||
|
||||
edges:
|
||||
- from: 'Src'
|
||||
to: 'PP'
|
||||
- from: 'PP'
|
||||
to: 'Infer'
|
||||
|
||||
dump: 'pl3.dot'
|
406
modules/gapi/samples/pipeline_modeling_tool.cpp
Normal file
406
modules/gapi/samples/pipeline_modeling_tool.cpp
Normal file
@ -0,0 +1,406 @@
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include <thread>
|
||||
#include <exception>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
#include <opencv2/gapi.hpp>
|
||||
#include <opencv2/highgui.hpp> // cv::CommandLineParser
|
||||
#include <opencv2/core/utils/filesystem.hpp>
|
||||
|
||||
#if defined(_WIN32)
|
||||
#include <windows.h>
|
||||
#endif
|
||||
|
||||
#include "pipeline_modeling_tool/dummy_source.hpp"
|
||||
#include "pipeline_modeling_tool/utils.hpp"
|
||||
#include "pipeline_modeling_tool/pipeline_builder.hpp"
|
||||
|
||||
// Execution mode of the whole application:
//  - REALTIME  - sources emulate their configured latency.
//  - BENCHMARK - sources run with zero latency (max throughput).
enum class AppMode {
    REALTIME,
    BENCHMARK
};

// Converts a command-line string to AppMode.
// Throws std::logic_error on any unsupported value.
static AppMode strToAppMode(const std::string& mode_str) {
    if (mode_str == "realtime") {
        return AppMode::REALTIME;
    } else if (mode_str == "benchmark") {
        return AppMode::BENCHMARK;
    } else {
        // NB: Fixed user-facing typo ("chose" -> "choose").
        throw std::logic_error("Unsupported AppMode: " + mode_str +
                               "\nPlease choose between: realtime and benchmark");
    }
}
|
||||
|
||||
// Generic scalar reader: converts a YAML/FileStorage node to T via
// cv::FileNode's conversion operator. Specialized below for the
// tool's composite types (OutputDescr, Edge, ModelPath).
template <typename T>
T read(const cv::FileNode& node) {
    return static_cast<T>(node);
}
|
||||
|
||||
// Fetches a mandatory child node from a map node.
// Throws std::logic_error if the parent is not a map or the child is absent.
static cv::FileNode check_and_get_fn(const cv::FileNode& fn,
                                     const std::string& field,
                                     const std::string& uplvl) {
    if (fn.isMap()) {
        auto child = fn[field];
        if (!child.empty()) {
            return child;
        }
    }
    throw std::logic_error(uplvl + " must contain field: " + field);
}
|
||||
|
||||
// Overload for the top-level cv::FileStorage: fetches a mandatory
// root-level node. Throws std::logic_error if the field is missing.
static cv::FileNode check_and_get_fn(const cv::FileStorage& fs,
                                     const std::string& field,
                                     const std::string& uplvl) {
    cv::FileNode node = fs[field];
    if (node.empty()) {
        throw std::logic_error(uplvl + " must contain field: " + field);
    }
    return node;
}
|
||||
|
||||
template <typename T, typename FileT>
|
||||
T check_and_read(const FileT& f,
|
||||
const std::string& field,
|
||||
const std::string& uplvl) {
|
||||
auto fn = check_and_get_fn(f, field, uplvl);
|
||||
return read<T>(fn);
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
cv::optional<T> readOpt(const cv::FileNode& fn) {
|
||||
return fn.empty() ? cv::optional<T>() : cv::optional<T>(read<T>(fn));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
std::vector<T> readList(const cv::FileNode& fn,
|
||||
const std::string& field,
|
||||
const std::string& uplvl) {
|
||||
auto fn_field = check_and_get_fn(fn, field, uplvl);
|
||||
if (!fn_field.isSeq()) {
|
||||
throw std::logic_error(field + " in " + uplvl + " must be a sequence");
|
||||
}
|
||||
|
||||
std::vector<T> vec;
|
||||
for (auto iter : fn_field) {
|
||||
vec.push_back(read<T>(iter));
|
||||
}
|
||||
return vec;
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
std::vector<T> readVec(const cv::FileNode& fn,
|
||||
const std::string& field,
|
||||
const std::string& uplvl) {
|
||||
auto fn_field = check_and_get_fn(fn, field, uplvl);
|
||||
|
||||
std::vector<T> vec;
|
||||
fn_field >> vec;
|
||||
return vec;
|
||||
}
|
||||
|
||||
static int strToPrecision(const std::string& precision) {
|
||||
static std::unordered_map<std::string, int> str_to_precision = {
|
||||
{"U8", CV_8U}, {"FP32", CV_32F}, {"FP16", CV_16F}
|
||||
};
|
||||
auto it = str_to_precision.find(precision);
|
||||
if (it == str_to_precision.end()) {
|
||||
throw std::logic_error("Unsupported precision: " + precision);
|
||||
}
|
||||
return it->second;
|
||||
}
|
||||
|
||||
template <>
|
||||
OutputDescr read<OutputDescr>(const cv::FileNode& fn) {
|
||||
auto dims = readVec<int>(fn, "dims", "output");
|
||||
auto str_prec = check_and_read<std::string>(fn, "precision", "output");
|
||||
return OutputDescr{dims, strToPrecision(str_prec)};
|
||||
}
|
||||
|
||||
template <>
|
||||
Edge read<Edge>(const cv::FileNode& fn) {
|
||||
auto from = check_and_read<std::string>(fn, "from", "edge");
|
||||
auto to = check_and_read<std::string>(fn, "to", "edge");
|
||||
|
||||
auto splitNameAndPort = [](const std::string& str) {
|
||||
auto pos = str.find(':');
|
||||
auto name =
|
||||
pos == std::string::npos ? str : std::string(str.c_str(), pos);
|
||||
size_t port =
|
||||
pos == std::string::npos ? 0 : std::atoi(str.c_str() + pos + 1);
|
||||
return std::make_pair(name, port);
|
||||
};
|
||||
|
||||
auto p1 = splitNameAndPort(from);
|
||||
auto p2 = splitNameAndPort(to);
|
||||
return Edge{Edge::P{p1.first, p1.second}, Edge::P{p2.first, p2.second}};
|
||||
}
|
||||
|
||||
// Root directory for model files, taken from the PIPELINE_MODELS_PATH
// environment variable; falls back to the current directory (".").
// The value is resolved once and cached for subsequent calls.
static std::string getModelsPath() {
    static const std::string models_path = []() {
        const char* env = std::getenv("PIPELINE_MODELS_PATH");
        return std::string(env ? env : ".");
    }();
    return models_path;
}
|
||||
|
||||
// Parses the model location from a node: either an OpenVINO IR pair
// (xml + bin) or a pre-compiled blob. Relative paths are resolved
// against getModelsPath(). Throws std::logic_error if neither form
// is present.
template <>
ModelPath read<ModelPath>(const cv::FileNode& fn) {
    using cv::utils::fs::join;
    if (!fn["xml"].empty() && !fn["bin"].empty()) {
        return ModelPath{LoadPath{join(getModelsPath(), fn["xml"].string()),
                                  join(getModelsPath(), fn["bin"].string())}};
    } else if (!fn["blob"].empty()){
        return ModelPath{ImportPath{join(getModelsPath(), fn["blob"].string())}};
    } else {
        const std::string emsg = R""""(
Path to OpenVINO model must be specified in either of two formats:
1.
xml: path to *.xml
bin: path to *.bin
2.
blob: path to *.blob
)"""";
        throw std::logic_error(emsg);
    }
}
|
||||
|
||||
// Converts a command-line/config string to PLMode
// (pipeline execution mode, see pipeline.hpp).
// Throws std::logic_error on any unsupported value.
static PLMode strToPLMode(const std::string& mode_str) {
    if (mode_str == "streaming") {
        return PLMode::STREAMING;
    } else if (mode_str == "regular") {
        return PLMode::REGULAR;
    } else {
        // NB: Fixed user-facing typo ("chose" -> "choose").
        throw std::logic_error("Unsupported PLMode: " + mode_str +
                               "\nPlease choose between: streaming and regular");
    }
}
|
||||
|
||||
// Splits a comma-separated pipeline list into individual names.
// No whitespace trimming is performed (spaces become part of a name).
static std::vector<std::string> parseExecList(const std::string& exec_list) {
    std::vector<std::string> names;
    std::stringstream stream(exec_list);
    for (std::string token; std::getline(stream, token, ','); ) {
        names.push_back(token);
    }
    return names;
}
|
||||
|
||||
static void loadConfig(const std::string& filename,
|
||||
std::map<std::string, std::string>& config) {
|
||||
cv::FileStorage fs(filename, cv::FileStorage::READ);
|
||||
if (!fs.isOpened()) {
|
||||
throw std::runtime_error("Failed to load config: " + filename);
|
||||
}
|
||||
|
||||
cv::FileNode root = fs.root();
|
||||
for (auto it = root.begin(); it != root.end(); ++it) {
|
||||
auto device = *it;
|
||||
if (!device.isMap()) {
|
||||
throw std::runtime_error("Failed to parse config: " + filename);
|
||||
}
|
||||
for (auto item : device) {
|
||||
config.emplace(item.name(), item.string());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char* argv[]) {
|
||||
#if defined(_WIN32)
|
||||
timeBeginPeriod(1);
|
||||
#endif
|
||||
try {
|
||||
const std::string keys =
|
||||
"{ h help | | Print this help message. }"
|
||||
"{ cfg | | Path to the config which is either"
|
||||
" YAML file or string. }"
|
||||
"{ load_config | | Optional. Path to XML/YAML/JSON file"
|
||||
" to load custom IE parameters. }"
|
||||
"{ cache_dir | | Optional. Enables caching of loaded models"
|
||||
" to specified directory. }"
|
||||
"{ log_file | | Optional. If file is specified, app will"
|
||||
" dump expanded execution information. }"
|
||||
"{ pl_mode | streaming | Optional. Pipeline mode: streaming/regular"
|
||||
" if it's specified will be applied for"
|
||||
" every pipeline. }"
|
||||
"{ qc | 1 | Optional. Calculated automatically by G-API"
|
||||
" if set to 0. If it's specified will be"
|
||||
" applied for every pipeline. }"
|
||||
"{ app_mode | realtime | Application mode (realtime/benchmark). }"
|
||||
"{ exec_list | | A comma-separated list of pipelines that"
|
||||
" will be executed. Spaces around commas"
|
||||
" are prohibited. }";
|
||||
|
||||
cv::CommandLineParser cmd(argc, argv, keys);
|
||||
if (cmd.has("help")) {
|
||||
cmd.printMessage();
|
||||
return 0;
|
||||
}
|
||||
|
||||
const auto cfg = cmd.get<std::string>("cfg");
|
||||
const auto load_config = cmd.get<std::string>("load_config");
|
||||
const auto cached_dir = cmd.get<std::string>("cache_dir");
|
||||
const auto log_file = cmd.get<std::string>("log_file");
|
||||
const auto pl_mode = strToPLMode(cmd.get<std::string>("pl_mode"));
|
||||
const auto qc = cmd.get<int>("qc");
|
||||
const auto app_mode = strToAppMode(cmd.get<std::string>("app_mode"));
|
||||
const auto exec_str = cmd.get<std::string>("exec_list");
|
||||
|
||||
cv::FileStorage fs;
|
||||
if (cfg.empty()) {
|
||||
throw std::logic_error("Config must be specified via --cfg option");
|
||||
}
|
||||
// NB: *.yml
|
||||
if (cfg.size() < 5) {
|
||||
throw std::logic_error("--cfg string must contain at least 5 symbols"
|
||||
" to determine if it's a file (*.yml) a or string");
|
||||
}
|
||||
if (cfg.substr(cfg.size() - 4, cfg.size()) == ".yml") {
|
||||
if (!fs.open(cfg, cv::FileStorage::READ)) {
|
||||
throw std::logic_error("Failed to open config file: " + cfg);
|
||||
}
|
||||
} else {
|
||||
fs = cv::FileStorage(cfg, cv::FileStorage::FORMAT_YAML |
|
||||
cv::FileStorage::MEMORY);
|
||||
}
|
||||
|
||||
std::map<std::string, std::string> config;
|
||||
if (!load_config.empty()) {
|
||||
loadConfig(load_config, config);
|
||||
}
|
||||
// NB: Takes priority over config from file
|
||||
if (!cached_dir.empty()) {
|
||||
config =
|
||||
std::map<std::string, std::string>{{"CACHE_DIR", cached_dir}};
|
||||
}
|
||||
|
||||
const double work_time_ms =
|
||||
check_and_read<double>(fs, "work_time", "Config");
|
||||
if (work_time_ms < 0) {
|
||||
throw std::logic_error("work_time must be positive");
|
||||
}
|
||||
|
||||
auto pipelines_fn = check_and_get_fn(fs, "Pipelines", "Config");
|
||||
if (!pipelines_fn.isMap()) {
|
||||
throw std::logic_error("Pipelines field must be a map");
|
||||
}
|
||||
|
||||
auto exec_list = !exec_str.empty() ? parseExecList(exec_str)
|
||||
: pipelines_fn.keys();
|
||||
|
||||
|
||||
std::vector<Pipeline::Ptr> pipelines;
|
||||
pipelines.reserve(exec_list.size());
|
||||
// NB: Build pipelines based on config information
|
||||
PipelineBuilder builder;
|
||||
for (const auto& name : exec_list) {
|
||||
const auto& pl_fn = check_and_get_fn(pipelines_fn, name, "Pipelines");
|
||||
builder.setName(name);
|
||||
// NB: Set source
|
||||
{
|
||||
const auto& src_fn = check_and_get_fn(pl_fn, "source", name);
|
||||
auto src_name =
|
||||
check_and_read<std::string>(src_fn, "name", "source");
|
||||
auto latency =
|
||||
check_and_read<double>(src_fn, "latency", "source");
|
||||
auto output =
|
||||
check_and_read<OutputDescr>(src_fn, "output", "source");
|
||||
// NB: In case BENCHMARK mode sources work with zero latency.
|
||||
if (app_mode == AppMode::BENCHMARK) {
|
||||
latency = 0.0;
|
||||
}
|
||||
builder.setSource(src_name, latency, output);
|
||||
}
|
||||
|
||||
const auto& nodes_fn = check_and_get_fn(pl_fn, "nodes", name);
|
||||
if (!nodes_fn.isSeq()) {
|
||||
throw std::logic_error("nodes in " + name + " must be a sequence");
|
||||
}
|
||||
for (auto node_fn : nodes_fn) {
|
||||
auto node_name =
|
||||
check_and_read<std::string>(node_fn, "name", "node");
|
||||
auto node_type =
|
||||
check_and_read<std::string>(node_fn, "type", "node");
|
||||
if (node_type == "Dummy") {
|
||||
auto time =
|
||||
check_and_read<double>(node_fn, "time", node_name);
|
||||
if (time < 0) {
|
||||
throw std::logic_error(node_name + " time must be positive");
|
||||
}
|
||||
auto output =
|
||||
check_and_read<OutputDescr>(node_fn, "output", node_name);
|
||||
builder.addDummy(node_name, time, output);
|
||||
} else if (node_type == "Infer") {
|
||||
InferParams params;
|
||||
params.path = read<ModelPath>(node_fn);
|
||||
params.device =
|
||||
check_and_read<std::string>(node_fn, "device", node_name);
|
||||
params.input_layers =
|
||||
readList<std::string>(node_fn, "input_layers", node_name);
|
||||
params.output_layers =
|
||||
readList<std::string>(node_fn, "output_layers", node_name);
|
||||
params.config = config;
|
||||
builder.addInfer(node_name, params);
|
||||
} else {
|
||||
throw std::logic_error("Unsupported node type: " + node_type);
|
||||
}
|
||||
}
|
||||
|
||||
const auto edges_fn = check_and_get_fn(pl_fn, "edges", name);
|
||||
if (!edges_fn.isSeq()) {
|
||||
throw std::logic_error("edges in " + name + " must be a sequence");
|
||||
}
|
||||
for (auto edge_fn : edges_fn) {
|
||||
auto edge = read<Edge>(edge_fn);
|
||||
builder.addEdge(edge);
|
||||
}
|
||||
|
||||
// NB: Pipeline mode from config takes priority over cmd.
|
||||
auto mode = readOpt<std::string>(pl_fn["mode"]);
|
||||
builder.setMode(mode.has_value() ? strToPLMode(mode.value()) : pl_mode);
|
||||
|
||||
// NB: Queue capacity from config takes priority over cmd.
|
||||
auto config_qc = readOpt<int>(pl_fn["queue_capacity"]);
|
||||
auto queue_capacity = config_qc.has_value() ? config_qc.value() : qc;
|
||||
// NB: 0 is special constant that means
|
||||
// queue capacity should be calculated automatically.
|
||||
if (queue_capacity != 0) {
|
||||
builder.setQueueCapacity(queue_capacity);
|
||||
}
|
||||
|
||||
auto dump = readOpt<std::string>(pl_fn["dump"]);
|
||||
if (dump) {
|
||||
builder.setDumpFilePath(dump.value());
|
||||
}
|
||||
|
||||
pipelines.emplace_back(builder.build());
|
||||
}
|
||||
|
||||
// NB: Compille pipelines
|
||||
for (size_t i = 0; i < pipelines.size(); ++i) {
|
||||
pipelines[i]->compile();
|
||||
}
|
||||
|
||||
// NB: Execute pipelines
|
||||
std::vector<std::thread> threads(pipelines.size());
|
||||
for (size_t i = 0; i < pipelines.size(); ++i) {
|
||||
threads[i] = std::thread([&, i]() {
|
||||
pipelines[i]->run(work_time_ms);
|
||||
});
|
||||
}
|
||||
|
||||
std::ofstream file;
|
||||
if (!log_file.empty()) {
|
||||
file.open(log_file);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < threads.size(); ++i) {
|
||||
threads[i].join();
|
||||
if (file.is_open()) {
|
||||
file << pipelines[i]->report().toStr(true) << std::endl;
|
||||
}
|
||||
std::cout << pipelines[i]->report().toStr() << std::endl;
|
||||
}
|
||||
} catch (std::exception& e) {
|
||||
std::cout << e.what() << std::endl;
|
||||
throw;
|
||||
}
|
||||
return 0;
|
||||
}
|
66
modules/gapi/samples/pipeline_modeling_tool/dummy_source.hpp
Normal file
66
modules/gapi/samples/pipeline_modeling_tool/dummy_source.hpp
Normal file
@ -0,0 +1,66 @@
|
||||
#ifndef OPENCV_GAPI_PIPELINE_MODELING_TOOL_DUMMY_SOURCE_HPP
|
||||
#define OPENCV_GAPI_PIPELINE_MODELING_TOOL_DUMMY_SOURCE_HPP
|
||||
|
||||
#include <thread>
|
||||
#include <memory>
|
||||
#include <chrono>
|
||||
|
||||
#include <opencv2/gapi.hpp>
|
||||
#include <opencv2/gapi/streaming/cap.hpp> // cv::gapi::wip::IStreamSource
|
||||
|
||||
#include "utils.hpp"
|
||||
|
||||
// Synthetic frame source that emulates a camera with a fixed latency:
// it returns the same pre-generated random cv::Mat on every pull() and
// sleeps as needed so consecutive frames are at least m_latency
// milliseconds apart.
class DummySource final: public cv::gapi::wip::IStreamSource {
public:
    using Ptr = std::shared_ptr<DummySource>;
    DummySource(const double latency,
                const OutputDescr& output);
    bool pull(cv::gapi::wip::Data& data) override;
    cv::GMetaArg descr_of() const override;

private:
    // Requested inter-frame interval, milliseconds.
    double m_latency;
    // The single frame returned on every pull (random data, filled once).
    cv::Mat m_mat;
    using TimePoint =
        std::chrono::time_point<std::chrono::high_resolution_clock>;
    // Timestamp of the previous pull; empty until the first pull happens.
    cv::optional<TimePoint> m_prev_pull_tp;
};

DummySource::DummySource(const double latency,
                         const OutputDescr& output)
    : m_latency(latency), m_mat(output.dims, output.precision) {
    if (output.dims.size() == 1) {
        //FIXME: Well-known 1D mat WA
        m_mat.dims = 1;
    }
    utils::generateRandom(m_mat);
}

// Emits one frame, throttling so that the configured latency elapses
// between consecutive pulls; always returns true (endless stream).
bool DummySource::pull(cv::gapi::wip::Data& data) {
    using namespace std::chrono;
    using namespace cv::gapi::streaming;
    // NB: In case it's the first pull.
    if (!m_prev_pull_tp) {
        m_prev_pull_tp = cv::util::make_optional(high_resolution_clock::now());
    }
    // NB: Just increase reference counter not to release mat memory
    // after assigning it to the data.
    cv::Mat mat = m_mat;
    // Sleep off whatever remains of the latency budget since last pull.
    auto end = high_resolution_clock::now();
    auto elapsed =
        duration_cast<duration<double, std::milli>>(end - *m_prev_pull_tp).count();
    auto delta = m_latency - elapsed;
    if (delta > 0) {
        utils::sleep(delta);
    }
    // Stamp the frame with its wall-clock emission time; downstream
    // latency measurement subtracts this timestamp.
    data.meta[meta_tag::timestamp] = int64_t{utils::timestamp<milliseconds>()};
    data = mat;
    m_prev_pull_tp = cv::util::make_optional(high_resolution_clock::now());
    return true;
}

// Metadata of the produced frames (always the same single Mat).
cv::GMetaArg DummySource::descr_of() const {
    return cv::GMetaArg{cv::descr_of(m_mat)};
}
|
||||
|
||||
#endif // OPENCV_GAPI_PIPELINE_MODELING_TOOL_DUMMY_SOURCE_HPP
|
204
modules/gapi/samples/pipeline_modeling_tool/pipeline.hpp
Normal file
204
modules/gapi/samples/pipeline_modeling_tool/pipeline.hpp
Normal file
@ -0,0 +1,204 @@
|
||||
#ifndef OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_HPP
|
||||
#define OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_HPP
|
||||
|
||||
// Aggregated performance statistics for a single pipeline run.
struct PerfReport {
    std::string name;
    double avg_latency = 0.0;        // mean per-frame latency, ms
    double throughput = 0.0;         // frames per second
    int64_t first_run_latency = 0;   // latency of the very first frame, ms
    int64_t elapsed = 0;             // total run time, ms
    int64_t compilation_time = 0;    // graph compilation time, ms
    std::vector<int64_t> latencies;  // raw per-frame latencies, ms

    // Renders a one-line summary; expanded=true appends a per-frame
    // latency breakdown.
    std::string toStr(bool expanded = false) const;
};

std::string PerfReport::toStr(bool expanded) const {
    std::ostringstream out;
    out << name << ": Compilation time: " << compilation_time << " ms; "
        << "Average latency: " << avg_latency << " ms; Throughput: "
        << throughput << " FPS; First latency: "
        << first_run_latency << " ms";

    if (expanded) {
        out << "\nTotal processed frames: " << latencies.size()
            << "\nTotal elapsed time: " << elapsed << " ms" << std::endl;
        size_t frame_idx = 0;
        for (auto latency : latencies) {
            out << std::endl
                << "Frame:" << frame_idx++ << "\nLatency: "
                << latency << " ms";
        }
    }

    return out.str();
}
|
||||
|
||||
class Pipeline {
|
||||
public:
|
||||
using Ptr = std::shared_ptr<Pipeline>;
|
||||
|
||||
Pipeline(std::string&& name,
|
||||
cv::GComputation&& comp,
|
||||
cv::gapi::wip::IStreamSource::Ptr&& src,
|
||||
cv::GCompileArgs&& args,
|
||||
const size_t num_outputs);
|
||||
|
||||
void compile();
|
||||
void run(double work_time_ms);
|
||||
const PerfReport& report() const;
|
||||
|
||||
virtual ~Pipeline() = default;
|
||||
|
||||
protected:
|
||||
struct RunPerf {
|
||||
int64_t elapsed = 0;
|
||||
std::vector<int64_t> latencies;
|
||||
};
|
||||
|
||||
virtual void _compile() = 0;
|
||||
virtual RunPerf _run(double work_time_ms) = 0;
|
||||
|
||||
std::string m_name;
|
||||
cv::GComputation m_comp;
|
||||
cv::gapi::wip::IStreamSource::Ptr m_src;
|
||||
cv::GCompileArgs m_args;
|
||||
size_t m_num_outputs;
|
||||
PerfReport m_perf;
|
||||
};
|
||||
|
||||
Pipeline::Pipeline(std::string&& name,
|
||||
cv::GComputation&& comp,
|
||||
cv::gapi::wip::IStreamSource::Ptr&& src,
|
||||
cv::GCompileArgs&& args,
|
||||
const size_t num_outputs)
|
||||
: m_name(std::move(name)),
|
||||
m_comp(std::move(comp)),
|
||||
m_src(std::move(src)),
|
||||
m_args(std::move(args)),
|
||||
m_num_outputs(num_outputs) {
|
||||
m_perf.name = m_name;
|
||||
}
|
||||
|
||||
void Pipeline::compile() {
|
||||
m_perf.compilation_time =
|
||||
utils::measure<std::chrono::milliseconds>([this]() {
|
||||
_compile();
|
||||
});
|
||||
}
|
||||
|
||||
void Pipeline::run(double work_time_ms) {
|
||||
auto run_perf = _run(work_time_ms);
|
||||
|
||||
m_perf.elapsed = run_perf.elapsed;
|
||||
m_perf.latencies = std::move(run_perf.latencies);
|
||||
|
||||
m_perf.avg_latency =
|
||||
std::accumulate(m_perf.latencies.begin(),
|
||||
m_perf.latencies.end(),
|
||||
0.0) / static_cast<double>(m_perf.latencies.size());
|
||||
m_perf.throughput =
|
||||
(m_perf.latencies.size() / static_cast<double>(m_perf.elapsed)) * 1000;
|
||||
|
||||
m_perf.first_run_latency = m_perf.latencies[0];
|
||||
}
|
||||
|
||||
const PerfReport& Pipeline::report() const {
|
||||
return m_perf;
|
||||
}
|
||||
|
||||
// Pipeline executed with G-API's streaming backend: the source is
// handed to the compiled graph, which pulls frames asynchronously.
class StreamingPipeline : public Pipeline {
public:
    using Pipeline::Pipeline;

private:
    void _compile() override {
        m_compiled =
            m_comp.compileStreaming({m_src->descr_of()},
                                     cv::GCompileArgs(m_args));
    }

    Pipeline::RunPerf _run(double work_time_ms) override {
        // NB: Setup.
        using namespace std::chrono;
        // NB: N-1 buffers + timestamp.
        std::vector<cv::Mat> out_mats(m_num_outputs - 1);
        int64_t start_ts = -1;
        cv::GRunArgsP pipeline_outputs;
        for (auto& m : out_mats) {
            pipeline_outputs += cv::gout(m);
        }
        // The last graph output is the source-side timestamp; per-frame
        // latency is "now minus that timestamp" at pull time.
        pipeline_outputs += cv::gout(start_ts);
        m_compiled.setSource(m_src);

        // NB: Start execution & measure performance statistics.
        Pipeline::RunPerf perf;
        auto start = high_resolution_clock::now();
        m_compiled.start();
        while (m_compiled.pull(cv::GRunArgsP{pipeline_outputs})) {
            int64_t latency = utils::timestamp<milliseconds>() - start_ts;

            perf.latencies.push_back(latency);
            perf.elapsed = duration_cast<milliseconds>(
                    high_resolution_clock::now() - start).count();

            // Stop once the time budget is exhausted.
            if (perf.elapsed >= work_time_ms) {
                m_compiled.stop();
                break;
            }
        };
        return perf;
    }

    cv::GStreamingCompiled m_compiled;
};
|
||||
|
||||
// Pipeline executed with the regular (synchronous) G-API backend:
// this thread pulls frames from the source itself and invokes the
// compiled graph once per frame.
class RegularPipeline : public Pipeline {
public:
    using Pipeline::Pipeline;

private:
    void _compile() override {
        m_compiled =
            m_comp.compile({m_src->descr_of()},
                            cv::GCompileArgs(m_args));
    }

    Pipeline::RunPerf _run(double work_time_ms) override {
        // NB: Setup
        using namespace std::chrono;
        cv::gapi::wip::Data d;
        std::vector<cv::Mat> out_mats(m_num_outputs);
        cv::GRunArgsP pipeline_outputs;
        for (auto& m : out_mats) {
            pipeline_outputs += cv::gout(m);
        }

        // NB: Start execution & measure performance statistics.
        Pipeline::RunPerf perf;
        auto start = high_resolution_clock::now();
        while (m_src->pull(d)) {
            auto in_mat = cv::util::get<cv::Mat>(d);
            // Here "latency" is just the graph execution time for one
            // frame (source wait time is not included).
            int64_t latency = utils::measure<milliseconds>([&]{
                m_compiled(cv::gin(in_mat), cv::GRunArgsP{pipeline_outputs});
            });

            perf.latencies.push_back(latency);
            perf.elapsed = duration_cast<milliseconds>(
                    high_resolution_clock::now() - start).count();

            // Stop once the time budget is exhausted.
            if (perf.elapsed >= work_time_ms) {
                break;
            }
        };
        return perf;
    }

    cv::GCompiled m_compiled;
};
|
||||
|
||||
// How a pipeline graph is compiled and executed:
//  - REGULAR   - cv::GCompiled; the app pulls frames synchronously.
//  - STREAMING - cv::GStreamingCompiled; G-API drives the source.
enum class PLMode {
    REGULAR,
    STREAMING
};
|
||||
|
||||
#endif // OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_HPP
|
502
modules/gapi/samples/pipeline_modeling_tool/pipeline_builder.hpp
Normal file
502
modules/gapi/samples/pipeline_modeling_tool/pipeline_builder.hpp
Normal file
@ -0,0 +1,502 @@
|
||||
#ifndef OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_BUILDER_HPP
|
||||
#define OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_BUILDER_HPP
|
||||
|
||||
#include <map>
|
||||
|
||||
#include <opencv2/gapi/infer.hpp> // cv::gapi::GNetPackage
|
||||
#include <opencv2/gapi/streaming/cap.hpp> // cv::gapi::wip::IStreamSource
|
||||
#include <opencv2/gapi/infer/ie.hpp> // cv::gapi::ie::Params
|
||||
#include <opencv2/gapi/gcommon.hpp> // cv::gapi::GCompileArgs
|
||||
#include <opencv2/gapi/cpu/gcpukernel.hpp> // GAPI_OCV_KERNEL
|
||||
#include <opencv2/gapi/gkernel.hpp> // G_API_OP
|
||||
|
||||
#include "pipeline.hpp"
|
||||
#include "utils.hpp"
|
||||
|
||||
// A directed connection between two pipeline nodes, each endpoint
// addressed by node name plus output/input port index.
struct Edge {
    struct P {
        std::string name;
        size_t port;
    };

    P src;
    P dst;
};
|
||||
|
||||
// Graph node that performs an operation: wraps a callable which,
// given the input proto args, appends its outputs to the output vector.
struct CallNode {
    using F = std::function<void(const cv::GProtoArgs&, cv::GProtoArgs&)>;

    std::string name;
    F run;
};
|
||||
|
||||
// Graph node that holds a data object (cv::GMat etc.) produced by a
// CallNode; the arg stays empty until the graph is expanded.
struct DataNode {
    cv::optional<cv::GProtoArg> arg;
};
|
||||
|
||||
// Generic DAG node: either a CallNode or a DataNode.
// Inputs are weak pointers to avoid ownership cycles in the graph.
struct Node {
    using Ptr = std::shared_ptr<Node>;
    using WPtr = std::weak_ptr<Node>;
    using Kind = cv::util::variant<CallNode, DataNode>;

    std::vector<Node::WPtr> in_nodes;
    std::vector<Node::Ptr> out_nodes;
    Kind kind;
};
|
||||
|
||||
// A synthetic pipeline operation: produces a pre-generated random cv::Mat
// of the configured shape/precision and busy-waits for a configured number
// of milliseconds, so a pipeline's timing can be modeled without real kernels.
struct DummyCall {
    G_API_OP(GDummy,
             <cv::GMat(cv::GMat, double, OutputDescr)>,
             "custom.dummy") {
        // Output meta is fully defined by the requested OutputDescr;
        // the input meta is ignored.
        static cv::GMatDesc outMeta(const cv::GMatDesc& /* in */,
                                    double /* time */,
                                    const OutputDescr& output) {
            if (output.dims.size() == 2) {
                // 2D dims are interpreted as a single-channel W x H image.
                return cv::GMatDesc(output.precision,
                                    1,
                                    cv::Size(output.dims[0], output.dims[1]));
            }
            return cv::GMatDesc(output.precision, output.dims);
        }
    };

    struct DummyState {
        cv::Mat mat;  // random mat generated once in setup()
    };

    // NB: Generate random mat once and then
    // copy to dst buffer on every iteration.
    GAPI_OCV_KERNEL_ST(GCPUDummy, GDummy, DummyState) {
        static void setup(const cv::GMatDesc& /*in*/,
                          double /*time*/,
                          const OutputDescr& output,
                          std::shared_ptr<DummyState>& state,
                          const cv::GCompileArgs& /*args*/) {
            state.reset(new DummyState{});
            state->mat.create(output.dims, output.precision);
            utils::generateRandom(state->mat);
        }

        // Copies the pre-generated mat to the output, then spins until
        // `time` milliseconds have elapsed to emulate CPU-bound work
        // (deliberately a busy-wait, not a sleep).
        static void run(const cv::Mat& /*in_mat*/,
                        double time,
                        const OutputDescr& /*output*/,
                        cv::Mat& out_mat,
                        DummyState& state) {
            using namespace std::chrono;
            double total = 0;
            auto start = high_resolution_clock::now();
            state.mat.copyTo(out_mat);
            while (total < time) {
                total = duration_cast<duration<double, std::milli>>(
                        high_resolution_clock::now() - start).count();
            }
        }
    };

    void operator()(const cv::GProtoArgs& inputs, cv::GProtoArgs& outputs);

    // A dummy always has exactly one input and one output.
    size_t numInputs() const { return 1; }
    size_t numOutputs() const { return 1; }

    double time;         // emulated execution time, milliseconds
    OutputDescr output;  // shape/precision of the produced mat
};
|
||||
|
||||
// Expands this call into G-API expressions: consumes exactly one GMat
// proto argument and appends the GDummy operation's result to `outputs`.
void DummyCall::operator()(const cv::GProtoArgs& inputs,
                           cv::GProtoArgs& outputs) {
    GAPI_Assert(inputs.size() == 1u);
    GAPI_Assert(cv::util::holds_alternative<cv::GMat>(inputs[0]));
    GAPI_Assert(outputs.empty());
    const auto& in_mat = cv::util::get<cv::GMat>(inputs[0]);
    outputs.emplace_back(GDummy::on(in_mat, time, output));
}
|
||||
|
||||
// An inference pipeline operation: one proto input per model input layer,
// one proto output per model output layer. `tag` identifies the network
// registered via cv::gapi::networks in PipelineBuilder::addInfer.
struct InferCall {
    void operator()(const cv::GProtoArgs& inputs, cv::GProtoArgs& outputs);
    size_t numInputs() const { return input_layers.size(); }
    size_t numOutputs() const { return output_layers.size(); }

    std::string tag;
    std::vector<std::string> input_layers;
    std::vector<std::string> output_layers;
};
|
||||
|
||||
// Expands this call into a generic G-API inference: binds each incoming
// proto argument to its model input layer by position and appends one
// output per requested output layer.
void InferCall::operator()(const cv::GProtoArgs& inputs,
                           cv::GProtoArgs& outputs) {
    GAPI_Assert(inputs.size() == input_layers.size());
    GAPI_Assert(outputs.empty());

    // TODO: Add an opportunity not specify input/output layers in case
    // there is only single layer.
    cv::GInferInputs infer_ins;
    size_t idx = 0;
    for (const auto& layer_name : input_layers) {
        // TODO: Support GFrame as well.
        GAPI_Assert(cv::util::holds_alternative<cv::GMat>(inputs[idx]));
        infer_ins[layer_name] = cv::util::get<cv::GMat>(inputs[idx]);
        ++idx;
    }
    auto infer_outs = cv::gapi::infer<cv::gapi::Generic>(tag, infer_ins);
    for (const auto& layer_name : output_layers) {
        outputs.emplace_back(infer_outs.at(layer_name));
    }
}
|
||||
|
||||
// The pipeline's entry operation: no inputs, a single output that the
// builder treats as the graph input.
struct SourceCall {
    void operator()(const cv::GProtoArgs& inputs, cv::GProtoArgs& outputs);
    size_t numInputs() const { return 0; }
    size_t numOutputs() const { return 1; }
};
|
||||
|
||||
// Emits the graph entry point: a source consumes nothing and exposes
// exactly one fresh proto output.
void SourceCall::operator()(const cv::GProtoArgs& inputs,
                            cv::GProtoArgs& outputs) {
    GAPI_Assert(inputs.empty());
    GAPI_Assert(outputs.empty());
    // NB: NV12 isn't exposed, so the source always produces a GMat.
    cv::GMat source_out;
    outputs.emplace_back(source_out);
}
|
||||
|
||||
// Model specified as an OpenVINO IR pair (*.xml topology + *.bin weights).
struct LoadPath {
    std::string xml;
    std::string bin;
};

// Model specified as a pre-compiled *.blob (import path).
struct ImportPath {
    std::string blob;
};

// A model is given in exactly one of the two forms above.
using ModelPath = cv::util::variant<ImportPath, LoadPath>;
|
||||
|
||||
// Everything needed to register one inference node:
// the network identity, where to load it from, the target device,
// layer bindings, and backend plugin configuration key/value pairs.
struct InferParams {
    std::string name;
    ModelPath path;
    std::string device;
    std::vector<std::string> input_layers;
    std::vector<std::string> output_layers;
    std::map<std::string, std::string> config;
};
|
||||
|
||||
// Incrementally assembles a pipeline description (nodes + edges) and turns
// it into a runnable Pipeline via build(). The builder is reusable:
// build() resets its state for the next pipeline.
class PipelineBuilder {
public:
    PipelineBuilder();

    // Adds a synthetic CPU-burning node (see DummyCall).
    void addDummy(const std::string& name,
                  const double time,
                  const OutputDescr& output);

    // Adds an inference node and registers its network (see InferParams).
    void addInfer(const std::string& name, const InferParams& params);

    // Installs the single DummySource feeding the pipeline.
    void setSource(const std::string& name,
                   double latency,
                   const OutputDescr& output);

    // Connects "src:port -> dst:port"; both nodes must already exist.
    void addEdge(const Edge& edge);
    void setMode(PLMode mode);
    void setDumpFilePath(const std::string& dump);
    void setQueueCapacity(const size_t qc);
    void setName(const std::string& name);

    // Finalizes the current description and resets the builder.
    Pipeline::Ptr build();

private:
    // Common node-registration path for all add*() methods.
    template <typename CallT>
    void addCall(const std::string& name,
                 CallT&& call);

    Pipeline::Ptr construct();

    template <typename K, typename V>
    using M = std::unordered_map<K, V>;

    // Mutable build state; swapped out wholesale by build().
    struct State {
        // NOTE(review): NodeEdges appears unused in the visible code —
        // candidate for removal, verify against the rest of the file.
        struct NodeEdges {
            std::vector<Edge> input_edges;
            std::vector<Edge> output_edges;
        };

        M<std::string, Node::Ptr> calls_map;  // name -> call node
        std::vector<Node::Ptr> all_calls;     // in insertion order

        cv::gapi::GNetPackage networks;
        cv::gapi::GKernelPackage kernels;
        cv::GCompileArgs compile_args;
        cv::gapi::wip::IStreamSource::Ptr src;
        PLMode mode = PLMode::STREAMING;
        std::string name;
    };

    std::unique_ptr<State> m_state;
};
|
||||
|
||||
PipelineBuilder::PipelineBuilder() : m_state(new State{}) { };
|
||||
|
||||
void PipelineBuilder::addDummy(const std::string& name,
|
||||
const double time,
|
||||
const OutputDescr& output) {
|
||||
m_state->kernels.include<DummyCall::GCPUDummy>();
|
||||
addCall(name, DummyCall{time, output});
|
||||
}
|
||||
|
||||
template <typename CallT>
|
||||
void PipelineBuilder::addCall(const std::string& name,
|
||||
CallT&& call) {
|
||||
|
||||
size_t num_inputs = call.numInputs();
|
||||
size_t num_outputs = call.numOutputs();
|
||||
Node::Ptr call_node(new Node{{},{},Node::Kind{CallNode{name, std::move(call)}}});
|
||||
// NB: Create placeholders for inputs.
|
||||
call_node->in_nodes.resize(num_inputs);
|
||||
// NB: Create outputs with empty data.
|
||||
for (size_t i = 0; i < num_outputs; ++i) {
|
||||
call_node->out_nodes.emplace_back(new Node{{call_node},
|
||||
{},
|
||||
Node::Kind{DataNode{}}});
|
||||
}
|
||||
|
||||
auto it = m_state->calls_map.find(name);
|
||||
if (it != m_state->calls_map.end()) {
|
||||
throw std::logic_error("Node: " + name + " already exists!");
|
||||
}
|
||||
m_state->calls_map.emplace(name, call_node);
|
||||
m_state->all_calls.emplace_back(call_node);
|
||||
}
|
||||
|
||||
// Registers an inference node: builds IE params from either an IR pair
// (xml/bin) or a pre-compiled blob, applies the plugin config, adds the
// network to the package, and records the call itself.
void PipelineBuilder::addInfer(const std::string& name,
                               const InferParams& params) {
    // NB: No default ctor for Params, hence the heap-allocated holder.
    std::unique_ptr<cv::gapi::ie::Params<cv::gapi::Generic>> pp;
    if (cv::util::holds_alternative<LoadPath>(params.path)) {
        auto load_path = cv::util::get<LoadPath>(params.path);
        pp.reset(new cv::gapi::ie::Params<cv::gapi::Generic>(name,
                                                             load_path.xml,
                                                             load_path.bin,
                                                             params.device));
    } else {
        GAPI_Assert(cv::util::holds_alternative<ImportPath>(params.path));
        auto import_path = cv::util::get<ImportPath>(params.path);
        pp.reset(new cv::gapi::ie::Params<cv::gapi::Generic>(name,
                                                             import_path.blob,
                                                             params.device));
    }

    pp->pluginConfig(params.config);
    m_state->networks += cv::gapi::networks(*pp);

    // The node name doubles as the network tag used by InferCall.
    addCall(name, InferCall{name, params.input_layers, params.output_layers});
}
|
||||
|
||||
// Connects src's out-port data node to dst's in-port slot.
// Validation order matters for the reported error: unknown src node,
// bad src port, unknown dst node, bad dst port, already-connected port.
void PipelineBuilder::addEdge(const Edge& edge) {
    const auto& src_it = m_state->calls_map.find(edge.src.name);
    if (src_it == m_state->calls_map.end()) {
        throw std::logic_error("Failed to find node: " + edge.src.name);
    }
    auto src_node = src_it->second;
    if (src_node->out_nodes.size() <= edge.src.port) {
        throw std::logic_error("Failed to access node: " + edge.src.name +
                               " by out port: " + std::to_string(edge.src.port));
    }

    auto dst_it = m_state->calls_map.find(edge.dst.name);
    if (dst_it == m_state->calls_map.end()) {
        throw std::logic_error("Failed to find node: " + edge.dst.name);
    }
    auto dst_node = dst_it->second;
    if (dst_node->in_nodes.size() <= edge.dst.port) {
        throw std::logic_error("Failed to access node: " + edge.dst.name +
                               " by in port: " + std::to_string(edge.dst.port));
    }

    auto out_data = src_node->out_nodes[edge.src.port];
    auto& in_data = dst_node->in_nodes[edge.dst.port];
    // NB: A default-constructed weak_ptr is expired; a non-expired slot
    // means this in-port was already wired by an earlier edge.
    if (!in_data.expired()) {
        throw std::logic_error("Node: " + edge.dst.name +
                               " already connected by in port: " +
                               std::to_string(edge.dst.port));
    }
    dst_node->in_nodes[edge.dst.port] = out_data;
    out_data->out_nodes.push_back(dst_node);
}
|
||||
|
||||
// Installs the single stream source for the pipeline: a DummySource that
// emits a frame described by `output` every `latency` milliseconds.
// Asserts that no source has been set yet (exactly one per pipeline).
void PipelineBuilder::setSource(const std::string& name,
                                double latency,
                                const OutputDescr& output) {
    GAPI_Assert(!m_state->src);
    m_state->src = std::make_shared<DummySource>(latency, output);
    addCall(name, SourceCall{});
}
|
||||
|
||||
// Selects STREAMING vs REGULAR execution for the pipeline being built.
void PipelineBuilder::setMode(PLMode mode) {
    m_state->mode = mode;
}
|
||||
|
||||
// Requests a serialized graph dump to `dump` via the graph_dump_path
// compile argument.
void PipelineBuilder::setDumpFilePath(const std::string& dump) {
    m_state->compile_args.emplace_back(cv::graph_dump_path{dump});
}
|
||||
|
||||
// Limits the streaming executor's internal queue size via the
// queue_capacity compile argument.
void PipelineBuilder::setQueueCapacity(const size_t qc) {
    m_state->compile_args.emplace_back(cv::gapi::streaming::queue_capacity{qc});
}
|
||||
|
||||
// Sets the pipeline name (used in diagnostics and the final Pipeline).
void PipelineBuilder::setName(const std::string& name) {
    m_state->name = name;
}
|
||||
|
||||
// DFS step of the topological sort. `visited` uses the classic coloring:
//   0 (absent) - white, not seen yet;
//   1          - gray, on the current DFS stack;
//   2          - black, fully processed and already in `sorted`.
// Returns true if a cycle is detected (a gray node is reached again);
// otherwise appends `node` to `sorted` in post-order and returns false.
static bool visit(Node::Ptr node,
                  std::vector<Node::Ptr>& sorted,
                  std::unordered_map<Node::Ptr, int>& visited) {
    if (!node) {
        throw std::logic_error("Found null node");
    }

    visited[node] = 1;
    for (auto in : node->in_nodes) {
        auto in_node = in.lock();
        if (visited[in_node] == 0) {
            if (visit(in_node, sorted, visited)) {
                return true;
            }
        } else if (visited[in_node] == 1) {
            // Back edge into the active DFS path => cycle.
            return true;
        }
    }
    visited[node] = 2;
    sorted.push_back(node);
    return false;
}
|
||||
|
||||
// Topologically sorts `nodes` (post-order DFS over input edges).
// Returns an empty optional if the graph contains a cycle.
//
// Fixed: the parameter was `const std::vector<Node::Ptr>` taken BY VALUE,
// deep-copying the whole vector (and bumping every shared_ptr refcount)
// on each call; it is now a const reference. The loop likewise iterates
// by const reference instead of copying each shared_ptr.
static cv::optional<std::vector<Node::Ptr>>
toposort(const std::vector<Node::Ptr>& nodes) {
    std::vector<Node::Ptr> sorted;
    std::unordered_map<Node::Ptr, int> visited;
    for (const auto& n : nodes) {
        if (visit(n, sorted, visited)) {
            return cv::optional<std::vector<Node::Ptr>>{};
        }
    }
    return cv::util::make_optional(sorted);
}
|
||||
|
||||
// Turns the accumulated node/edge graph into a runnable Pipeline.
Pipeline::Ptr PipelineBuilder::construct() {
    // NB: Unlike G-API, a pipeline_modeling_tool graph always starts with a
    // CALL node (not data) that produces data, so a call node without
    // inputs is considered a "producer" node.
    //
    // The graph always starts with a CALL node and ends with a DATA node.
    // Graph example: [source] -> (source:0) -> [PP] -> (PP:0)
    //
    // The algorithm:
    // (0) Verify that every call input slot is connected.
    // (1) Topologically sort all nodes by visiting only call nodes; since
    //     every data node is connected to some call node, every node is
    //     guaranteed to be visited.
    // (2) Filter out the call nodes from the sorted list.
    // (3) For every call node, extract the proto input from each of its
    //     input data nodes.
    // (4) Run the call to obtain its proto outputs.
    // (5) If the call has no inputs, it is a producer (Source): collect
    //     its outputs into graph_inputs.
    // (6) Assign the proto outputs to the call's output data nodes so the
    //     next calls can use them as inputs; data nodes with no consumers
    //     become graph outputs.
    cv::GProtoArgs graph_inputs;
    cv::GProtoArgs graph_outputs;
    // (0) Verify that every call input slot is connected.
    for (auto call_node : m_state->all_calls) {
        for (size_t i = 0; i < call_node->in_nodes.size(); ++i) {
            const auto& in_data_node = call_node->in_nodes[i];
            // NB: An expired weak_ptr here means addEdge() never wired
            // this slot.
            if (in_data_node.expired()) {
                const auto& call = cv::util::get<CallNode>(call_node->kind);
                throw std::logic_error(
                        "Node: " + call.name + " in Pipeline: " + m_state->name +
                        " has dangling input by in port: " + std::to_string(i));
            }
        }
    }
    // (1) Topologically sort all nodes.
    auto has_sorted = toposort(m_state->all_calls);
    if (!has_sorted) {
        throw std::logic_error(
                "Pipeline: " + m_state->name + " has cyclic dependencies");
    }
    auto& sorted = has_sorted.value();
    // (2) Filter out the call nodes.
    std::vector<Node::Ptr> sorted_calls;
    for (auto n : sorted) {
        if (cv::util::holds_alternative<CallNode>(n->kind)) {
            sorted_calls.push_back(n);
        }
    }
    // Go through every call node in dependency order.
    for (auto call_node : sorted_calls) {
        cv::GProtoArgs outputs;
        cv::GProtoArgs inputs;
        for (size_t i = 0; i < call_node->in_nodes.size(); ++i) {
            auto in_node = call_node->in_nodes.at(i);
            auto in_data = cv::util::get<DataNode>(in_node.lock()->kind);
            if (!in_data.arg.has_value()) {
                throw std::logic_error("data hasn't been provided");
            }
            // (3) Extract the proto input from every input data node.
            inputs.push_back(in_data.arg.value());
        }
        // (4) Run the call and get its outputs.
        auto call = cv::util::get<CallNode>(call_node->kind);
        call.run(inputs, outputs);
        // (5) A call without inputs is a producer node (Source):
        // its outputs are the graph inputs.
        if (call_node->in_nodes.empty()) {
            for (auto out : outputs) {
                graph_inputs.push_back(out);
            }
        }
        // (6) Assign proto outputs to the output data nodes so the next
        // calls can use them as inputs.
        GAPI_Assert(outputs.size() == call_node->out_nodes.size());
        for (size_t i = 0; i < outputs.size(); ++i) {
            auto out_node = call_node->out_nodes[i];
            auto& out_data = cv::util::get<DataNode>(out_node->kind);
            out_data.arg = cv::util::make_optional(outputs[i]);
            // A data node nobody consumes is a graph output.
            if (out_node->out_nodes.empty()) {
                graph_outputs.push_back(out_data.arg.value());
            }
        }
    }

    m_state->compile_args.emplace_back(m_state->networks);
    m_state->compile_args.emplace_back(m_state->kernels);

    if (m_state->mode == PLMode::STREAMING) {
        GAPI_Assert(graph_inputs.size() == 1);
        GAPI_Assert(cv::util::holds_alternative<cv::GMat>(graph_inputs[0]));
        // FIXME: Handle GFrame when NV12 comes.
        const auto& graph_input = cv::util::get<cv::GMat>(graph_inputs[0]);
        // NB: In streaming mode the timestamp must be exposed in order to
        // calculate performance metrics.
        graph_outputs.emplace_back(
                cv::gapi::streaming::timestamp(graph_input).strip());

        return std::make_shared<StreamingPipeline>(std::move(m_state->name),
                                                   cv::GComputation(
                                                       cv::GProtoInputArgs{graph_inputs},
                                                       cv::GProtoOutputArgs{graph_outputs}),
                                                   std::move(m_state->src),
                                                   std::move(m_state->compile_args),
                                                   graph_outputs.size());
    }
    GAPI_Assert(m_state->mode == PLMode::REGULAR);
    return std::make_shared<RegularPipeline>(std::move(m_state->name),
                                             cv::GComputation(
                                                 cv::GProtoInputArgs{graph_inputs},
                                                 cv::GProtoOutputArgs{graph_outputs}),
                                             std::move(m_state->src),
                                             std::move(m_state->compile_args),
                                             graph_outputs.size());
}
|
||||
|
||||
// Materializes the pipeline described so far, then resets the builder's
// state so it can be reused for the next pipeline description.
Pipeline::Ptr PipelineBuilder::build() {
    auto result = construct();
    m_state.reset(new State{});
    return result;
}
|
||||
|
||||
#endif // OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_BUILDER_HPP
|
@ -0,0 +1,931 @@
|
||||
import os
|
||||
import subprocess
|
||||
|
||||
pipeline_modeling_tool = os.getenv('PIPELINE_MODELING_TOOL')
|
||||
|
||||
def get_output(exec_str):
    """Run a shell command and return its combined stdout+stderr text.

    A non-zero exit status is not an error here: the tool under test is
    expected to fail, so the captured output is returned in that case too.
    """
    try:
        raw = subprocess.check_output(exec_str,
                                      stderr=subprocess.STDOUT,
                                      shell=True)
    except subprocess.CalledProcessError as exc:
        raw = exc.output
    return raw.strip().decode()
|
||||
|
||||
|
||||
def test_error_no_config_specified():
    # Running the tool with no arguments must ask for the --cfg option.
    out = get_output(pipeline_modeling_tool)
    assert out.startswith('Config must be specified via --cfg option')
|
||||
|
||||
|
||||
def test_error_no_config_exists():
    # A path to a non-existing config file must produce a clear error.
    cfg_file = 'not_existing_cfg.yml'

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert 'Failed to open config file: not_existing_cfg.yml' in out
|
||||
|
||||
|
||||
def test_error_no_work_time():
    # NOTE: the YAML is passed inline on the command line (presumably
    # parsed in-memory by cv::FileStorage) — the same trick is used by
    # all tests below.
    cfg_file = """\"%YAML:1.0\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('Config must contain field: work_time')
|
||||
|
||||
|
||||
def test_error_work_time_not_positive():
    # work_time must be a positive number of milliseconds.
    cfg_file = """\"%YAML:1.0
work_time: -1\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('work_time must be positive')
|
||||
|
||||
|
||||
def test_error_no_pipelines():
    # The top-level 'Pipelines' map is mandatory.
    cfg_file = """\"%YAML:1.0
work_time: 1000\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('Config must contain field: Pipelines')
|
||||
|
||||
|
||||
def test_error_pipelines_node_not_map():
    # An empty 'Pipelines' entry is not a map and must be rejected.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('Pipelines field must be a map')
|
||||
|
||||
|
||||
def test_error_config_not_contain_pl():
    # Requesting a pipeline via --exec_list that the config lacks must fail.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:\" """

    exec_str = '{} --cfg={} --exec_list=PL2'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('Pipelines must contain field: PL2')
|
||||
|
||||
|
||||
def test_error_no_source():
    # Every pipeline entry must declare a 'source'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('PL1 must contain field: source')
|
||||
|
||||
|
||||
def test_error_source_no_name():
    # The source must have a 'name'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('source must contain field: name')
|
||||
|
||||
|
||||
def test_error_source_no_latency():
    # The source must have a 'latency'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('source must contain field: latency')
|
||||
|
||||
|
||||
def test_error_source_no_output():
    # The source must describe its 'output'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('source must contain field: output')
|
||||
|
||||
|
||||
def test_error_source_output_no_dims():
    # An output description must contain 'dims'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('output must contain field: dims')
|
||||
|
||||
|
||||
def test_error_source_output_no_precision():
    # An output description must contain 'precision'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('output must contain field: precision')
|
||||
|
||||
|
||||
def test_error_no_nodes():
    # A pipeline with only a source and no 'nodes' field must be rejected.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('PL1 must contain field: nodes')
|
||||
|
||||
|
||||
def test_error_nodes_not_sequence():
    # An empty 'nodes' entry is not a sequence and must be rejected.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('nodes in PL1 must be a sequence')
|
||||
|
||||
|
||||
def test_error_node_no_name():
    # Every node entry must have a 'name'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      -\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('node must contain field: name')
|
||||
|
||||
|
||||
def test_error_node_no_type():
    # Every node entry must have a 'type'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('node must contain field: type')
|
||||
|
||||
|
||||
def test_error_node_unknown_type():
    # Only supported node types are accepted.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Unknown'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('Unsupported node type: Unknown')
|
||||
|
||||
|
||||
def test_error_node_dummy_no_time():
    # A 'Dummy' node must specify its emulated execution 'time'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('Node0 must contain field: time')
|
||||
|
||||
|
||||
def test_error_node_dummy_not_positive_time():
    # A 'Dummy' node's 'time' must be positive.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: -0.2\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('Node0 time must be positive')
|
||||
|
||||
|
||||
def test_error_node_dummy_no_output():
    # A 'Dummy' node must describe its 'output'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    assert out.startswith('Node0 must contain field: output')
|
||||
|
||||
|
||||
def test_error_node_infer_no_model_path():
    # An 'Infer' node must specify the model path in one of the two
    # supported formats (xml/bin IR pair or a pre-compiled blob).
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Infer'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    error_msg = """Path to OpenVINO model must be specified in either of two formats:
1.
  xml: path to *.xml
  bin: path to *.bin
2.
  blob: path to *.blob"""
    assert out.startswith(error_msg)
|
||||
|
||||
|
||||
def test_error_node_infer_no_input_layers():
    # An 'Infer' node must list its 'input_layers'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Infer'
        blob: model.blob
        device: 'CPU'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('Node0 must contain field: input_layers')
|
||||
|
||||
|
||||
def test_error_node_infer_input_layers_are_empty():
    # An empty 'input_layers' entry is not a sequence and must be rejected.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Infer'
        blob: model.blob
        device: 'CPU'
        input_layers:
\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('input_layers in Node0 must be a sequence')
|
||||
|
||||
|
||||
def test_error_node_infer_no_output_layers():
    # An 'Infer' node must list its 'output_layers'.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Infer'
        blob: model.blob
        device: 'CPU'
        input_layers:
          - 'layer_name'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('Node0 must contain field: output_layers')
|
||||
|
||||
|
||||
def test_error_node_infer_output_layers_are_empty():
    # An empty 'output_layers' entry is not a sequence and must be rejected.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Infer'
        blob: model.blob
        device: 'CPU'
        input_layers:
          - 'layer_name'
        output_layers:\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('output_layers in Node0 must be a sequence')
|
||||
|
||||
|
||||
def test_error_no_edges():
    # A pipeline with nodes but no 'edges' field must be rejected.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('PL1 must contain field: edges')
|
||||
|
||||
|
||||
def test_error_edges_not_sequence():
    # An empty 'edges' entry is not a sequence and must be rejected.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('edges in PL1 must be a sequence')
|
||||
|
||||
|
||||
def test_error_edges_no_from():
    # Every edge must have a 'from' endpoint.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      -\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('edge must contain field: from')
|
||||
|
||||
|
||||
def test_error_edges_no_to():
    # Every edge must have a 'to' endpoint.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Node0'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('edge must contain field: to')
|
||||
|
||||
|
||||
def test_error_edges_from_not_exists():
    # The 'from' endpoint must name an existing node.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Node1'
        to: 'Node2'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('Failed to find node: Node1')
|
||||
|
||||
|
||||
def test_error_edges_from_port_not_exists():
    # The 'from' endpoint's out port ('Name:port') must exist on the node.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Node0:10'
        to: 'Node2'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('Failed to access node: Node0 by out port: 10')
|
||||
|
||||
|
||||
def test_error_edges_to_not_exists():
    # The 'to' endpoint must name an existing node.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Src'
        to: 'Node2'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('Failed to find node: Node2')
|
||||
|
||||
|
||||
def test_error_edges_to_port_not_exists():
    # The 'to' endpoint's in port ('Name:port') must exist on the node.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Src'
        to: 'Node0:3'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('Failed to access node: Node0 by in port: 3')
|
||||
|
||||
|
||||
def test_error_connect_to_source():
    # A source has no input ports, so an edge into it must fail.
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Node0'
        to: 'Src'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)

    assert out.startswith('Failed to access node: Src by in port: 0')
|
||||
|
||||
|
||||
# NOTE(review): this function is re-defined verbatim further down in this
# file; during pytest collection the later definition shadows this one, so
# only a single copy actually runs. Consider removing one of the two.
def test_error_double_edge():
    """The tool must fail when two edges are connected to the same input port."""
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Src'
        to: 'Node0'
      - from: 'Src'
        to: 'Node0'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    # Both edges implicitly target 'Node0' in-port 0; the second must fail.
    assert out.startswith('Node: Node0 already connected by in port: 0')
|
||||
|
||||
|
||||
# NOTE(review): exact duplicate of test_error_double_edge defined earlier in
# this file -- this later definition shadows the earlier one at collection
# time. Harmless since the bodies are identical, but one copy should go.
def test_error_double_edge():
    """The tool must fail when two edges are connected to the same input port."""
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Src'
        to: 'Node0'
      - from: 'Src'
        to: 'Node0'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    # Both edges implicitly target 'Node0' in-port 0; the second must fail.
    assert out.startswith('Node: Node0 already connected by in port: 0')
|
||||
|
||||
|
||||
def test_node_has_dangling_input():
    """The tool must fail when a node's input port is left unconnected."""
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'

      - name: 'Node1'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Node0'
        to: 'Node1'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    # Nothing (not even 'Src') feeds 'Node0', so its in-port 0 dangles.
    assert out.startswith('Node: Node0 in Pipeline: PL1 has dangling input by in port: 0')
|
||||
|
||||
|
||||
def test_error_has_cycle_0():
    """Graph validation must reject a self-cycle (a node feeding its own input)."""
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node'
        type: 'Infer'
        blob: 'model.blob'
        device: 'CPU'
        input_layers:
          - 'in_layer_name_0'
          - 'in_layer_name_1'
        output_layers:
          - 'out_layer_name'
    edges:
      - from: 'Src'
        to: 'Node:0'
      - from: 'Node:0'
        to: 'Node:1'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    # 'Node' output 0 is routed back into its own input port 1 -> cycle.
    assert out.startswith('Pipeline: PL1 has cyclic dependencies')
|
||||
|
||||
|
||||
def test_error_has_cycle_1():
    """Graph validation must reject a cycle that spans multiple nodes.

    NB: renamed from test_error_has_cycle_0 -- a test with that name is
    already defined earlier in this file, so this (different) scenario
    silently shadowed it and only one of the two cycle tests ever ran.
    """
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Infer'
        blob: 'model.blob'
        device: 'CPU'
        input_layers:
          - 'in_layer_name_0'
          - 'in_layer_name_1'
        output_layers:
          - 'out_layer_name'

      - name: 'Node1'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Src'
        to: 'Node0:0'
      - from: 'Node0:0'
        to: 'Node1:0'
      - from: 'Node1'
        to: 'Node0:1'\" """

    exec_str = '{} --cfg={}'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    # Node0 -> Node1 -> Node0:1 closes a loop through two nodes.
    assert out.startswith('Pipeline: PL1 has cyclic dependencies')
|
||||
|
||||
|
||||
def test_error_no_load_config_exists():
    """The tool must report a clear error when --load_config points to a missing file."""
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Src'
        to: 'Node0'\" """

    exec_str = '{} --cfg={} --load_config=not_existing.yml'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    # Substring check ('in', not startswith): the message position in the
    # output may vary with where config loading happens.
    assert 'Failed to load config: not_existing.yml' in out
|
||||
|
||||
|
||||
def test_error_invalid_pl_mode():
    """--pl_mode must accept only 'streaming' or 'regular'.

    NB: this test and the one below previously had their names swapped
    relative to the command-line option they exercise (the *app_mode* test
    passed --pl_mode and vice versa); the names now match the flags.
    """
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Src'
        to: 'Node0'\" """

    exec_str = '{} --cfg={} --pl_mode=unknown'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    # NB: "chose" [sic] matches the tool's actual output text.
    assert out.startswith('Unsupported PLMode: unknown\n'
                          'Please chose between: streaming and regular')


def test_error_invalid_app_mode():
    """--app_mode must accept only 'realtime' or 'benchmark'."""
    cfg_file = """\"%YAML:1.0
work_time: 1000
Pipelines:
  PL1:
    source:
      name: 'Src'
      latency: 20
      output:
        dims: [1,2,3,4]
        precision: 'U8'
    nodes:
      - name: 'Node0'
        type: 'Dummy'
        time: 0.2
        output:
          dims: [1,2,3,4]
          precision: 'U8'
    edges:
      - from: 'Src'
        to: 'Node0'\" """

    exec_str = '{} --cfg={} --app_mode=unknown'.format(pipeline_modeling_tool, cfg_file)
    out = get_output(exec_str)
    # NB: "chose" [sic] matches the tool's actual output text.
    assert out.startswith('Unsupported AppMode: unknown\n'
                          'Please chose between: realtime and benchmark')
|
81
modules/gapi/samples/pipeline_modeling_tool/utils.hpp
Normal file
81
modules/gapi/samples/pipeline_modeling_tool/utils.hpp
Normal file
@ -0,0 +1,81 @@
|
||||
#ifndef OPENCV_GAPI_PIPELINE_MODELING_TOOL_UTILS_HPP
|
||||
#define OPENCV_GAPI_PIPELINE_MODELING_TOOL_UTILS_HPP
|
||||
|
||||
#include <chrono>
#include <cstdint>
#include <functional>
#include <stdexcept>
#include <thread>
#include <vector>

#include <opencv2/core.hpp>

#if defined(_WIN32)
#include <windows.h>
#endif
|
||||
|
||||
// FIXME: It's better to place it somewhere in common.hpp
|
||||
// Describes the data a pipeline node produces: a tensor shape plus an
// element precision.
struct OutputDescr {
    std::vector<int> dims;  // Tensor dimensions, e.g. {1, 3, 300, 300}.
    int precision;          // Presumably an OpenCV depth constant (CV_8U,
                            // CV_32F, ...) -- generateRandom() below switches
                            // on exactly those values; confirm at call sites.
};
|
||||
|
||||
namespace utils {
|
||||
// Fills `out` in-place with uniformly distributed random values suitable
// for its depth: U8 in [0, 255), F32/F16 in [0, 1).
// Throws std::logic_error for any other depth.
inline void generateRandom(cv::Mat& out) {
    const int depth = out.depth();
    if (depth == CV_8U) {
        cv::randu(out, 0, 255);
    } else if (depth == CV_32F) {
        cv::randu(out, 0.f, 1.f);
    } else if (depth == CV_16F) {
        // cv::randu can't fill FP16 directly: generate FP32 and convert down.
        cv::Mat fp32_mat(out.size(), CV_MAKETYPE(CV_32F, out.channels()));
        cv::randu(fp32_mat, 0.f, 1.f);
        fp32_mat.convertTo(out, out.type());
    } else {
        throw std::logic_error("Unsupported preprocessing depth");
    }
}
|
||||
|
||||
// Suspends the calling thread for (at least) `ms` milliseconds.
// On Windows a waitable timer is used instead of std::this_thread::sleep_for.
// NOTE(review): presumably chosen for finer timer resolution than the
// default Windows sleep granularity -- confirm with the original author.
inline void sleep(double ms) {
#if defined(_WIN32)
    // NB: It takes portions of 100 nanoseconds.
    int64_t ns_units = static_cast<int64_t>(ms * 1e4);
    // FIXME: Wrap it to RAII and instance only once.
    HANDLE timer = CreateWaitableTimer(NULL, true, NULL);
    if (!timer) {
        throw std::logic_error("Failed to create timer");
    }

    LARGE_INTEGER li;
    // Negative due time requests a *relative* interval (SetWaitableTimer contract).
    li.QuadPart = -ns_units;
    if(!SetWaitableTimer(timer, &li, 0, NULL, NULL, false)){
        CloseHandle(timer);
        throw std::logic_error("Failed to set timer");
    }
    if (WaitForSingleObject(timer, INFINITE) != WAIT_OBJECT_0) {
        CloseHandle(timer);
        throw std::logic_error("Failed to wait timer");
    }
    CloseHandle(timer);
#else
    using namespace std::chrono;
    std::this_thread::sleep_for(duration<double, std::milli>(ms));
#endif
}
|
||||
|
||||
// Runs `f` once and returns the wall-clock time it took, expressed in
// duration_t units, e.g.:
//     auto ms = utils::measure<std::chrono::milliseconds>(job);
//
// The callable is taken by const reference: the previous by-value parameter
// copied the std::function (and any captured state, possibly allocating)
// on every measurement for no benefit.
template <typename duration_t>
typename duration_t::rep measure(const std::function<void()>& f) {
    using namespace std::chrono;
    auto start = high_resolution_clock::now();
    f();
    return duration_cast<duration_t>(
            high_resolution_clock::now() - start).count();
}
|
||||
|
||||
// Returns the current time since the clock's epoch, expressed in
// duration_t units (e.g. utils::timestamp<std::chrono::milliseconds>()).
template <typename duration_t>
typename duration_t::rep timestamp() {
    using std::chrono::duration_cast;
    using std::chrono::high_resolution_clock;
    const auto since_epoch = high_resolution_clock::now().time_since_epoch();
    return duration_cast<duration_t>(since_epoch).count();
}
|
||||
|
||||
} // namespace utils
|
||||
|
||||
#endif // OPENCV_GAPI_PIPELINE_MODELING_TOOL_UTILS_HPP
|
Loading…
Reference in New Issue
Block a user