Merge pull request #22542 from TolyaTalamanov:at/expand-performance-report-with-metrics

[G-API] Pipeline modeling tool - expand performance report
commit 468431cd8f
Merged by Alexander Smorkalov on 2022-10-03 13:14:17 +03:00 (committed by GitHub)
4 changed files with 76 additions and 47 deletions

View File

@@ -18,6 +18,7 @@ public:
                 const bool drop_frames);
     bool pull(cv::gapi::wip::Data& data) override;
     cv::GMetaArg descr_of() const override;
+    double latency() const { return m_latency; };
 private:
     double m_latency;
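The only functional change here is the new public latency() accessor on what is presumably the tool's dummy video source. Judging from its use in Pipeline::run() further down, m_latency holds the source's per-frame interval in milliseconds. A minimal sketch of that idea, with an invented class name and an assumed fps parameter (this is not the tool's actual DummySource):

    // Hypothetical stand-in for the tool's DummySource, showing only the
    // latency logic: a synthetic source that emits frames at a fixed rate.
    class FixedRateSource {
    public:
        // Assumption: the per-frame interval is derived from a target FPS,
        // e.g. 30 FPS -> ~33.3 ms between frames.
        explicit FixedRateSource(double fps) : m_latency(1000.0 / fps) {}

        // Mirrors the accessor added in this commit.
        double latency() const { return m_latency; }

    private:
        double m_latency;
    };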

View File

@ -1,13 +1,18 @@
#ifndef OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_HPP
#define OPENCV_GAPI_PIPELINE_MODELING_TOOL_PIPELINE_HPP
#include <iomanip>
struct PerfReport {
std::string name;
double avg_latency = 0.0;
double throughput = 0.0;
int64_t first_run_latency = 0;
int64_t elapsed = 0;
int64_t compilation_time = 0;
std::string name;
double avg_latency = 0.0;
int64_t min_latency = 0;
int64_t max_latency = 0;
int64_t first_latency = 0;
double throughput = 0.0;
int64_t elapsed = 0;
int64_t warmup_time = 0;
int64_t num_late_frames = 0;
std::vector<int64_t> latencies;
std::string toStr(bool expanded = false) const;
@@ -15,17 +20,19 @@ struct PerfReport {
 std::string PerfReport::toStr(bool expand) const {
     std::stringstream ss;
-    ss << name << ": Compilation time: " << compilation_time << " ms; "
-       << "Average latency: " << avg_latency << " ms; Throughput: "
-       << throughput << " FPS; First latency: "
-       << first_run_latency << " ms";
+    ss << name << ": \n"
+       << "  Warm up time: " << warmup_time << " ms\n"
+       << "  Execution time: " << elapsed << " ms\n"
+       << "  Frames: " << num_late_frames << "/" << latencies.size() << " (late/all)\n"
+       << "  Latency:\n"
+       << "    first: " << first_latency << " ms\n"
+       << "    min: " << min_latency << " ms\n"
+       << "    max: " << max_latency << " ms\n"
+       << "    avg: " << std::fixed << std::setprecision(3) << avg_latency << " ms\n"
+       << "  Throughput: " << std::fixed << std::setprecision(3) << throughput << " FPS";
     if (expand) {
-        ss << "\nTotal processed frames: " << latencies.size()
-           << "\nTotal elapsed time: " << elapsed << " ms" << std::endl;
         for (size_t i = 0; i < latencies.size(); ++i) {
-            ss << std::endl;
-            ss << "Frame:" << i << "\nLatency: "
+            ss << "\nFrame:" << i << "\nLatency: "
                << latencies[i] << " ms";
         }
     }
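For illustration, a non-expanded report printed by the new toStr() could look like this (the pipeline name and all numbers are invented):

    PL1:
      Warm up time: 4023 ms
      Execution time: 10010 ms
      Frames: 3/300 (late/all)
      Latency:
        first: 38 ms
        min: 29 ms
        max: 41 ms
        avg: 33.347 ms
      Throughput: 29.970 FPS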
@@ -37,11 +44,11 @@ class Pipeline {
 public:
     using Ptr = std::shared_ptr<Pipeline>;
-    Pipeline(std::string&& name,
-             cv::GComputation&& comp,
-             cv::gapi::wip::IStreamSource::Ptr&& src,
-             cv::GCompileArgs&& args,
-             const size_t num_outputs);
+    Pipeline(std::string&& name,
+             cv::GComputation&& comp,
+             std::shared_ptr<DummySource>&& src,
+             cv::GCompileArgs&& args,
+             const size_t num_outputs);
     void compile();
     void run(double work_time_ms);
@@ -59,19 +66,19 @@ protected:
     virtual void _compile() = 0;
     virtual RunPerf _run(double work_time_ms) = 0;
-    std::string m_name;
-    cv::GComputation m_comp;
-    cv::gapi::wip::IStreamSource::Ptr m_src;
-    cv::GCompileArgs m_args;
-    size_t m_num_outputs;
-    PerfReport m_perf;
+    std::string m_name;
+    cv::GComputation m_comp;
+    std::shared_ptr<DummySource> m_src;
+    cv::GCompileArgs m_args;
+    size_t m_num_outputs;
+    PerfReport m_perf;
 };
-Pipeline::Pipeline(std::string&& name,
-                   cv::GComputation&& comp,
-                   cv::gapi::wip::IStreamSource::Ptr&& src,
-                   cv::GCompileArgs&& args,
-                   const size_t num_outputs)
+Pipeline::Pipeline(std::string&& name,
+                   cv::GComputation&& comp,
+                   std::shared_ptr<DummySource>&& src,
+                   cv::GCompileArgs&& args,
+                   const size_t num_outputs)
     : m_name(std::move(name)),
       m_comp(std::move(comp)),
       m_src(std::move(src)),
@@ -81,7 +88,7 @@ Pipeline::Pipeline(std::string&& name,
 }
 void Pipeline::compile() {
-    m_perf.compilation_time =
+    m_perf.warmup_time =
         utils::measure<std::chrono::milliseconds>([this]() {
             _compile();
         });
@@ -90,17 +97,23 @@ void Pipeline::compile() {
 void Pipeline::run(double work_time_ms) {
     auto run_perf = _run(work_time_ms);
-    m_perf.elapsed = run_perf.elapsed;
-    m_perf.latencies = std::move(run_perf.latencies);
+    m_perf.elapsed = run_perf.elapsed;
+    m_perf.latencies = std::move(run_perf.latencies);
+    m_perf.avg_latency = utils::avg(m_perf.latencies);
+    m_perf.min_latency = utils::min(m_perf.latencies);
+    m_perf.max_latency = utils::max(m_perf.latencies);
+    m_perf.first_latency = m_perf.latencies[0];
+    // NB: Count how many executions don't fit into camera latency interval.
+    m_perf.num_late_frames =
+        std::count_if(m_perf.latencies.begin(), m_perf.latencies.end(),
+                      [this](int64_t latency) {
+                          return static_cast<double>(latency) > m_src->latency();
+                      });
-    m_perf.avg_latency =
-        std::accumulate(m_perf.latencies.begin(),
-                        m_perf.latencies.end(),
-                        0.0) / static_cast<double>(m_perf.latencies.size());
     m_perf.throughput =
         (m_perf.latencies.size() / static_cast<double>(m_perf.elapsed)) * 1000;
-    m_perf.first_run_latency = m_perf.latencies[0];
 }
 const PerfReport& Pipeline::report() const {
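The rewritten run() replaces the inline std::accumulate with the new utils helpers and derives num_late_frames by comparing each measured latency against the source's frame interval. The following self-contained sketch reproduces the same arithmetic on invented sample data (the ~33.3 ms budget assumes a 30 FPS source; real values come from the tool's config):

    #include <algorithm>  // count_if, min_element, max_element
    #include <cstdint>
    #include <iostream>
    #include <numeric>    // accumulate
    #include <vector>

    int main() {
        // Invented per-frame latencies in ms.
        std::vector<int64_t> latencies = {38, 31, 29, 35, 30, 41, 33};
        const double src_latency_ms = 1000.0 / 30.0;  // assumed 30 FPS source
        const int64_t elapsed_ms = 240;               // invented wall-clock run time

        // Equivalents of the utils::avg/min/max calls in Pipeline::run().
        const double avg = std::accumulate(latencies.begin(), latencies.end(), 0.0)
                         / latencies.size();
        const int64_t mn = *std::min_element(latencies.begin(), latencies.end());
        const int64_t mx = *std::max_element(latencies.begin(), latencies.end());
        const int64_t first = latencies[0];  // assumes at least one processed frame

        // Same predicate as in run(): a frame is "late" when its latency
        // exceeds the source's frame interval.
        const auto late = std::count_if(latencies.begin(), latencies.end(),
            [&](int64_t l) { return static_cast<double>(l) > src_latency_ms; });

        const double fps = (latencies.size() / static_cast<double>(elapsed_ms)) * 1000;

        std::cout << "first=" << first << " min=" << mn << " max=" << mx
                  << " avg=" << avg << " late=" << late << "/" << latencies.size()
                  << " throughput=" << fps << " FPS\n";
    }

With these sample numbers, three of the seven frames (38, 35, 41 ms) exceed the 33.3 ms budget and are counted as late.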

View File

@@ -295,15 +295,15 @@ private:
         std::vector<Edge> output_edges;
     };
-    M<std::string, Node::Ptr> calls_map;
-    std::vector<Node::Ptr> all_calls;
+    M<std::string, Node::Ptr> calls_map;
+    std::vector<Node::Ptr> all_calls;
-    cv::gapi::GNetPackage networks;
-    cv::gapi::GKernelPackage kernels;
-    cv::GCompileArgs compile_args;
-    cv::gapi::wip::IStreamSource::Ptr src;
-    PLMode mode = PLMode::STREAMING;
-    std::string name;
+    cv::gapi::GNetPackage networks;
+    cv::gapi::GKernelPackage kernels;
+    cv::GCompileArgs compile_args;
+    std::shared_ptr<DummySource> src;
+    PLMode mode = PLMode::STREAMING;
+    std::string name;
 };
 std::unique_ptr<State> m_state;

View File

@@ -104,6 +104,21 @@ void mergeMapWith(std::map<K, V>& target, const std::map<K, V>& second) {
     }
 }
+template <typename T>
+double avg(const std::vector<T>& vec) {
+    return std::accumulate(vec.begin(), vec.end(), 0.0) / vec.size();
+}
+template <typename T>
+T max(const std::vector<T>& vec) {
+    return *std::max_element(vec.begin(), vec.end());
+}
+template <typename T>
+T min(const std::vector<T>& vec) {
+    return *std::min_element(vec.begin(), vec.end());
+}
 } // namespace utils
 #endif // OPENCV_GAPI_PIPELINE_MODELING_TOOL_UTILS_HPP
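utils::measure, which Pipeline::compile() calls above, is not part of this diff, so its exact signature is an assumption here. The sketch below pairs a compatible measure with copies of the new helpers to show how they compose; note that all three reductions assume a non-empty vector (min_element/max_element on an empty range is undefined, and avg would divide by zero):

    #include <algorithm>
    #include <chrono>
    #include <cstdint>
    #include <iostream>
    #include <numeric>
    #include <thread>
    #include <vector>

    namespace utils {
    // Copied from the diff above.
    template <typename T>
    double avg(const std::vector<T>& vec) {
        return std::accumulate(vec.begin(), vec.end(), 0.0) / vec.size();
    }
    template <typename T>
    T max(const std::vector<T>& vec) {
        return *std::max_element(vec.begin(), vec.end());
    }
    template <typename T>
    T min(const std::vector<T>& vec) {
        return *std::min_element(vec.begin(), vec.end());
    }
    // Assumed shape of the measure<> helper used by Pipeline::compile():
    // run f once and return the elapsed time in the requested duration unit.
    template <typename Duration, typename F>
    int64_t measure(F&& f) {
        const auto start = std::chrono::steady_clock::now();
        f();
        return std::chrono::duration_cast<Duration>(
            std::chrono::steady_clock::now() - start).count();
    }
    } // namespace utils

    int main() {
        const int64_t warmup = utils::measure<std::chrono::milliseconds>([] {
            std::this_thread::sleep_for(std::chrono::milliseconds(20));
        });
        const std::vector<int64_t> lat = {38, 31, 29, 35};
        std::cout << "warmup=" << warmup << " ms, avg=" << utils::avg(lat)
                  << " min=" << utils::min(lat) << " max=" << utils::max(lat) << "\n";
    }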