Merge pull request #19002 from TolyaTalamanov:at/infer_gframe

[G-API] Support GFrame for infer

* GInfer(GFrame), currently broken

* Fixed (API only)

* Support GFrame in GIEBackend

* Address review comments

* Address review comments

* Fix doxygen

* Fix building with different IE versions

* Fix warning on MacOS

Co-authored-by: Dmitry Matveev <dmitry.matveev@intel.com>
Co-authored-by: Smirnov Alexey <alexey.smirnov@intel.com>
Anatoliy Talamanov 2020-12-09 17:00:56 +03:00 committed by GitHub
parent b5a9ef6b7b
commit a55150b1bc
4 changed files with 670 additions and 96 deletions
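
For context, a minimal usage sketch of the new API, based on the tests added in this PR (AgeGender and the "test-age-gender" tag are test fixtures, not library API):

    using AGInfo = std::tuple<cv::GMat, cv::GMat>;
    G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");

    cv::GFrame in;                                   // a media frame (BGR or NV12) instead of cv::GMat
    cv::GMat age, gender;
    std::tie(age, gender) = cv::gapi::infer<AgeGender>(in);
    cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));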


@ -16,6 +16,7 @@
#include <utility> // tuple
#include <type_traits> // is_same, false_type
#include <opencv2/gapi/util/util.hpp> // all_satisfy
#include <opencv2/gapi/util/any.hpp> // any<>
#include <opencv2/gapi/gkernel.hpp> // GKernelType[M], GBackend
#include <opencv2/gapi/garg.hpp> // GArg
@ -27,40 +28,54 @@ namespace cv {
template<typename, typename> class GNetworkType;
namespace detail {
template<typename, typename>
struct valid_infer2_types;
// Terminal case 1 (50/50 success)
template<typename T>
struct valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> > {
// By default, Nets are limited to GMat argument types only
// for infer2, every GMat argument may translate to either
// GArray<GMat> or GArray<Rect>. GArray<> part is stripped
// already at this point.
static constexpr const auto value =
std::is_same<typename std::decay<T>::type, cv::GMat>::value
|| std::is_same<typename std::decay<T>::type, cv::Rect>::value;
};
// Infer ///////////////////////////////////////////////////////////////////////
template<typename T>
struct accepted_infer_types {
static constexpr const auto value =
std::is_same<typename std::decay<T>::type, cv::GMat>::value
|| std::is_same<typename std::decay<T>::type, cv::GFrame>::value;
};
// Terminal case 2 (100% failure)
template<typename... Ts>
struct valid_infer2_types< std::tuple<>, std::tuple<Ts...> >
: public std::false_type {
};
template<typename... Ts>
using valid_infer_types = all_satisfy<accepted_infer_types, Ts...>;
// Terminal case 3 (100% failure)
template<typename... Ns>
struct valid_infer2_types< std::tuple<Ns...>, std::tuple<> >
: public std::false_type {
};
// Infer2 //////////////////////////////////////////////////////////////////////
// Recursion -- generic
template<typename... Ns, typename T, typename...Ts>
struct valid_infer2_types< std::tuple<cv::GMat,Ns...>, std::tuple<T,Ts...> > {
static constexpr const auto value =
valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> >::value
&& valid_infer2_types< std::tuple<Ns...>, std::tuple<Ts...> >::value;
};
template<typename, typename>
struct valid_infer2_types;
// Terminal case 1 (50/50 success)
template<typename T>
struct valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> > {
// By default, Nets are limited to GMat argument types only
// for infer2, every GMat argument may translate to either
// GArray<GMat> or GArray<Rect>. GArray<> part is stripped
// already at this point.
static constexpr const auto value =
std::is_same<typename std::decay<T>::type, cv::GMat>::value
|| std::is_same<typename std::decay<T>::type, cv::Rect>::value;
};
// Terminal case 2 (100% failure)
template<typename... Ts>
struct valid_infer2_types< std::tuple<>, std::tuple<Ts...> >
: public std::false_type {
};
// Terminal case 3 (100% failure)
template<typename... Ns>
struct valid_infer2_types< std::tuple<Ns...>, std::tuple<> >
: public std::false_type {
};
// Recursion -- generic
template<typename... Ns, typename T, typename...Ts>
struct valid_infer2_types< std::tuple<cv::GMat,Ns...>, std::tuple<T,Ts...> > {
static constexpr const auto value =
valid_infer2_types< std::tuple<cv::GMat>, std::tuple<T> >::value
&& valid_infer2_types< std::tuple<Ns...>, std::tuple<Ts...> >::value;
};
} // namespace detail
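A quick illustration of the new trait (a sketch, not part of the patch): valid_infer_types accepts any mixture of GMat and GFrame inputs and rejects everything else.

    static_assert(cv::detail::valid_infer_types<cv::GMat>::value, "");
    static_assert(cv::detail::valid_infer_types<cv::GFrame>::value, "");
    static_assert(cv::detail::valid_infer_types<cv::GFrame, cv::GMat>::value, "");
    static_assert(!cv::detail::valid_infer_types<cv::Rect>::value, "");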
// TODO: maybe tuple_wrap_helper from util.hpp may help with this.
@ -76,10 +91,6 @@ public:
using API = std::function<Result(Args...)>;
using ResultL = std::tuple< cv::GArray<R>... >;
using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
// FIXME: Args... must be limited to a single GMat
using APIRoi = std::function<Result(cv::GOpaque<cv::Rect>, Args...)>;
};
// Single-return-value network definition (specialized base class)
@ -94,20 +105,48 @@ public:
using API = std::function<R(Args...)>;
using ResultL = cv::GArray<R>;
using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
};
// FIXME: Args... must be limited to a single GMat
using APIRoi = std::function<Result(cv::GOpaque<cv::Rect>, Args...)>;
// InferAPI: Accepts either GMat or GFrame for every individual network input
template<class Net, class... Ts>
struct InferAPI {
using type = typename std::enable_if
< detail::valid_infer_types<Ts...>::value
&& std::tuple_size<typename Net::InArgs>::value == sizeof...(Ts)
, std::function<typename Net::Result(Ts...)>
>::type;
};
// InferAPIRoi: Accepts a rectangle and either GMat or GFrame
template<class Net, class T>
struct InferAPIRoi {
using type = typename std::enable_if
< detail::valid_infer_types<T>::value
&& std::tuple_size<typename Net::InArgs>::value == 1u
, std::function<typename Net::Result(cv::GOpaque<cv::Rect>, T)>
>::type;
};
// InferAPIList: Accepts a list of rectangles and list of GMat/GFrames;
// crops every input.
template<class Net, class... Ts>
struct InferAPIList {
using type = typename std::enable_if
< detail::valid_infer_types<Ts...>::value
&& std::tuple_size<typename Net::InArgs>::value == sizeof...(Ts)
, std::function<typename Net::ResultL(cv::GArray<cv::Rect>, Ts...)>
>::type;
};
// APIList2 is also a template to allow different calling options
// (GArray<cv::Rect> vs GArray<cv::GMat> per input)
template<class Net, class... Ts>
template<class Net, typename T, class... Ts>
struct InferAPIList2 {
using type = typename std::enable_if
< cv::detail::valid_infer2_types< typename Net::InArgs
< detail::valid_infer_types<T>::value &&
cv::detail::valid_infer2_types< typename Net::InArgs
, std::tuple<Ts...> >::value,
std::function<typename Net::ResultL(cv::GMat, cv::GArray<Ts>...)>
std::function<typename Net::ResultL(T, cv::GArray<Ts>...)>
>::type;
};
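The InferAPI* helpers above combine this trait with an arity check, so an ill-formed call simply fails substitution. A sketch (Net is assumed to be any single-input network declared via G_API_NET):

    static_assert(std::is_same<
        typename cv::InferAPI<Net, cv::GMat>::type,
        std::function<typename Net::Result(cv::GMat)>>::value, "");
    static_assert(std::is_same<
        typename cv::InferAPI<Net, cv::GFrame>::type,
        std::function<typename Net::Result(cv::GFrame)>>::value, "");
    // InferAPI<Net, cv::GMat, cv::GMat> has no ::type here, since the arity
    // (2) does not match Net::InArgs (1).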
@ -206,11 +245,11 @@ struct GInferList2Base {
// A generic inference kernel. API (::on()) is fully defined by the Net
// template parameter.
// Acts as a regular kernel in graph (via KernelTypeMedium).
template<typename Net>
template<typename Net, typename... Args>
struct GInfer final
: public GInferBase
, public detail::KernelTypeMedium< GInfer<Net>
, typename Net::API > {
, public detail::KernelTypeMedium< GInfer<Net, Args...>
, typename InferAPI<Net, Args...>::type > {
using GInferBase::getOutMeta; // FIXME: name lookup conflict workaround?
static constexpr const char* tag() { return Net::tag(); }
@ -218,11 +257,11 @@ struct GInfer final
// A specific roi-inference kernel. API (::on()) is fixed here and
// verified against Net.
template<typename Net>
template<typename Net, typename T>
struct GInferROI final
: public GInferROIBase
, public detail::KernelTypeMedium< GInferROI<Net>
, typename Net::APIRoi > {
, public detail::KernelTypeMedium< GInferROI<Net, T>
, typename InferAPIRoi<Net, T>::type > {
using GInferROIBase::getOutMeta; // FIXME: name lookup conflict workaround?
static constexpr const char* tag() { return Net::tag(); }
@ -231,11 +270,11 @@ struct GInferROI final
// A generic roi-list inference kernel. API (::on()) is derived from
// the Net template parameter (see more in infer<> overload).
template<typename Net>
template<typename Net, typename... Args>
struct GInferList final
: public GInferListBase
, public detail::KernelTypeMedium< GInferList<Net>
, typename Net::APIList > {
, public detail::KernelTypeMedium< GInferList<Net, Args...>
, typename InferAPIList<Net, Args...>::type > {
using GInferListBase::getOutMeta; // FIXME: name lookup conflict workaround?
static constexpr const char* tag() { return Net::tag(); }
@ -246,11 +285,11 @@ struct GInferList final
// overload).
// Takes an extra variadic template list to reflect how this network
// was called (with Rects or GMats as array parameters)
template<typename Net, typename... Args>
template<typename Net, typename T, typename... Args>
struct GInferList2 final
: public GInferList2Base
, public detail::KernelTypeMedium< GInferList2<Net, Args...>
, typename InferAPIList2<Net, Args...>::type > {
, public detail::KernelTypeMedium< GInferList2<Net, T, Args...>
, typename InferAPIList2<Net, T, Args...>::type > {
using GInferList2Base::getOutMeta; // FIXME: name lookup conflict workaround?
static constexpr const char* tag() { return Net::tag(); }
@ -280,9 +319,9 @@ namespace gapi {
* objects of appropriate type is returned.
* @sa G_API_NET()
*/
template<typename Net>
typename Net::Result infer(cv::GOpaque<cv::Rect> roi, cv::GMat in) {
return GInferROI<Net>::on(roi, in);
template<typename Net, typename T>
typename Net::Result infer(cv::GOpaque<cv::Rect> roi, T in) {
return GInferROI<Net, T>::on(roi, in);
}
/** @brief Calculates responses for the specified network (template
@ -300,7 +339,7 @@ typename Net::Result infer(cv::GOpaque<cv::Rect> roi, cv::GMat in) {
*/
template<typename Net, typename... Args>
typename Net::ResultL infer(cv::GArray<cv::Rect> roi, Args&&... args) {
return GInferList<Net>::on(roi, std::forward<Args>(args)...);
return GInferList<Net, Args...>::on(roi, std::forward<Args>(args)...);
}
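The list overload works the same way with a GFrame input; a sketch mirroring the ROIList MediaInputBGR test added below:

    cv::GFrame in;
    cv::GArray<cv::Rect> rr;
    cv::GArray<cv::GMat> age, gender;
    std::tie(age, gender) = cv::gapi::infer<AgeGender>(rr, in);  // one inference per ROI
    cv::GComputation comp(cv::GIn(in, rr), cv::GOut(age, gender));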
/** @brief Calculates responses for the specified network (template
@ -320,11 +359,12 @@ typename Net::ResultL infer(cv::GArray<cv::Rect> roi, Args&&... args) {
* GArray<> objects is returned with the appropriate types inside.
* @sa G_API_NET()
*/
template<typename Net, typename... Args>
typename Net::ResultL infer2(cv::GMat image, cv::GArray<Args>... args) {
template<typename Net, typename T, typename... Args>
typename Net::ResultL infer2(T image, cv::GArray<Args>... args) {
// FIXME: Declared as "2" because in the current form it steals
// overloads from the regular infer
return GInferList2<Net, Args...>::on(image, args...);
return GInferList2<Net, T, Args...>::on(image, args...);
}
/**
@ -340,7 +380,7 @@ typename Net::ResultL infer2(cv::GMat image, cv::GArray<Args>... args) {
*/
template<typename Net, typename... Args>
typename Net::Result infer(Args&&... args) {
return GInfer<Net>::on(std::forward<Args>(args)...);
return GInfer<Net, Args...>::on(std::forward<Args>(args)...);
}
/**


@ -36,6 +36,7 @@
#include <opencv2/gapi/gtype_traits.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/own/convert.hpp>
#include <opencv2/gapi/gframe.hpp>
#include "compiler/gobjref.hpp"
#include "compiler/gmodel.hpp"
@ -45,6 +46,10 @@
#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!
#if INF_ENGINE_RELEASE < 2021010000
#include "ie_compound_blob.h"
#endif
namespace IE = InferenceEngine;
namespace {
@ -151,6 +156,25 @@ inline IE::Blob::Ptr wrapIE(const cv::Mat &mat, cv::gapi::ie::TraitAs hint) {
return IE::Blob::Ptr{};
}
inline IE::Blob::Ptr wrapIE(const cv::MediaFrame::View& view,
const cv::GFrameDesc& desc) {
switch (desc.fmt) {
case cv::MediaFormat::BGR: {
auto bgr = cv::Mat(desc.size, CV_8UC3, view.ptr[0], view.stride[0]);
return wrapIE(bgr, cv::gapi::ie::TraitAs::IMAGE);
}
case cv::MediaFormat::NV12: {
auto y_plane = cv::Mat(desc.size, CV_8UC1, view.ptr[0], view.stride[0]);
auto uv_plane = cv::Mat(desc.size / 2, CV_8UC2, view.ptr[1], view.stride[1]);
return cv::gapi::ie::util::to_ie(y_plane, uv_plane);
}
default:
GAPI_Assert(false && "Unsupported media format for IE backend");
}
GAPI_Assert(false);
}
template<class MatType>
inline void copyFromIE(const IE::Blob::Ptr &blob, MatType &mat) {
switch (blob->getTensorDesc().getPrecision()) {
@ -256,6 +280,7 @@ struct IECallContext
{
// Input parameters passed to an inference operation.
std::vector<cv::GArg> args;
cv::GShapes in_shapes;
//FIXME: avoid conversion of arguments from internal representation to OpenCV one on each call
//to OCV kernel. (This can be achieved by a two single time conversions in GCPUExecutable::run,
@ -267,6 +292,10 @@ struct IECallContext
template<typename T>
const T& inArg(std::size_t input) { return args.at(input).get<T>(); }
const cv::MediaFrame& inFrame(std::size_t input) {
return inArg<cv::MediaFrame>(input);
}
// Syntax sugar
const cv::Mat& inMat(std::size_t input) {
return inArg<cv::Mat>(input);
@ -319,6 +348,24 @@ using GConstGIEModel = ade::ConstTypedGraph
, IEUnit
, IECallable
>;
using Views = std::vector<std::unique_ptr<cv::MediaFrame::View>>;
inline IE::Blob::Ptr extractBlob(IECallContext& ctx, std::size_t i, Views& views) {
switch (ctx.in_shapes[i]) {
case cv::GShape::GFRAME: {
const auto& frame = ctx.inFrame(i);
views.emplace_back(new cv::MediaFrame::View(frame.access(cv::MediaFrame::Access::R)));
return wrapIE(*views.back(), frame.desc());
}
case cv::GShape::GMAT: {
return wrapIE(ctx.inMat(i), cv::gapi::ie::TraitAs::IMAGE);
}
default:
GAPI_Assert("Unsupported input shape for IE backend");
}
GAPI_Assert(false);
}
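The Views vector keeps the mapped MediaFrame memory alive while InferenceEngine still references it through the blob. The intended call pattern (as in the Infer kernel below):

    Views views;
    for (auto i : ade::util::iota(uu.params.num_in)) {
        // The blob aliases the frame's mapped memory, so `views` must outlive
        // the request.
        IE::Blob::Ptr this_blob = extractBlob(ctx, i, views);
        iec.this_request.SetBlob(uu.params.input_names[i], this_blob);
    }
    iec.this_request.Infer();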
} // anonymous namespace
// GCPUExcecutable implementation //////////////////////////////////////////////
@ -384,6 +431,8 @@ cv::GArg cv::gimpl::ie::GIEExecutable::packArg(const cv::GArg &arg) {
// (and constructed by either bindIn/Out or resetInternal)
case GShape::GOPAQUE: return GArg(m_res.slot<cv::detail::OpaqueRef>().at(ref.id));
case GShape::GFRAME: return GArg(m_res.slot<cv::MediaFrame>().at(ref.id));
default:
util::throw_error(std::logic_error("Unsupported GShape type"));
break;
@ -413,6 +462,12 @@ void cv::gimpl::ie::GIEExecutable::run(std::vector<InObj> &&input_objs,
std::back_inserter(context.args),
std::bind(&GIEExecutable::packArg, this, _1));
// NB: Need to store inputs shape to recognize GFrame/GMat
ade::util::transform(op.args,
std::back_inserter(context.in_shapes),
[](const cv::GArg& arg) {
return arg.get<cv::gimpl::RcDesc>().shape;
});
// - Output parameters.
for (const auto &out_it : ade::util::indexed(op.outs)) {
// FIXME: Can the same GArg type resolution mechanism be reused here?
@ -438,6 +493,34 @@ namespace cv {
namespace gimpl {
namespace ie {
static void configureInputInfo(const IE::InputInfo::Ptr& ii, const cv::GMetaArg mm) {
switch (mm.index()) {
case cv::GMetaArg::index_of<cv::GMatDesc>():
{
ii->setPrecision(toIE(util::get<cv::GMatDesc>(mm).depth));
break;
}
case cv::GMetaArg::index_of<cv::GFrameDesc>():
{
const auto &meta = util::get<cv::GFrameDesc>(mm);
switch (meta.fmt) {
case cv::MediaFormat::NV12:
ii->getPreProcess().setColorFormat(IE::ColorFormat::NV12);
break;
case cv::MediaFormat::BGR:
// NB: Do nothing
break;
default:
GAPI_Assert(false && "Unsupported media format for IE backend");
}
ii->setPrecision(toIE(CV_8U));
break;
}
default:
util::throw_error(std::runtime_error("Unsupported input meta for IE backend"));
}
}
struct Infer: public cv::detail::KernelTag {
using API = cv::GInferBase;
static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
@ -468,11 +551,7 @@ struct Infer: public cv::detail::KernelTag {
auto &&ii = uu.inputs.at(std::get<0>(it));
const auto & mm = std::get<1>(it);
GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm)
&& "Non-GMat inputs are not supported");
const auto &meta = util::get<cv::GMatDesc>(mm);
ii->setPrecision(toIE(meta.depth));
configureInputInfo(ii, mm);
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
}
@ -495,15 +574,12 @@ struct Infer: public cv::detail::KernelTag {
static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) {
// non-generic version for now:
// - assumes all inputs/outputs are always Mats
Views views;
for (auto i : ade::util::iota(uu.params.num_in)) {
// TODO: Ideally we shouldn't do SetBlob() but GetBlob() instead,
// and redirect our data producers to this memory
// (A memory dialog comes to the picture again)
const cv::Mat this_mat = ctx.inMat(i);
// FIXME: By default here we trait our inputs as images.
// May be we need to make some more intelligence here about it
IE::Blob::Ptr this_blob = wrapIE(this_mat, cv::gapi::ie::TraitAs::IMAGE);
IE::Blob::Ptr this_blob = extractBlob(ctx, i, views);
iec.this_request.SetBlob(uu.params.input_names[i], this_blob);
}
iec.this_request.Infer();
@ -540,10 +616,10 @@ struct InferROI: public cv::detail::KernelTag {
GAPI_Assert(1u == uu.params.input_names.size());
GAPI_Assert(2u == in_metas.size());
// 0th is ROI, 1st is in0put image
auto &&ii = uu.inputs.at(uu.params.input_names.at(0));
const auto &meta = util::get<cv::GMatDesc>(in_metas.at(1));
ii->setPrecision(toIE(meta.depth));
// 0th is ROI, 1st is input image
auto &&ii = uu.inputs.at(uu.params.input_names.at(0));
auto &&mm = in_metas.at(1u);
configureInputInfo(ii, mm);
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
// FIXME: It would be nice here to have an exact number of network's
@ -566,10 +642,12 @@ struct InferROI: public cv::detail::KernelTag {
// non-generic version for now, per the InferROI's definition
GAPI_Assert(uu.params.num_in == 1);
const auto& this_roi = ctx.inArg<cv::detail::OpaqueRef>(0).rref<cv::Rect>();
const auto this_mat = ctx.inMat(1);
IE::Blob::Ptr this_blob = wrapIE(this_mat, cv::gapi::ie::TraitAs::IMAGE);
IE::Blob::Ptr roi_blob = IE::make_shared_blob(this_blob, toIE(this_roi));
iec.this_request.SetBlob(*uu.params.input_names.begin(), roi_blob);
Views views;
IE::Blob::Ptr this_blob = extractBlob(ctx, 1, views);
iec.this_request.SetBlob(*uu.params.input_names.begin(),
IE::make_shared_blob(this_blob, toIE(this_roi)));
iec.this_request.Infer();
for (auto i : ade::util::iota(uu.params.num_out)) {
cv::Mat& out_mat = ctx.outMatR(i);
@ -606,12 +684,7 @@ struct InferList: public cv::detail::KernelTag {
for (auto &&input_name : uu.params.input_names) {
auto &&ii = uu.inputs.at(input_name);
const auto & mm = in_metas[idx++];
GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm)
&& "Non-GMat inputs are not supported");
const auto &meta = util::get<cv::GMatDesc>(mm);
ii->setPrecision(toIE(meta.depth));
configureInputInfo(ii, mm);
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
}
@ -630,9 +703,9 @@ struct InferList: public cv::detail::KernelTag {
GAPI_Assert(uu.params.num_in == 1); // roi list is not counted in net's inputs
const auto& in_roi_vec = ctx.inArg<cv::detail::VectorRef>(0u).rref<cv::Rect>();
const cv::Mat this_mat = ctx.inMat(1u);
// Since we do a ROI list inference, always assume our input buffer is image
IE::Blob::Ptr this_blob = wrapIE(this_mat, cv::gapi::ie::TraitAs::IMAGE);
Views views;
IE::Blob::Ptr this_blob = extractBlob(ctx, 1, views);
// FIXME: This could be done ONCE at graph compile stage!
std::vector< std::vector<int> > cached_dims(uu.params.num_out);
@ -696,11 +769,30 @@ struct InferList2: public cv::detail::KernelTag {
// "blob"-based ones)
// FIXME: this is filtering not done, actually! GArrayDesc has
// no hint for its underlying type!
const auto &mm_0 = in_metas[0u];
const auto &meta_0 = util::get<cv::GMatDesc>(mm_0);
GAPI_Assert( !meta_0.isND()
const auto &mm_0 = in_metas[0u];
switch (in_metas[0u].index()) {
case cv::GMetaArg::index_of<cv::GMatDesc>(): {
const auto &meta_0 = util::get<cv::GMatDesc>(mm_0);
GAPI_Assert( !meta_0.isND()
&& !meta_0.planar
&& "Only images are supported as the 0th argument");
break;
}
case cv::GMetaArg::index_of<cv::GFrameDesc>(): {
// FIXME: Is there any validation for GFrame ?
break;
}
default:
util::throw_error(std::runtime_error("Unsupported input meta for IE backend"));
}
if (util::holds_alternative<cv::GMatDesc>(mm_0)) {
const auto &meta_0 = util::get<cv::GMatDesc>(mm_0);
GAPI_Assert( !meta_0.isND()
&& !meta_0.planar
&& "Only images are supported as the 0th argument");
}
std::size_t idx = 1u;
for (auto &&input_name : uu.params.input_names) {
auto &ii = uu.inputs.at(input_name);
@ -710,7 +802,7 @@ struct InferList2: public cv::detail::KernelTag {
if (op.k.inKinds[idx] == cv::detail::OpaqueKind::CV_RECT) {
// This is a cv::Rect -- configure the IE preprocessing
ii->setPrecision(toIE(meta_0.depth));
configureInputInfo(ii, mm_0);
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
} else {
// This is a cv::GMat (equals to: cv::Mat)
@ -733,9 +825,8 @@ struct InferList2: public cv::detail::KernelTag {
GAPI_Assert(ctx.args.size() > 1u
&& "This operation must have at least two arguments");
// Since we do a ROI list inference, always assume our input buffer is image
const cv::Mat mat_0 = ctx.inMat(0u);
IE::Blob::Ptr blob_0 = wrapIE(mat_0, cv::gapi::ie::TraitAs::IMAGE);
Views views;
IE::Blob::Ptr blob_0 = extractBlob(ctx, 0, views);
// Take the next argument, which must be vector (of any kind).
// Use it only to obtain the ROI list size (sizes of all other
@ -869,6 +960,16 @@ IE::Blob::Ptr cv::gapi::ie::util::to_ie(cv::Mat &blob) {
return wrapIE(blob, cv::gapi::ie::TraitAs::IMAGE);
}
IE::Blob::Ptr cv::gapi::ie::util::to_ie(cv::Mat &y_plane, cv::Mat &uv_plane) {
auto y_blob = wrapIE(y_plane, cv::gapi::ie::TraitAs::IMAGE);
auto uv_blob = wrapIE(uv_plane, cv::gapi::ie::TraitAs::IMAGE);
#if INF_ENGINE_RELEASE >= 2021010000
return IE::make_shared_blob<IE::NV12Blob>(y_blob, uv_blob);
#else
return IE::make_shared_blob<InferenceEngine::NV12Blob>(y_blob, uv_blob);
#endif
}
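A usage sketch for the new NV12 overload, mirroring the NV12 tests below (the UV plane is half the Y resolution with two channels; infer_request is assumed to come from an already loaded network whose input layer is named "data"):

    cv::Mat y (cv::Size{320, 240},     CV_8UC1);
    cv::Mat uv(cv::Size{320, 240} / 2, CV_8UC2);
    cv::randu(y,  0, 255);
    cv::randu(uv, 0, 255);
    IE::Blob::Ptr nv12_blob = cv::gapi::ie::util::to_ie(y, uv);
    infer_request.SetBlob("data", nv12_blob);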
#else // HAVE_INF_ENGINE
cv::gapi::GBackend cv::gapi::ie::backend() {


@ -28,6 +28,7 @@ namespace util {
GAPI_EXPORTS std::vector<int> to_ocv(const InferenceEngine::SizeVector &dims);
GAPI_EXPORTS cv::Mat to_ocv(InferenceEngine::Blob::Ptr blob);
GAPI_EXPORTS InferenceEngine::Blob::Ptr to_ie(cv::Mat &blob);
GAPI_EXPORTS InferenceEngine::Blob::Ptr to_ie(cv::Mat &y_plane, cv::Mat &uv_plane);
}}}}


@ -23,6 +23,45 @@ namespace opencv_test
{
namespace {
class TestMediaBGR final: public cv::MediaFrame::IAdapter {
cv::Mat m_mat;
using Cb = cv::MediaFrame::View::Callback;
Cb m_cb;
public:
explicit TestMediaBGR(cv::Mat m, Cb cb = [](){})
: m_mat(m), m_cb(cb) {
}
cv::GFrameDesc meta() const override {
return cv::GFrameDesc{cv::MediaFormat::BGR, cv::Size(m_mat.cols, m_mat.rows)};
}
cv::MediaFrame::View access(cv::MediaFrame::Access) override {
cv::MediaFrame::View::Ptrs pp = { m_mat.ptr(), nullptr, nullptr, nullptr };
cv::MediaFrame::View::Strides ss = { m_mat.step, 0u, 0u, 0u };
return cv::MediaFrame::View(std::move(pp), std::move(ss), Cb{m_cb});
}
};
class TestMediaNV12 final: public cv::MediaFrame::IAdapter {
cv::Mat m_y;
cv::Mat m_uv;
public:
TestMediaNV12(cv::Mat y, cv::Mat uv) : m_y(y), m_uv(uv) {
}
cv::GFrameDesc meta() const override {
return cv::GFrameDesc{cv::MediaFormat::NV12, cv::Size(m_y.cols, m_y.rows)};
}
cv::MediaFrame::View access(cv::MediaFrame::Access) override {
cv::MediaFrame::View::Ptrs pp = {
m_y.ptr(), m_uv.ptr(), nullptr, nullptr
};
cv::MediaFrame::View::Strides ss = {
m_y.step, m_uv.step, 0u, 0u
};
return cv::MediaFrame::View(std::move(pp), std::move(ss));
}
};
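The adapters above are consumed by the tests through cv::MediaFrame::Create, e.g.:

    cv::Mat bgr(cv::Size{320, 240}, CV_8UC3);
    cv::Mat y  (cv::Size{320, 240}, CV_8UC1);
    cv::Mat uv (cv::Size{160, 120}, CV_8UC2);
    auto bgr_frame  = cv::MediaFrame::Create<TestMediaBGR>(bgr);
    auto nv12_frame = cv::MediaFrame::Create<TestMediaNV12>(y, uv);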
// FIXME: taken from DNN module
static void initDLDTDataPath()
{
@ -64,11 +103,15 @@ void normAssert(cv::InputArray ref, cv::InputArray test,
namespace IE = InferenceEngine;
void setNetParameters(IE::CNNNetwork& net) {
auto &ii = net.getInputsInfo().at("data");
void setNetParameters(IE::CNNNetwork& net, bool is_nv12 = false) {
auto ii = net.getInputsInfo().at("data");
ii->setPrecision(IE::Precision::U8);
ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
if (is_nv12) {
ii->getPreProcess().setColorFormat(IE::ColorFormat::NV12);
}
}
} // anonymous namespace
// TODO: Probably DNN/IE part can be further parametrized with a template
@ -246,6 +289,81 @@ struct ROIList: public ::testing::Test {
}
}; // ROIList
struct ROIListNV12: public ::testing::Test {
cv::gapi::ie::detail::ParamDesc params;
cv::Mat m_in_uv;
cv::Mat m_in_y;
std::vector<cv::Rect> m_roi_list;
std::vector<cv::Mat> m_out_ie_ages;
std::vector<cv::Mat> m_out_ie_genders;
std::vector<cv::Mat> m_out_gapi_ages;
std::vector<cv::Mat> m_out_gapi_genders;
using AGInfo = std::tuple<cv::GMat, cv::GMat>;
G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");
void SetUp() {
initDLDTDataPath();
params.model_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
params.weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
params.device_id = "CPU";
cv::Size sz{320, 240};
m_in_y = cv::Mat{sz, CV_8UC1};
cv::randu(m_in_y, 0, 255);
m_in_uv = cv::Mat{sz / 2, CV_8UC2};
cv::randu(m_in_uv, 0, 255);
// both ROIs point to the same face, with a slightly changed geometry
m_roi_list = {
cv::Rect(cv::Point{64, 60}, cv::Size{ 96, 96}),
cv::Rect(cv::Point{50, 32}, cv::Size{128, 160}),
};
// Load & run IE network
{
auto plugin = cv::gimpl::ie::wrap::getPlugin(params);
auto net = cv::gimpl::ie::wrap::readNetwork(params);
setNetParameters(net, true);
auto this_network = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params);
auto infer_request = this_network.CreateInferRequest();
auto frame_blob = cv::gapi::ie::util::to_ie(m_in_y, m_in_uv);
for (auto &&rc : m_roi_list) {
const auto ie_rc = IE::ROI {
0u
, static_cast<std::size_t>(rc.x)
, static_cast<std::size_t>(rc.y)
, static_cast<std::size_t>(rc.width)
, static_cast<std::size_t>(rc.height)
};
infer_request.SetBlob("data", IE::make_shared_blob(frame_blob, ie_rc));
infer_request.Infer();
using namespace cv::gapi::ie::util;
m_out_ie_ages.push_back(to_ocv(infer_request.GetBlob("age_conv3")).clone());
m_out_ie_genders.push_back(to_ocv(infer_request.GetBlob("prob")).clone());
}
} // namespace IE = ..
} // ROIList()
void validate() {
// Validate with IE itself (avoid DNN module dependency here)
ASSERT_EQ(2u, m_out_ie_ages.size());
ASSERT_EQ(2u, m_out_ie_genders.size());
ASSERT_EQ(2u, m_out_gapi_ages.size());
ASSERT_EQ(2u, m_out_gapi_genders.size());
normAssert(m_out_ie_ages [0], m_out_gapi_ages [0], "0: Test age output");
normAssert(m_out_ie_genders[0], m_out_gapi_genders[0], "0: Test gender output");
normAssert(m_out_ie_ages [1], m_out_gapi_ages [1], "1: Test age output");
normAssert(m_out_ie_genders[1], m_out_gapi_genders[1], "1: Test gender output");
}
};
TEST_F(ROIList, TestInfer)
{
cv::GArray<cv::Rect> rr;
@ -505,6 +623,320 @@ TEST(TestAgeGenderIE, CPUConfig)
cv::compile_args(cv::gapi::networks(pp))));
}
TEST_F(ROIList, MediaInputBGR)
{
initDLDTDataPath();
cv::GFrame in;
cv::GArray<cv::Rect> rr;
cv::GArray<cv::GMat> age, gender;
std::tie(age, gender) = cv::gapi::infer<AgeGender>(rr, in);
cv::GComputation comp(cv::GIn(in, rr), cv::GOut(age, gender));
auto frame = MediaFrame::Create<TestMediaBGR>(m_in_mat);
auto pp = cv::gapi::ie::Params<AgeGender> {
params.model_path, params.weights_path, params.device_id
}.cfgOutputLayers({ "age_conv3", "prob" });
comp.apply(cv::gin(frame, m_roi_list),
cv::gout(m_out_gapi_ages, m_out_gapi_genders),
cv::compile_args(cv::gapi::networks(pp)));
validate();
}
TEST_F(ROIListNV12, MediaInputNV12)
{
initDLDTDataPath();
cv::GFrame in;
cv::GArray<cv::Rect> rr;
cv::GArray<cv::GMat> age, gender;
std::tie(age, gender) = cv::gapi::infer<AgeGender>(rr, in);
cv::GComputation comp(cv::GIn(in, rr), cv::GOut(age, gender));
auto frame = MediaFrame::Create<TestMediaNV12>(m_in_y, m_in_uv);
auto pp = cv::gapi::ie::Params<AgeGender> {
params.model_path, params.weights_path, params.device_id
}.cfgOutputLayers({ "age_conv3", "prob" });
comp.apply(cv::gin(frame, m_roi_list),
cv::gout(m_out_gapi_ages, m_out_gapi_genders),
cv::compile_args(cv::gapi::networks(pp)));
validate();
}
TEST(TestAgeGenderIE, MediaInputNV12)
{
initDLDTDataPath();
cv::gapi::ie::detail::ParamDesc params;
params.model_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
params.weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
params.device_id = "CPU";
cv::Size sz{320, 240};
cv::Mat in_y_mat(sz, CV_8UC1);
cv::randu(in_y_mat, 0, 255);
cv::Mat in_uv_mat(sz / 2, CV_8UC2);
cv::randu(in_uv_mat, 0, 255);
cv::Mat gapi_age, gapi_gender;
// Load & run IE network
IE::Blob::Ptr ie_age, ie_gender;
{
auto plugin = cv::gimpl::ie::wrap::getPlugin(params);
auto net = cv::gimpl::ie::wrap::readNetwork(params);
setNetParameters(net, true);
auto this_network = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params);
auto infer_request = this_network.CreateInferRequest();
infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_y_mat, in_uv_mat));
infer_request.Infer();
ie_age = infer_request.GetBlob("age_conv3");
ie_gender = infer_request.GetBlob("prob");
}
// Configure & run G-API
using AGInfo = std::tuple<cv::GMat, cv::GMat>;
G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");
cv::GFrame in;
cv::GMat age, gender;
std::tie(age, gender) = cv::gapi::infer<AgeGender>(in);
cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));
auto frame = MediaFrame::Create<TestMediaNV12>(in_y_mat, in_uv_mat);
auto pp = cv::gapi::ie::Params<AgeGender> {
params.model_path, params.weights_path, params.device_id
}.cfgOutputLayers({ "age_conv3", "prob" });
comp.apply(cv::gin(frame), cv::gout(gapi_age, gapi_gender),
cv::compile_args(cv::gapi::networks(pp)));
// Validate with IE itself (avoid DNN module dependency here)
normAssert(cv::gapi::ie::util::to_ocv(ie_age), gapi_age, "Test age output" );
normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output");
}
TEST(TestAgeGenderIE, MediaInputBGR)
{
initDLDTDataPath();
cv::gapi::ie::detail::ParamDesc params;
params.model_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
params.weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
params.device_id = "CPU";
cv::Size sz{320, 240};
cv::Mat in_mat(sz, CV_8UC3);
cv::randu(in_mat, 0, 255);
cv::Mat gapi_age, gapi_gender;
// Load & run IE network
IE::Blob::Ptr ie_age, ie_gender;
{
auto plugin = cv::gimpl::ie::wrap::getPlugin(params);
auto net = cv::gimpl::ie::wrap::readNetwork(params);
setNetParameters(net);
auto this_network = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params);
auto infer_request = this_network.CreateInferRequest();
infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_mat));
infer_request.Infer();
ie_age = infer_request.GetBlob("age_conv3");
ie_gender = infer_request.GetBlob("prob");
}
// Configure & run G-API
using AGInfo = std::tuple<cv::GMat, cv::GMat>;
G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");
cv::GFrame in;
cv::GMat age, gender;
std::tie(age, gender) = cv::gapi::infer<AgeGender>(in);
cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));
auto frame = MediaFrame::Create<TestMediaBGR>(in_mat);
auto pp = cv::gapi::ie::Params<AgeGender> {
params.model_path, params.weights_path, params.device_id
}.cfgOutputLayers({ "age_conv3", "prob" });
comp.apply(cv::gin(frame), cv::gout(gapi_age, gapi_gender),
cv::compile_args(cv::gapi::networks(pp)));
// Validate with IE itself (avoid DNN module dependency here)
normAssert(cv::gapi::ie::util::to_ocv(ie_age), gapi_age, "Test age output" );
normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output");
}
TEST(InferROI, MediaInputBGR)
{
initDLDTDataPath();
cv::gapi::ie::detail::ParamDesc params;
params.model_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
params.weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
params.device_id = "CPU";
cv::Size sz{320, 240};
cv::Mat in_mat(sz, CV_8UC3);
cv::randu(in_mat, 0, 255);
cv::Mat gapi_age, gapi_gender;
cv::Rect rect(cv::Point{64, 60}, cv::Size{96, 96});
// Load & run IE network
IE::Blob::Ptr ie_age, ie_gender;
{
auto plugin = cv::gimpl::ie::wrap::getPlugin(params);
auto net = cv::gimpl::ie::wrap::readNetwork(params);
setNetParameters(net);
auto this_network = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params);
auto infer_request = this_network.CreateInferRequest();
const auto ie_rc = IE::ROI {
0u
, static_cast<std::size_t>(rect.x)
, static_cast<std::size_t>(rect.y)
, static_cast<std::size_t>(rect.width)
, static_cast<std::size_t>(rect.height)
};
IE::Blob::Ptr roi_blob = IE::make_shared_blob(cv::gapi::ie::util::to_ie(in_mat), ie_rc);
infer_request.SetBlob("data", roi_blob);
infer_request.Infer();
ie_age = infer_request.GetBlob("age_conv3");
ie_gender = infer_request.GetBlob("prob");
}
// Configure & run G-API
using AGInfo = std::tuple<cv::GMat, cv::GMat>;
G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");
cv::GFrame in;
cv::GOpaque<cv::Rect> roi;
cv::GMat age, gender;
std::tie(age, gender) = cv::gapi::infer<AgeGender>(roi, in);
cv::GComputation comp(cv::GIn(in, roi), cv::GOut(age, gender));
auto frame = MediaFrame::Create<TestMediaBGR>(in_mat);
auto pp = cv::gapi::ie::Params<AgeGender> {
params.model_path, params.weights_path, params.device_id
}.cfgOutputLayers({ "age_conv3", "prob" });
comp.apply(cv::gin(frame, rect), cv::gout(gapi_age, gapi_gender),
cv::compile_args(cv::gapi::networks(pp)));
// Validate with IE itself (avoid DNN module dependency here)
normAssert(cv::gapi::ie::util::to_ocv(ie_age), gapi_age, "Test age output" );
normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output");
}
TEST(InferROI, MediaInputNV12)
{
initDLDTDataPath();
cv::gapi::ie::detail::ParamDesc params;
params.model_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.xml");
params.weights_path = findDataFile(SUBDIR + "age-gender-recognition-retail-0013.bin");
params.device_id = "CPU";
cv::Size sz{320, 240};
auto in_y_mat = cv::Mat{sz, CV_8UC1};
cv::randu(in_y_mat, 0, 255);
auto in_uv_mat = cv::Mat{sz / 2, CV_8UC2};
cv::randu(in_uv_mat, 0, 255);
cv::Mat gapi_age, gapi_gender;
cv::Rect rect(cv::Point{64, 60}, cv::Size{96, 96});
// Load & run IE network
IE::Blob::Ptr ie_age, ie_gender;
{
auto plugin = cv::gimpl::ie::wrap::getPlugin(params);
auto net = cv::gimpl::ie::wrap::readNetwork(params);
setNetParameters(net, true);
auto this_network = cv::gimpl::ie::wrap::loadNetwork(plugin, net, params);
auto infer_request = this_network.CreateInferRequest();
const auto ie_rc = IE::ROI {
0u
, static_cast<std::size_t>(rect.x)
, static_cast<std::size_t>(rect.y)
, static_cast<std::size_t>(rect.width)
, static_cast<std::size_t>(rect.height)
};
IE::Blob::Ptr roi_blob = IE::make_shared_blob(cv::gapi::ie::util::to_ie(in_y_mat, in_uv_mat), ie_rc);
infer_request.SetBlob("data", roi_blob);
infer_request.Infer();
ie_age = infer_request.GetBlob("age_conv3");
ie_gender = infer_request.GetBlob("prob");
}
// Configure & run G-API
using AGInfo = std::tuple<cv::GMat, cv::GMat>;
G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");
cv::GFrame in;
cv::GOpaque<cv::Rect> roi;
cv::GMat age, gender;
std::tie(age, gender) = cv::gapi::infer<AgeGender>(roi, in);
cv::GComputation comp(cv::GIn(in, roi), cv::GOut(age, gender));
auto frame = MediaFrame::Create<TestMediaNV12>(in_y_mat, in_uv_mat);
auto pp = cv::gapi::ie::Params<AgeGender> {
params.model_path, params.weights_path, params.device_id
}.cfgOutputLayers({ "age_conv3", "prob" });
comp.apply(cv::gin(frame, rect), cv::gout(gapi_age, gapi_gender),
cv::compile_args(cv::gapi::networks(pp)));
// Validate with IE itself (avoid DNN module dependency here)
normAssert(cv::gapi::ie::util::to_ocv(ie_age), gapi_age, "Test age output" );
normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output");
}
TEST_F(ROIList, Infer2MediaInputBGR)
{
cv::GArray<cv::Rect> rr;
cv::GFrame in;
cv::GArray<cv::GMat> age, gender;
std::tie(age, gender) = cv::gapi::infer2<AgeGender>(in, rr);
cv::GComputation comp(cv::GIn(in, rr), cv::GOut(age, gender));
auto frame = MediaFrame::Create<TestMediaBGR>(m_in_mat);
auto pp = cv::gapi::ie::Params<AgeGender> {
params.model_path, params.weights_path, params.device_id
}.cfgOutputLayers({ "age_conv3", "prob" });
comp.apply(cv::gin(frame, m_roi_list),
cv::gout(m_out_gapi_ages, m_out_gapi_genders),
cv::compile_args(cv::gapi::networks(pp)));
validate();
}
TEST_F(ROIListNV12, Infer2MediaInputNV12)
{
cv::GArray<cv::Rect> rr;
cv::GFrame in;
cv::GArray<cv::GMat> age, gender;
std::tie(age, gender) = cv::gapi::infer2<AgeGender>(in, rr);
cv::GComputation comp(cv::GIn(in, rr), cv::GOut(age, gender));
auto frame = MediaFrame::Create<TestMediaNV12>(m_in_y, m_in_uv);
auto pp = cv::gapi::ie::Params<AgeGender> {
params.model_path, params.weights_path, params.device_id
}.cfgOutputLayers({ "age_conv3", "prob" });
comp.apply(cv::gin(frame, m_roi_list),
cv::gout(m_out_gapi_ages, m_out_gapi_genders),
cv::compile_args(cv::gapi::networks(pp)));
validate();
}
} // namespace opencv_test
#endif // HAVE_INF_ENGINE