Merge pull request #15090 from dmatveev:dm/ng-0001-g-api-inference-api

* G-API-NG/API: Introduced inference API and IE-based backend

- Very quick-n-dirty implementation
- OpenCV's own DNN module is not used
- No tests so far

* G-API-NG/IE: Refined IE backend, added more tests

* G-API-NG/IE: Fixed various CI warnings & build issues + tests

- Added tests on multi-dimensional own::Mat
- Added tests on GMatDesc with dimensions
- Documentation on infer.hpp
- Fixed more warnings + added a ROI list test
- Fix descr_of clash for vector<Mat> & standalone mode
- Fix build issue with gcc-4.8x
- Addressed review comments

* G-API-NG/IE: Addressed review comments

- Pass `false` to findDataFile()
- Add deprecation warning suppression macros for IE
This commit is contained in:
Dmitry Matveev 2019-08-05 17:56:34 +03:00 committed by Alexander Alekhin
parent 59b0314a0e
commit 0757a51e2b
32 changed files with 1974 additions and 85 deletions

View File

@ -15,6 +15,7 @@ if (NOT HAVE_CXX11 OR NOT TARGET ade)
endif()
set(the_description "OpenCV G-API Core Module")
ocv_add_module(gapi opencv_imgproc)
file(GLOB gapi_ext_hdrs
@ -45,6 +46,7 @@ set(gapi_srcs
src/api/kernels_core.cpp
src/api/kernels_imgproc.cpp
src/api/render.cpp
src/api/ginfer.cpp
# Compiler part
src/compiler/gmodel.cpp
@ -82,6 +84,10 @@ set(gapi_srcs
src/backends/ocl/goclimgproc.cpp
src/backends/ocl/goclcore.cpp
# IE Backend. FIXME: should be included by CMake
# if and only if IE support is enabled
src/backends/ie/giebackend.cpp
# Compound
src/backends/common/gcompoundbackend.cpp
src/backends/common/gcompoundkernel.cpp
@ -100,9 +106,8 @@ ocv_module_include_directories("${CMAKE_CURRENT_LIST_DIR}/src")
# Note `ade` is not a module name but link dependency for ${the_module}
# (which is opencv_gapi)
ocv_create_module(ade)
ocv_add_accuracy_tests()
ocv_create_module(ade ${INF_ENGINE_TARGET})
ocv_add_accuracy_tests(${INF_ENGINE_TARGET})
# FIXME: test binary is linked with ADE directly since ADE symbols
# are not exported from libopencv_gapi.so in any form - thus
# there're two copies of ADE code in memory when tests run (!)

View File

@ -20,6 +20,9 @@
#include <opencv2/gapi/util/throw.hpp>
#include <opencv2/gapi/own/assert.hpp>
#include <opencv2/gapi/gmat.hpp> // flatten_g only!
#include <opencv2/gapi/gscalar.hpp> // flatten_g only!
namespace cv
{
// Forward declaration; GNode and GOrigin are an internal
@ -247,6 +250,24 @@ namespace detail
return m_ref->m_desc;
}
};
// Helper (FIXME: work-around?)
// stripping G types to their host types
// like cv::GArray<GMat> would still map to std::vector<cv::Mat>
// but not to std::vector<cv::GMat>
#if defined(GAPI_STANDALONE)
# define FLATTEN_NS cv::gapi::own
#else
# define FLATTEN_NS cv
#endif
template<class T> struct flatten_g;
template<> struct flatten_g<cv::GMat> { using type = FLATTEN_NS::Mat; };
template<> struct flatten_g<cv::GScalar> { using type = FLATTEN_NS::Scalar; };
template<class T> struct flatten_g { using type = T; };
#undef FLATTEN_NS
// FIXME: the above mainly duplicates "ProtoToParam" thing from gtyped.hpp
// but I decided not to include gtyped here - probably worth moving that stuff
// to some common place? (DM)
} // namespace detail
/** \addtogroup gapi_data_objects
@ -263,10 +284,16 @@ public:
detail::GArrayU strip() const { return m_ref; }
private:
static void VCTor(detail::VectorRef& vref) { vref.reset<T>(); }
// Host type (or Flat type) - the type this GArray is actually
// specified to.
using HT = typename detail::flatten_g<typename std::decay<T>::type>::type;
static void VCTor(detail::VectorRef& vref) {
vref.reset<HT>();
}
void putDetails() {
m_ref.setConstructFcn(&VCTor);
m_ref.specifyType<T>();
m_ref.specifyType<HT>();
}
detail::GArrayU m_ref;

View File

@ -36,8 +36,9 @@ struct GAPI_EXPORTS GKernel
using M = std::function<GMetaArgs(const GMetaArgs &, const GArgs &)>;
const std::string name; // kernel ID, defined by its API (signature)
const std::string tag; // some (implementation-specific) tag
const M outMeta; // generic adaptor to API::outMeta(...)
const GShapes outShapes; // types (shapes) kernel's outputs
const GShapes outShapes; // types (shapes) kernel's outputs
};
// GKernelImpl describes particular kernel implementation to the system
@ -166,6 +167,12 @@ namespace detail
}
};
////////////////////////////////////////////////////////////////////////////
// Helper class to introduce tags to calls. By default there's no tag
struct NoTag {
static constexpr const char *tag() { return ""; }
};
} // namespace detail
// GKernelType and GKernelTypeM are base classes which implement typed ::on()
@ -175,8 +182,9 @@ namespace detail
// GKernelTypeM respectively.
template<typename K, typename... R, typename... Args>
class GKernelTypeM<K, std::function<std::tuple<R...>(Args...)> >:
public detail::MetaHelper<K, std::tuple<Args...>, std::tuple<R...>>
class GKernelTypeM<K, std::function<std::tuple<R...>(Args...)> >
: public detail::MetaHelper<K, std::tuple<Args...>, std::tuple<R...>>
, public detail::NoTag
{
template<int... IIs>
static std::tuple<R...> yield(cv::GCall &call, detail::Seq<IIs...>)
@ -190,7 +198,7 @@ public:
static std::tuple<R...> on(Args... args)
{
cv::GCall call(GKernel{K::id(), &K::getOutMeta, {detail::GTypeTraits<R>::shape...}});
cv::GCall call(GKernel{K::id(), K::tag(), &K::getOutMeta, {detail::GTypeTraits<R>::shape...}});
call.pass(args...);
return yield(call, typename detail::MkSeq<sizeof...(R)>::type());
}
@ -199,8 +207,9 @@ public:
template<typename, typename> class GKernelType;
template<typename K, typename R, typename... Args>
class GKernelType<K, std::function<R(Args...)> >:
public detail::MetaHelper<K, std::tuple<Args...>, R>
class GKernelType<K, std::function<R(Args...)> >
: public detail::MetaHelper<K, std::tuple<Args...>, R>
, public detail::NoTag
{
public:
using InArgs = std::tuple<Args...>;
@ -208,7 +217,7 @@ public:
static R on(Args... args)
{
cv::GCall call(GKernel{K::id(), &K::getOutMeta, {detail::GTypeTraits<R>::shape}});
cv::GCall call(GKernel{K::id(), K::tag(), &K::getOutMeta, {detail::GTypeTraits<R>::shape}});
call.pass(args...);
return detail::Yield<R>::yield(call, 0);
}
@ -244,6 +253,9 @@ public:
public detail::G_ID_HELPER_CLASS(Class)
// {body} is to be defined by user
#define G_API_OP G_TYPED_KERNEL
#define G_API_OP_M G_TYPED_KERNEL_M
namespace cv
{
namespace gapi
@ -437,6 +449,7 @@ namespace gapi {
return includesAPI(KAPI::id());
}
// FIXME: The below comment is wrong, and who needs this function?
/**
* @brief Find a kernel (by its API)
*

View File

@ -69,15 +69,26 @@ struct GAPI_EXPORTS GMatDesc
int chan;
cv::gapi::own::Size size; // NB.: no multi-dimensional cases covered yet
bool planar;
std::vector<int> dims; // FIXME: Maybe it's real questionable to have it here
GMatDesc(int d, int c, cv::gapi::own::Size s, bool p = false)
: depth(d), chan(c), size(s), planar(p) {}
GMatDesc(int d, const std::vector<int> &dd)
: depth(d), chan(-1), size{-1,-1}, planar(false), dims(dd) {}
GMatDesc(int d, std::vector<int> &&dd)
: depth(d), chan(-1), size{-1,-1}, planar(false), dims(std::move(dd)) {}
GMatDesc() : GMatDesc(-1, -1, {-1,-1}) {}
inline bool operator== (const GMatDesc &rhs) const
{
return depth == rhs.depth && chan == rhs.chan && size == rhs.size && planar == rhs.planar;
return depth == rhs.depth
&& chan == rhs.chan
&& size == rhs.size
&& planar == rhs.planar
&& dims == rhs.dims;
}
inline bool operator!= (const GMatDesc &rhs) const
@ -85,6 +96,8 @@ struct GAPI_EXPORTS GMatDesc
return !(*this == rhs);
}
bool isND() const { return !dims.empty(); }
// Checks if the passed mat can be described by this descriptor
// (it handles the case when
// 1-channel mat can be reinterpreted as is (1-channel mat)

View File

@ -61,13 +61,15 @@ namespace detail
} // namespace detail
// Note: descr_of(std::vector<..>) returns a GArrayDesc, while
// descrs_of(std::vector<..>) returns an array of Meta args!
class Mat;
class UMat;
GAPI_EXPORTS cv::GMetaArgs descr_of(const std::vector<cv::Mat> &vec);
GAPI_EXPORTS cv::GMetaArgs descr_of(const std::vector<cv::UMat> &vec);
GAPI_EXPORTS cv::GMetaArgs descrs_of(const std::vector<cv::Mat> &vec);
GAPI_EXPORTS cv::GMetaArgs descrs_of(const std::vector<cv::UMat> &vec);
namespace gapi { namespace own {
class Mat;
GAPI_EXPORTS cv::GMetaArgs descr_of(const std::vector<Mat> &vec);
GAPI_EXPORTS cv::GMetaArgs descrs_of(const std::vector<Mat> &vec);
}} // namespace gapi::own
} // namespace cv

View File

@ -0,0 +1,231 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation
#ifndef OPENCV_GAPI_INFER_HPP
#define OPENCV_GAPI_INFER_HPP
// FIXME: Inference API is currently only available in full mode
#if !defined(GAPI_STANDALONE)
#include <functional>
#include <string> // string
#include <utility> // tuple
#include <opencv2/gapi/util/any.hpp> // any<>
#include <opencv2/gapi/gkernel.hpp> // GKernelType[M], GBackend
#include <opencv2/gapi/garg.hpp> // GArg
#include <opencv2/gapi/gcommon.hpp> // CompileArgTag
#include <opencv2/gapi/gmetaarg.hpp> // GMetaArg
namespace cv {
namespace detail {
    // This tiny class eliminates the semantic difference between
    // GKernelType and GKernelTypeM.
    // FIXME: Something similar can be reused for regular kernels
    template<typename, typename>
    struct KernelTypeMedium;

    // Multiple-return case: a std::function returning a tuple maps
    // onto the tuple-returning kernel base class (GKernelTypeM).
    template<class K, typename... R, typename... Args>
    struct KernelTypeMedium<K, std::function<std::tuple<R...>(Args...)> >:
        public GKernelTypeM<K, std::function<std::tuple<R...>(Args...)> > {};

    // Single-return case: a plain std::function signature maps onto
    // the regular kernel base class (GKernelType).
    template<class K, typename R, typename... Args>
    struct KernelTypeMedium<K, std::function<R(Args...)> >:
        public GKernelType<K, std::function<R(Args...)> > {};
} // namespace detail
// Primary template is only declared -- networks are always described
// by one of the two std::function-based specializations below.
template<typename, typename> class GNetworkType;

// TODO: maybe tuple_wrap_helper from util.hpp may help with this.
// Multiple-return-value network definition (specialized base class)
template<typename K, typename... R, typename... Args>
class GNetworkType<K, std::function<std::tuple<R...>(Args...)> >
{
public:
    using InArgs = std::tuple<Args...>;   // network input types
    using OutArgs = std::tuple<R...>;     // network output types

    using Result = OutArgs;
    using API = std::function<Result(Args...)>;  // single-shot inference signature

    // ROI-list flavor: one result per region, so every R becomes a GArray<R>
    using ResultL = std::tuple< cv::GArray<R>... >;
    using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
};

// Single-return-value network definition (specialized base class)
template<typename K, typename R, typename... Args>
class GNetworkType<K, std::function<R(Args...)> >
{
public:
    using InArgs = std::tuple<Args...>;   // network input types
    using OutArgs = std::tuple<R>;        // network output type (wrapped in a tuple)

    using Result = R;
    using API = std::function<R(Args...)>;  // single-shot inference signature

    // ROI-list flavor: one result per region
    using ResultL = cv::GArray<R>;
    using APIList = std::function<ResultL(cv::GArray<cv::Rect>, Args...)>;
};
// Base "Infer" kernel. Note - for whatever network, kernel ID
// is always the same. Different inference calls are distinguished by
// network _tag_ (an extra field in GCall)
//
// getOutMeta is a stub callback collected by G-API kernel subsystem
// automatically. This is a rare case when this callback is defined by
// a particular backend, not by a network itself.
struct GInferBase {
    static constexpr const char * id() {
        return "org.opencv.dnn.infer";     // Universal stub
    }
    static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
        return GMetaArgs{};                // One more universal stub
    }
};

// Base "Infer list" kernel.
// All notes from "Infer" kernel apply here as well.
struct GInferListBase {
    static constexpr const char * id() {
        return "org.opencv.dnn.infer-roi"; // Universal stub
    }
    static GMetaArgs getOutMeta(const GMetaArgs &, const GArgs &) {
        return GMetaArgs{};                // One more universal stub
    }
};

// A generic inference kernel. API (::on()) is fully defined by the Net
// template parameter.
// Acts as a regular kernel in graph (via KernelTypeMedium).
template<typename Net>
struct GInfer final
    : public GInferBase
    , public detail::KernelTypeMedium< GInfer<Net>
                                     , typename Net::API > {
    using GInferBase::getOutMeta; // FIXME: name lookup conflict workaround?

    // The network tag is what distinguishes this call from other
    // inference calls sharing the same (universal) kernel id.
    static constexpr const char* tag() { return Net::tag(); }
};

// A generic roi-list inference kernel. API (::on()) is derived from
// the Net template parameter (see more in infer<> overload).
template<typename Net>
struct GInferList final
    : public GInferListBase
    , public detail::KernelTypeMedium< GInferList<Net>
                                     , typename Net::APIList > {
    using GInferListBase::getOutMeta; // FIXME: name lookup conflict workaround?

    // Same tagging scheme as in GInfer (see above)
    static constexpr const char* tag() { return Net::tag(); }
};
} // namespace cv
// FIXME: Probably the <API> signature makes a function/tuple/function round-trip
// Declares a network type usable with cv::gapi::infer<>(), e.g.:
//   G_API_NET(MyNet, <cv::GMat(cv::GMat)>, "com.example.my-net");
// The generated class derives from GNetworkType (providing API/APIList
// and the related typedefs) and carries the user-supplied Tag string.
#define G_API_NET(Class, API, Tag) \
    struct Class final: public cv::GNetworkType<Class, std::function API> { \
        static constexpr const char * tag() { return Tag; } \
    }
namespace cv {
namespace gapi {

/** @brief Calculates responses for the specified network (template
 * parameter) for every region in the source image.
 *
 * @tparam Net A network type defined with G_API_NET() macro.
 * @param roi a list of rectangles describing regions of interest
 *   in the source image. Usually an output of object detector or tracker.
 * @param args network's input parameters as specified in G_API_NET() macro.
 *   NOTE: verified to work reliably with 1-input topologies only.
 * @return a list of objects of return type as defined in G_API_NET().
 *   If a network has multiple return values (defined with a tuple), a tuple of
 *   GArray<> objects is returned with the appropriate types inside.
 * @sa G_API_NET()
 */
template<typename Net, typename... Args>
typename Net::ResultL infer(cv::GArray<cv::Rect> roi, Args&&... args) {
    return GInferList<Net>::on(roi, std::forward<Args>(args)...);
}

/**
 * @brief Calculates response for the specified network (template
 * parameter) given the input data.
 *
 * @tparam Net A network type defined with G_API_NET() macro.
 * @param args network's input parameters as specified in G_API_NET() macro.
 * @return an object of return type as defined in G_API_NET().
 *   If a network has multiple return values (defined with a tuple), a tuple of
 *   objects of appropriate type is returned.
 * @sa G_API_NET()
 */
template<typename Net, typename... Args>
typename Net::Result infer(Args&&... args) {
    return GInfer<Net>::on(std::forward<Args>(args)...);
}

} // namespace gapi
} // namespace cv

#endif // GAPI_STANDALONE
namespace cv {
namespace gapi {

// Note: the below code _is_ part of STANDALONE build,
// just to make our compiler code compileable.

// A type-erased form of network parameters.
// Similar to how a type-erased GKernel is represented and used.
struct GAPI_EXPORTS GNetParam {
    std::string tag;     // FIXME: const?
    GBackend backend;    // Specifies the execution model
    util::any params;    // Backend-interpreted parameter structure
};

/**
 * @brief A container class for network configurations. Similar to
 * GKernelPackage. Use cv::gapi::networks() to construct this object.
 *
 * @sa cv::gapi::networks
 */
struct GAPI_EXPORTS GNetPackage {
    explicit GNetPackage(std::initializer_list<GNetParam> &&ii = {});
    std::vector<GBackend> backends() const;
    std::vector<GNetParam> networks;
};
} // namespace gapi

namespace detail {
// Convert a backend-specific parametrization object (anything exposing
// tag()/backend()/params()) into the type-erased GNetParam form.
template<typename T>
gapi::GNetParam strip(T&& t) {
    return gapi::GNetParam { t.tag()
                           , t.backend()
                           , t.params()
                           };
}

// Makes GNetPackage usable as a graph compile argument.
template<> struct CompileArgTag<cv::gapi::GNetPackage> {
    static const char* tag() { return "gapi.net_package"; }
};
} // namespace cv::detail

namespace gapi {
// Combine one or more backend-specific network parametrizations
// into a single GNetPackage (to be passed as a compile argument).
template<typename... Args>
cv::gapi::GNetPackage networks(Args&&... args) {
    return cv::gapi::GNetPackage({ cv::detail::strip(args)... });
}
} // namespace gapi
} // namespace cv

#endif // OPENCV_GAPI_INFER_HPP

View File

@ -0,0 +1,106 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation
#ifndef OPENCV_GAPI_INFER_IE_HPP
#define OPENCV_GAPI_INFER_IE_HPP
#ifdef HAVE_INF_ENGINE
#include <algorithm>     // std::copy (cfgInputLayers/cfgOutputLayers)
#include <array>
#include <iterator>      // std::back_inserter (cfgInputLayers/cfgOutputLayers)
#include <string>
#include <tuple>         // tuple, tuple_size
#include <unordered_map>
#include <vector>        // ParamDesc::input_names/output_names

#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/util/any.hpp>
namespace cv {
namespace gapi {
// FIXME: introduce a new sub-namespace for NN?
namespace ie {
GAPI_EXPORTS cv::gapi::GBackend backend();
namespace detail {
struct ParamDesc {
std::string model_path;
std::string weights_path;
std::string device_id;
// NB: Here order follows the `Net` API
std::vector<std::string> input_names;
std::vector<std::string> output_names;
std::unordered_map<std::string, cv::Mat> const_inputs;
// NB: nun_* may differ from topology's real input/output port numbers
// (e.g. topology's partial execution)
std::size_t num_in; // How many inputs are defined in the operation
std::size_t num_out; // How many outputs are defined in the operation
};
} // namespace detail
// FIXME: this is probably a shared (reusable) thing
// Derives the layer-name array types for a Net: the array sizes are
// fixed at compile time to the network's declared input/output counts,
// so a mismatched cfgInputLayers()/cfgOutputLayers() call won't compile.
template<typename Net>
struct PortCfg {
    using In = std::array
        < std::string
        , std::tuple_size<typename Net::InArgs>::value >;
    using Out = std::array
        < std::string
        , std::tuple_size<typename Net::OutArgs>::value >;
};
// Parametrization of a Net for the Inference Engine backend: collects
// model/weights/device plus optional port names and constant inputs,
// and exposes the generic G-API network-parametrization triplet
// (backend()/tag()/params()) consumed by cv::gapi::networks().
template<typename Net> class Params {
public:
    /// @param model   path to the model file
    /// @param weights path to the weights file
    /// @param device  IE device identifier to run the network on
    Params(const std::string &model,
           const std::string &weights,
           const std::string &device)
        : desc{ model, weights, device, {}, {}, {}
              , std::tuple_size<typename Net::InArgs>::value
              , std::tuple_size<typename Net::OutArgs>::value
              } {
    } // NOTE: stray ';' after this body removed (was an empty member declaration)

    /// Specify input layer names; order must follow the Net API.
    Params<Net>& cfgInputLayers(const typename PortCfg<Net>::In &ll) {
        desc.input_names.clear();
        desc.input_names.reserve(ll.size());
        std::copy(ll.begin(), ll.end(),
                  std::back_inserter(desc.input_names));
        return *this;
    }

    /// Specify output layer names; order must follow the Net API.
    Params<Net>& cfgOutputLayers(const typename PortCfg<Net>::Out &ll) {
        desc.output_names.clear();
        desc.output_names.reserve(ll.size());
        std::copy(ll.begin(), ll.end(),
                  std::back_inserter(desc.output_names));
        return *this;
    }

    /// Bind a constant cv::Mat to the given layer instead of a graph input.
    Params<Net>& constInput(const std::string &layer_name,
                            const cv::Mat &data) {
        desc.const_inputs[layer_name] = data;
        return *this;
    }

    // BEGIN(G-API's network parametrization API)
    GBackend      backend() const { return cv::gapi::ie::backend();  }
    std::string   tag()     const { return Net::tag(); }
    cv::util::any params()  const { return { desc }; }
    // END(G-API's network parametrization API)

protected:
    detail::ParamDesc desc;
};
} // namespace ie
} // namespace gapi
} // namespace cv
#endif // HAVE_INF_ENGINE
#endif // OPENCV_GAPI_INFER_IE_HPP

View File

@ -0,0 +1,31 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation
#ifndef OPENCV_GAPI_INFER_IE_UTIL_HPP
#define OPENCV_GAPI_INFER_IE_UTIL_HPP
#ifdef HAVE_INF_ENGINE
// NOTE: This file is not included by default in infer/ie.hpp
// and won't be. infer/ie.hpp doesn't depend on IE headers itself.
// This file does -- so needs to be included separately by those who care.
#include "inference_engine.hpp"
namespace cv {
namespace gapi {
namespace ie {
namespace util {

// Convert an IE dimension vector into OpenCV's int-based dims vector.
GAPI_EXPORTS std::vector<int> to_ocv(const InferenceEngine::SizeVector &dims);

// Represent an IE blob as a cv::Mat.
// NOTE(review): whether this copies or aliases the blob's memory is not
// visible from this declaration -- check giebackend.cpp before relying on it.
GAPI_EXPORTS cv::Mat to_ocv(InferenceEngine::Blob::Ptr blob);

// Represent a cv::Mat as an IE blob (non-const: the blob may refer to
// the Mat's data -- see the implementation for the exact semantics).
GAPI_EXPORTS InferenceEngine::Blob::Ptr to_ie(cv::Mat &blob);

}}}} // namespace cv::gapi::ie::util
#endif // HAVE_INF_ENGINE
#endif // OPENCV_GAPI_INFER_IE_UTIL_HPP

View File

@ -17,8 +17,23 @@
namespace cv
{
inline cv::gapi::own::Mat to_own(Mat const& m) { return {m.rows, m.cols, m.type(), m.data, m.step};};
// Copy a cv::MatSize (which is not iterable) into a std::vector,
// casting each per-dimension extent to the requested element type T.
template<typename T>
std::vector<T> to_own(const cv::MatSize &sz) {
    const int ndims = sz.dims();
    std::vector<T> out(ndims);
    for (int d = 0; d < ndims; d++) {
        out[d] = static_cast<T>(sz[d]);
    }
    return out;
}
cv::gapi::own::Mat to_own(Mat&&) = delete;
// Wrap a cv::Mat header into a gapi::own::Mat (no data copy):
// 2D mats keep rows/cols/step; N-dimensional mats go through the
// dims-vector constructor.
// (Fixed: dropped the stray ';' after the function body -- it formed
// an empty declaration and triggers -Wextra-semi.)
inline cv::gapi::own::Mat to_own(Mat const& m) {
    return (m.dims == 2)
        ? cv::gapi::own::Mat{m.rows, m.cols, m.type(), m.data, m.step}
        : cv::gapi::own::Mat{to_own<int>(m.size), m.type(), m.data};
}
inline cv::gapi::own::Scalar to_own(const cv::Scalar& s) { return {s[0], s[1], s[2], s[3]}; };
@ -32,7 +47,11 @@ namespace gapi
{
namespace own
{
inline cv::Mat to_ocv(Mat const& m) { return {m.rows, m.cols, m.type(), m.data, m.step};};
// Wrap a gapi::own::Mat header into a cv::Mat (no data copy):
// an empty dims vector means the 2D case, otherwise build an
// N-dimensional cv::Mat over the same buffer.
inline cv::Mat to_ocv(Mat const& m) {
    return m.dims.empty()
        ? cv::Mat{m.rows, m.cols, m.type(), m.data, m.step}
        : cv::Mat{m.dims, m.type(), m.data};
}
cv::Mat to_ocv(Mat&&) = delete;
inline cv::Scalar to_ocv(const Scalar& s) { return {s[0], s[1], s[2], s[3]}; };

View File

@ -16,6 +16,7 @@
#include <memory> //std::shared_ptr
#include <cstring> //std::memcpy
#include <numeric> //std::accumulate
#include <opencv2/gapi/util/throw.hpp>
namespace cv { namespace gapi { namespace own {
@ -49,6 +50,10 @@ namespace cv { namespace gapi { namespace own {
: flags((type & TYPE_MASK)), rows(_rows), cols(_cols), data((uchar*)_data), step(_step == AUTO_STEP ? detail::default_step(type, _cols) : _step)
{}
MatHeader(const std::vector<int> &_dims, int type, void* _data)
: flags((type & TYPE_MASK)), data((uchar*)_data), step(0), dims(_dims)
{}
MatHeader(const MatHeader& ) = default;
MatHeader(MatHeader&& src) : MatHeader(src) // reuse copy constructor here
{
@ -74,8 +79,10 @@ namespace cv { namespace gapi { namespace own {
//! pointer to the data
uchar* data = nullptr;
size_t step = 0;
//! dimensions (ND-case)
std::vector<int> dims;
};
}
} // namespace detail
//concise version of cv::Mat suitable for GAPI needs (used when no dependence on OpenCV is required)
class Mat : public detail::MatHeader{
public:
@ -100,6 +107,14 @@ namespace cv { namespace gapi { namespace own {
: MatHeader (_rows, _cols, _type, _data, _step)
{}
Mat(const std::vector<int> &_dims, int _type, void* _data)
: MatHeader (_dims, _type, _data)
{}
Mat(std::vector<int> &&_dims, int _type, void* _data)
: MatHeader (std::move(_dims), _type, _data)
{}
Mat(Mat const& src, const Rect& roi )
: Mat(src)
{
@ -120,9 +135,6 @@ namespace cv { namespace gapi { namespace own {
Mat& operator = (const Scalar& s)
{
constexpr unsigned max_channels = 4; //Scalar can't fit more than 4
const auto channels = static_cast<unsigned int>(this->channels());
GAPI_Assert(channels <= max_channels);
using func_p_t = void (*)(void*, int, Scalar const&);
using detail::assign_row;
#define TABLE_ENTRY(type) {assign_row<type, 1>, assign_row<type, 2>, assign_row<type, 3>, assign_row<type, 4>}
@ -145,10 +157,22 @@ namespace cv { namespace gapi { namespace own {
const auto depth = static_cast<unsigned int>(this->depth());
GAPI_Assert(depth < sizeof(func_tbl)/sizeof(func_tbl[0]));
for (int r = 0; r < rows; ++r)
if (dims.empty())
{
auto* f = func_tbl[depth][channels -1];
(*f)(static_cast<void *>(ptr(r)), cols, s );
const auto channels = static_cast<unsigned int>(this->channels());
GAPI_Assert(channels <= max_channels);
auto* f = func_tbl[depth][channels - 1];
for (int r = 0; r < rows; ++r)
{
(*f)(static_cast<void *>(ptr(r)), cols, s );
}
}
else
{
auto* f = func_tbl[depth][0];
// FIXME: better to refactor assign_row to use std::size_t by default
(*f)(static_cast<void *>(data), static_cast<int>(total()), s);
}
return *this;
}
@ -187,8 +211,9 @@ namespace cv { namespace gapi { namespace own {
/** @brief Returns the number of matrix channels.
The method returns the number of matrix channels.
If matrix is N-dimensional, -1 is returned.
*/
int channels() const {return CV_MAT_CN(flags);}
int channels() const {return dims.empty() ? CV_MAT_CN(flags) : -1;}
/**
@param _rows New number of rows.
@ -197,7 +222,7 @@ namespace cv { namespace gapi { namespace own {
*/
void create(int _rows, int _cols, int _type)
{
create({_cols, _rows}, _type);
create(Size{_cols, _rows}, _type);
}
/** @overload
@param _size Alternative new matrix size specification: Size(cols, rows)
@ -215,6 +240,18 @@ namespace cv { namespace gapi { namespace own {
}
}
void create(const std::vector<int> &_dims, int _type)
{
// FIXME: make a proper reallocation-on-demands
// WARNING: no tensor views, so no strides
Mat tmp{_dims, _type, nullptr};
// FIXME: this accumulate duplicates a lot
const auto sz = std::accumulate(_dims.begin(), _dims.end(), 1, std::multiplies<int>());
tmp.memory.reset(new uchar[CV_ELEM_SIZE(_type)*sz], [](uchar * p){delete[] p;});
tmp.data = tmp.memory.get();
*this = std::move(tmp);
}
/** @brief Copies the matrix to another one.
The method copies the matrix data to another matrix. Before copying the data, the method invokes :
@ -227,10 +264,18 @@ namespace cv { namespace gapi { namespace own {
*/
void copyTo(Mat& dst) const
{
    if (dims.empty())
    {
        // 2D case: (re)allocate dst and copy row by row, since the
        // two mats may have different steps.
        dst.create(rows, cols, type());
        for (int r = 0; r < rows; ++r)
        {
            std::copy_n(ptr(r), detail::default_step(type(),cols), dst.ptr(r));
        }
    }
    else
    {
        // N-dimensional case: data is dense, copy as one flat buffer.
        dst.create(dims, depth());
        // FIX: destination must be dst.data -- the original code passed
        // `data` as the output iterator, copying the source buffer onto
        // itself and leaving dst's contents uninitialized.
        std::copy_n(data, total()*elemSize(), dst.data);
    }
}
@ -248,10 +293,12 @@ namespace cv { namespace gapi { namespace own {
*/
size_t total() const
{
return static_cast<size_t>(rows * cols);
return static_cast<std::size_t>
(dims.empty()
? (rows * cols)
: std::accumulate(dims.begin(), dims.end(), 1, std::multiplies<int>()));
}
/** @overload
@param roi Extracted submatrix specified as a rectangle.
*/

View File

@ -7,6 +7,7 @@
#include "precomp.hpp"
#include <memory> // unique_ptr
#include <functional> // multiplies
#include <opencv2/gapi/gkernel.hpp>
#include <opencv2/gapi/own/convert.hpp>
@ -355,21 +356,39 @@ void writeBack(const Mag& mag, const RcDesc &rc, GRunArgP &g_arg, bool is_umat)
} // namespace magazine
void createMat(const cv::GMatDesc desc, cv::gapi::own::Mat& mat)
void createMat(const cv::GMatDesc &desc, cv::gapi::own::Mat& mat)
{
const auto type = desc.planar ? desc.depth : CV_MAKETYPE(desc.depth, desc.chan);
const auto size = desc.planar ? cv::gapi::own::Size{desc.size.width, desc.size.height*desc.chan}
: desc.size;
mat.create(size, type);
// FIXME: Refactor (probably start supporting N-Dimensional blobs natively)
if (desc.dims.empty())
{
const auto type = desc.planar ? desc.depth : CV_MAKETYPE(desc.depth, desc.chan);
const auto size = desc.planar ? cv::gapi::own::Size{desc.size.width, desc.size.height*desc.chan}
: desc.size;
mat.create(size, type);
}
else
{
GAPI_Assert(!desc.planar);
mat.create(desc.dims, desc.depth);
}
}
#if !defined(GAPI_STANDALONE)
void createMat(const cv::GMatDesc desc, cv::Mat& mat)
void createMat(const cv::GMatDesc &desc, cv::Mat& mat)
{
const auto type = desc.planar ? desc.depth : CV_MAKETYPE(desc.depth, desc.chan);
const auto size = desc.planar ? cv::Size{desc.size.width, desc.size.height*desc.chan}
: cv::gapi::own::to_ocv(desc.size);
mat.create(size, type);
// FIXME: Refactor (probably start supporting N-Dimensional blobs natively)
if (desc.dims.empty())
{
const auto type = desc.planar ? desc.depth : CV_MAKETYPE(desc.depth, desc.chan);
const auto size = desc.planar ? cv::Size{desc.size.width, desc.size.height*desc.chan}
: cv::gapi::own::to_ocv(desc.size);
mat.create(size, type);
}
else
{
GAPI_Assert(!desc.planar);
mat.create(desc.dims, desc.depth);
}
}
#endif

View File

@ -0,0 +1,27 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018-2019 Intel Corporation
#include "precomp.hpp"
#include <functional> // hash
#include <numeric> // accumulate
#include <unordered_set>
#include <iterator>
#include <ade/util/algorithm.hpp>
#include <opencv2/gapi/infer.hpp>
// Store the provided network parametrizations.
// NB: std::move() was dropped here -- std::initializer_list elements
// are const, so std::vector's initializer_list constructor copies them
// regardless; the move was a misleading no-op.
cv::gapi::GNetPackage::GNetPackage(std::initializer_list<GNetParam> &&ii)
    : networks(ii) {
}
std::vector<cv::gapi::GBackend> cv::gapi::GNetPackage::backends() const {
std::unordered_set<cv::gapi::GBackend> unique_set;
for (const auto &nn : networks) unique_set.insert(nn.backend);
return std::vector<cv::gapi::GBackend>(unique_set.begin(), unique_set.end());
}

View File

@ -6,6 +6,10 @@
#include "precomp.hpp"
#include <ade/util/iota_range.hpp>
#include <ade/util/algorithm.hpp>
#include <opencv2/gapi/opencv_includes.hpp>
#include <opencv2/gapi/own/mat.hpp> //gapi::own::Mat
#include <opencv2/gapi/gmat.hpp>
@ -49,20 +53,31 @@ namespace{
#if !defined(GAPI_STANDALONE)
cv::GMatDesc cv::descr_of(const cv::Mat &mat)
{
return GMatDesc{mat.depth(), mat.channels(), {mat.cols, mat.rows}};
const auto mat_dims = mat.size.dims();
if (mat_dims == 2)
return GMatDesc{mat.depth(), mat.channels(), {mat.cols, mat.rows}};
std::vector<int> dims(mat_dims);
for (auto i : ade::util::iota(mat_dims)) {
// Note: cv::MatSize is not iterable
dims[i] = mat.size[i];
}
return GMatDesc{mat.depth(), std::move(dims)};
}
cv::GMatDesc cv::descr_of(const cv::UMat &mat)
{
GAPI_Assert(mat.size.dims() == 2);
return GMatDesc{ mat.depth(), mat.channels(),{ mat.cols, mat.rows } };
}
cv::GMetaArgs cv::descr_of(const std::vector<cv::Mat> &vec)
cv::GMetaArgs cv::descrs_of(const std::vector<cv::Mat> &vec)
{
return vec_descr_of(vec);
}
cv::GMetaArgs cv::descr_of(const std::vector<cv::UMat> &vec)
cv::GMetaArgs cv::descrs_of(const std::vector<cv::UMat> &vec)
{
return vec_descr_of(vec);
}
@ -70,10 +85,12 @@ cv::GMetaArgs cv::descr_of(const std::vector<cv::UMat> &vec)
cv::GMatDesc cv::gapi::own::descr_of(const cv::gapi::own::Mat &mat)
{
return GMatDesc{mat.depth(), mat.channels(), {mat.cols, mat.rows}};
return (mat.dims.empty())
? GMatDesc{mat.depth(), mat.channels(), {mat.cols, mat.rows}}
: GMatDesc{mat.depth(), mat.dims};
}
cv::GMetaArgs cv::gapi::own::descr_of(const std::vector<cv::gapi::own::Mat> &vec)
cv::GMetaArgs cv::gapi::own::descrs_of(const std::vector<cv::gapi::own::Mat> &vec)
{
return vec_descr_of(vec);
}

View File

@ -99,9 +99,9 @@ inline cv::util::optional<T> getCompileArg(const cv::GCompileArgs &args)
return cv::util::optional<T>();
}
void createMat(const cv::GMatDesc desc, cv::gapi::own::Mat& mat);
void createMat(const cv::GMatDesc& desc, cv::gapi::own::Mat& mat);
#if !defined(GAPI_STANDALONE)
void createMat(const cv::GMatDesc desc, cv::Mat& mat);
void createMat(const cv::GMatDesc& desc, cv::Mat& mat);
#endif
}} // cv::gimpl

View File

@ -7,9 +7,6 @@
#include "precomp.hpp"
#include <functional>
#include <unordered_set>
#include <ade/util/algorithm.hpp>
#include <ade/util/range.hpp>
@ -26,8 +23,6 @@
#include "compiler/gmodel.hpp"
#include "backends/cpu/gcpubackend.hpp"
#include <opencv2/gapi/cpu/imgproc.hpp>
#include <opencv2/gapi/cpu/core.hpp>
#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!
@ -76,7 +71,7 @@ cv::gapi::GBackend cv::gapi::cpu::backend()
return this_backend;
}
// GCPUExcecutable implementation //////////////////////////////////////////////
// GCPUExecutable implementation //////////////////////////////////////////////
cv::gimpl::GCPUExecutable::GCPUExecutable(const ade::Graph &g,
const std::vector<ade::NodeHandle> &nodes)
: m_g(g), m_gm(m_g)
@ -92,7 +87,7 @@ cv::gimpl::GCPUExecutable::GCPUExecutable(const ade::Graph &g,
{
m_dataNodes.push_back(nh);
const auto &desc = m_gm.metadata(nh).get<Data>();
if (desc.storage == Data::Storage::CONST)
if (desc.storage == Data::Storage::CONST_VAL)
{
auto rc = RcDesc{desc.rc, desc.shape, desc.ctor};
magazine::bindInArg(m_res, rc, m_gm.metadata(nh).get<ConstValue>().arg);

View File

@ -68,4 +68,4 @@ public:
}}
#endif // OPENCV_GAPI_GBACKEND_HPP
#endif // OPENCV_GAPI_GCPUBACKEND_HPP

View File

@ -0,0 +1,604 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018 Intel Corporation
#include "precomp.hpp"
#ifdef HAVE_INF_ENGINE
#if INF_ENGINE_RELEASE <= 2018050000
# error G-API IE module supports only OpenVINO IE >= 2019 R1
#endif
#include <functional>
#include <unordered_set>
#include <ade/util/algorithm.hpp>
#include <ade/util/range.hpp>
#include <ade/util/zip_range.hpp>
#include <ade/util/chain_range.hpp>
#include <ade/typed_graph.hpp>
#include <opencv2/gapi/gcommon.hpp>
#include <opencv2/gapi/garray.hpp>
#include <opencv2/gapi/util/any.hpp>
#include <opencv2/gapi/gtype_traits.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/infer/ie/util.hpp>
#include "compiler/gobjref.hpp"
#include "compiler/gmodel.hpp"
#include "backends/ie/giebackend.hpp"
#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!
namespace IE = InferenceEngine;
namespace {
// Convert an OpenCV rectangle into an IE ROI descriptor.
// An IE::ROI is {batch-id, x, y, width, height}; this backend always
// addresses batch 0 here.
inline IE::ROI toIE(const cv::Rect &rc) {
    const auto x = static_cast<std::size_t>(rc.x);
    const auto y = static_cast<std::size_t>(rc.y);
    const auto w = static_cast<std::size_t>(rc.width);
    const auto h = static_cast<std::size_t>(rc.height);
    return IE::ROI{0u, x, y, w, h};
}
// Convert an OpenCV N-dimensional size descriptor into an IE dimensions
// vector (widening each int dimension to IE::SizeVector's value type).
inline IE::SizeVector toIE(const cv::MatSize &sz) {
    return cv::to_own<IE::SizeVector::value_type>(sz);
}
// Narrow an IE dimensions vector (size_t elements) to OpenCV's int-based
// one; checked_cast asserts that no dimension overflows int.
inline std::vector<int> toCV(const IE::SizeVector &vsz) {
    std::vector<int> dims;
    dims.reserve(vsz.size());
    for (std::size_t i = 0u; i < vsz.size(); i++) {
        dims.push_back(ade::util::checked_cast<int>(vsz[i]));
    }
    return dims;
}
// Map an OpenCV element depth to the corresponding IE precision.
// Only CV_8U and CV_32F are supported by this backend so far; anything
// else triggers an assertion failure.
inline IE::Precision toIE(int depth) {
    if (depth == CV_8U)  return IE::Precision::U8;
    if (depth == CV_32F) return IE::Precision::FP32;
    GAPI_Assert(false && "Unsupported data type");
    return IE::Precision::UNSPECIFIED;
}
// Map an IE precision back to the OpenCV element depth.
// Mirrors toIE(int): only U8 and FP32 are supported; any other
// precision triggers an assertion failure.
inline int toCV(IE::Precision prec) {
    if (prec == IE::Precision::U8)   return CV_8U;
    if (prec == IE::Precision::FP32) return CV_32F;
    GAPI_Assert(false && "Unsupported data type");
    return -1;
}
// Build an IE tensor descriptor (precision + dims + layout) for a cv::Mat.
// 2D (image) Mats are described as a 1xCxHxW NHWC tensor; 4D Mats are
// passed through as NCHW. Other ranks are rejected.
inline IE::TensorDesc toIE(const cv::Mat &mat) {
    const auto &sz = mat.size;

    // NB: For some reason RGB image is 2D image
    // (since channel component is not counted here).
    if (sz.dims() == 2) {
        // NB: This logic is mainly taken from IE samples
        const size_t channels = mat.channels();
        const size_t height   = mat.size().height;
        const size_t width    = mat.size().width;

        // Row and element strides in bytes-per-element terms; used below to
        // verify the Mat has no padding (IE expects a dense buffer here).
        const size_t strideH  = mat.step.buf[0];
        const size_t strideW  = mat.step.buf[1];

        const bool is_dense =
            strideW == channels &&
            strideH == channels * width;

        if (!is_dense)
            cv::util::throw_error(std::logic_error("Doesn't support conversion"
                                                   " from non-dense cv::Mat"));

        return IE::TensorDesc(toIE(mat.depth()),
                              IE::SizeVector{1, channels, height, width},
                              IE::Layout::NHWC);
    }

    GAPI_Assert(sz.dims() == 4); // NB: Will relax when needed (to known use)
    return IE::TensorDesc(toIE(mat.depth()), toIE(sz), IE::Layout::NCHW);
}
// Wrap a cv::Mat's memory into an IE blob WITHOUT copying the data.
// The blob aliases the Mat's buffer, so the Mat must outlive the blob.
inline IE::Blob::Ptr wrapIE(const cv::Mat &mat) {
    const auto tDesc = toIE(mat);
    switch (mat.depth()) {
        // NB: Seems there's no way to create an untyped (T-less) Blob::Ptr
        // in IE given only precision via TensorDesc. So we have to do this:
#define HANDLE(E,T) \
        case CV_##E: return IE::make_shared_blob<T>(tDesc, const_cast<T*>(mat.ptr<T>()))
        HANDLE(8U, uint8_t);
        HANDLE(32F, float);
#undef HANDLE
    default: GAPI_Assert(false && "Unsupported data type");
    }
    return IE::Blob::Ptr{};
}
// Copy an IE blob's contents into a (pre-allocated) Mat-like object.
// The destination is assumed to be dense and already sized to hold
// mat.total() elements of the blob's precision.
template<class MatType>
inline void copyFromIE(const IE::Blob::Ptr &blob, MatType &mat) {
    switch (blob->getTensorDesc().getPrecision()) {
#define HANDLE(E,T) \
        case IE::Precision::E: std::copy_n(blob->buffer().as<T*>(), \
                                           mat.total(), \
                                           reinterpret_cast<T*>(mat.data)); \
            break;
        HANDLE(U8, uint8_t);
        HANDLE(FP32, float);
#undef HANDLE
    default: GAPI_Assert(false && "Unsupported data type");
    }
}
// IE-specific metadata, represents a network with its parameters.
// Constructed at graph compilation time from the user-supplied ParamDesc;
// loads the network topology/weights and resolves single-layer I/O names.
struct IEUnit {
    static const char *name() { return "IEModelConfig"; }

    cv::gapi::ie::detail::ParamDesc params;
    IE::CNNNetwork net;
    IE::InputsDataMap inputs;
    IE::OutputsDataMap outputs;

    explicit IEUnit(const cv::gapi::ie::detail::ParamDesc &pp)
        : params(pp) {
        IE::CNNNetReader reader;
        reader.ReadNetwork(params.model_path);
        reader.ReadWeights(params.weights_path);
        net = reader.getNetwork();
        inputs = net.getInputsInfo();
        outputs = net.getOutputsInfo();

        // The practice shows that not all inputs and not all outputs
        // are mandatory to specify in IE model.
        // So what we're concerned here about is:
        // if operation's (not topology's) input/output number is
        // greater than 1, then we do care about input/output layer
        // names. Otherwise, names are picked up automatically.
        // TODO: Probably this check could be done at the API entry point? (gnet)
        if (params.num_in > 1u && params.num_in != params.input_names.size()) {
            cv::util::throw_error(std::logic_error("Please specify input layer names for "
                                                   + params.model_path));
        }
        if (params.num_out > 1u && params.num_out != params.output_names.size()) {
            cv::util::throw_error(std::logic_error("Please specify output layer names for "
                                                   + params.model_path));
        }
        // Single-input/output case: take the (only) layer name from the
        // loaded network if the user didn't specify one explicitly.
        if (params.num_in == 1u && params.input_names.empty()) {
            params.input_names = { inputs.begin()->first };
        }
        if (params.num_out == 1u && params.output_names.empty()) {
            params.output_names = { outputs.begin()->first };
        }
    }

    // This method is [supposed to be] called at Island compilation stage.
    // Loads the network onto the target device and creates an infer request,
    // pre-binding any constant inputs the user has supplied.
    cv::gimpl::ie::IECompiled compile() const {
        auto this_plugin = IE::PluginDispatcher().getPluginByDevice(params.device_id);
        auto this_network = this_plugin.LoadNetwork(net, {}); // FIXME: 2nd parameter to be
                                                              // configurable via the API
        auto this_request = this_network.CreateInferRequest();

        // Bind const data to infer request
        for (auto &&p : params.const_inputs) {
            this_request.SetBlob(p.first, wrapIE(p.second));
        }
        return {this_plugin, this_network, this_request};
    }
};
// Per-invocation execution context for an inference kernel: holds the
// unpacked input arguments and pointers to the output storage slots.
struct IECallContext
{
    // Input parameters passed to an inference operation.
    std::vector<cv::GArg> args;

    //FIXME: avoid conversion of arguments from internal representation to OpenCV one on each call
    //to OCV kernel. (This can be achieved by a two single time conversions in GCPUExecutable::run,
    //once on enter for input and output arguments, and once before return for output arguments only
    //FIXME: check if the above applies to this backend (taken from CPU)
    std::unordered_map<std::size_t, cv::GRunArgP> results;

    // Generic accessor API
    template<typename T>
    const T& inArg(std::size_t input) { return args.at(input).get<T>(); }

    // Syntax sugar
    const cv::gapi::own::Mat& inMat(std::size_t input) {
        return inArg<cv::gapi::own::Mat>(input);
    }
    cv::gapi::own::Mat& outMatR(std::size_t output) {
        return *cv::util::get<cv::gapi::own::Mat*>(results.at(output));
    }

    template<typename T> std::vector<T>& outVecR(std::size_t output) { // FIXME: the same issue
        return outVecRef(output).wref<T>();
    }
    cv::detail::VectorRef& outVecRef(std::size_t output) {
        return cv::util::get<cv::detail::VectorRef>(results.at(output));
    }
};
// Graph metadata slot: the function actually executed for an inference
// node. It receives the compiled network, its parameters and the call
// context built in GIEExecutable::run().
struct IECallable {
    static const char *name() { return "IERequestCallable"; }
    // FIXME: Make IECallContext manage them all? (3->1)
    using Run = std::function<void(cv::gimpl::ie::IECompiled &, const IEUnit &, IECallContext &)>;
    Run run;
};
// Kernel implementation bundle: the custom meta-inference function plus
// the run callback. Stored opaquely in GKernelImpl and unpacked by
// GIEBackendImpl::unpackKernel().
struct KImpl {
    cv::gimpl::CustomMetaFunction::CM customMetaFunc;
    IECallable::Run run;
};
// FIXME: Is there a way to take a typed graph (our GModel),
// and create a new typed graph _ATOP_ of that (by extending with a couple of
// new types?).
// Alternatively, is there a way to compose types graphs?
//
// If not, we need to introduce that!
using GIEModel = ade::TypedGraph
< cv::gimpl::Protocol
, cv::gimpl::Op
, cv::gimpl::NetworkParams
, cv::gimpl::CustomMetaFunction
, IEUnit
, IECallable
>;
// FIXME: Same issue with Typed and ConstTyped
using GConstGIEModel = ade::ConstTypedGraph
< cv::gimpl::Protocol
, cv::gimpl::Op
, cv::gimpl::NetworkParams
, cv::gimpl::CustomMetaFunction
, IEUnit
, IECallable
>;
} // anonymous namespace
// GIEExecutable implementation //////////////////////////////////////////////
// Constructs the island executable: finds the single OP node in the island,
// compiles its IEUnit (loads the network onto the device) and records the
// island's data nodes. Throws if the island contains more than one OP or
// any const/internal data.
cv::gimpl::ie::GIEExecutable::GIEExecutable(const ade::Graph &g,
                                            const std::vector<ade::NodeHandle> &nodes)
    : m_g(g), m_gm(m_g) {
    // FIXME: Currently this backend is capable to run a single inference node only.
    // Need to extend our island fusion with merge/not-to-merge decision making parametrization
    GConstGIEModel iem(g);

    for (auto &nh : nodes) {
        switch (m_gm.metadata(nh).get<NodeType>().t) {
        case NodeType::OP:
            if (this_nh == nullptr) {
                this_nh = nh;
                // Network is loaded to the device right here, at island
                // compilation time (not at the first run).
                this_iec = iem.metadata(this_nh).get<IEUnit>().compile();
            }
            else
                util::throw_error(std::logic_error("Multi-node inference is not supported!"));
            break;

        case NodeType::DATA: {
            m_dataNodes.push_back(nh);
            const auto &desc = m_gm.metadata(nh).get<Data>();
            if (desc.storage == Data::Storage::CONST_VAL) {
                util::throw_error(std::logic_error("No const data please!"));
            }
            if (desc.storage == Data::Storage::INTERNAL) {
                util::throw_error(std::logic_error("No internal data please!"));
            }
            break;
        }
        default: util::throw_error(std::logic_error("Unsupported NodeType type"));
        }
    }
}
// Translates a graph-level argument into its run-time counterpart:
// graph object references (RcDesc) are replaced with the actual objects
// stored in the magazine (m_res). Only GMat and GArray references are
// supported; any non-reference argument is rejected.
cv::GArg cv::gimpl::ie::GIEExecutable::packArg(const cv::GArg &arg) {
    // No API placeholders allowed at this point
    // FIXME: this check has to be done somewhere in compilation stage.
    GAPI_Assert(   arg.kind != cv::detail::ArgKind::GMAT
                && arg.kind != cv::detail::ArgKind::GSCALAR
                && arg.kind != cv::detail::ArgKind::GARRAY);

    // After this check, arg.kind is guaranteed to be GOBJREF (the
    // redundant assert which used to follow it was dead code).
    if (arg.kind != cv::detail::ArgKind::GOBJREF) {
        util::throw_error(std::logic_error("Inference supports G-types ONLY!"));
    }

    // Wrap associated CPU object (either host or an internal one)
    // FIXME: object can be moved out!!! GExecutor faced that.
    const cv::gimpl::RcDesc &ref = arg.get<cv::gimpl::RcDesc>();
    switch (ref.shape)
    {
    case GShape::GMAT: return GArg(m_res.slot<cv::gapi::own::Mat>()[ref.id]);

    // Note: .at() is intentional for GArray as object MUST be already there
    //   (and constructed by either bindIn/Out or resetInternal)
    case GShape::GARRAY: return GArg(m_res.slot<cv::detail::VectorRef>().at(ref.id));

    default:
        util::throw_error(std::logic_error("Unsupported GShape type"));
        break;
    }
}
// Executes the island: binds run-time input/output objects into the
// magazine, packs the operation's arguments into an IECallContext,
// invokes the node's IECallable and writes results back to the caller.
void cv::gimpl::ie::GIEExecutable::run(std::vector<InObj> &&input_objs,
                                       std::vector<OutObj> &&output_objs) {
    // Update resources with run-time information - what this Island
    // has received from user (or from another Island, or mix...)
    // FIXME: Check input/output objects against GIsland protocol

    for (auto& it : input_objs) magazine::bindInArg (m_res, it.first, it.second);
    for (auto& it : output_objs) magazine::bindOutArg(m_res, it.first, it.second);

    // FIXME: Running just a single node now.
    // Not sure if need to support many of them, though
    // FIXME: Make this island-unmergeable?
    const auto &op = m_gm.metadata(this_nh).get<Op>();

    // Initialize kernel's execution context:
    // - Input parameters
    IECallContext context;
    context.args.reserve(op.args.size());
    using namespace std::placeholders;
    ade::util::transform(op.args,
                         std::back_inserter(context.args),
                         std::bind(&GIEExecutable::packArg, this, _1));

    // - Output parameters.
    for (const auto &out_it : ade::util::indexed(op.outs)) {
        // FIXME: Can the same GArg type resolution mechanism be reused here?
        const auto out_port = ade::util::index(out_it);
        const auto out_desc = ade::util::value(out_it);
        context.results[out_port] = magazine::getObjPtr(m_res, out_desc);
    }

    // And now trigger the execution
    GConstGIEModel giem(m_g);
    const auto &uu = giem.metadata(this_nh).get<IEUnit>();
    const auto &kk = giem.metadata(this_nh).get<IECallable>();
    kk.run(this_iec, uu, context);

    for (auto &it : output_objs) magazine::writeBack(m_res, it.first, it.second);
}
namespace cv {
namespace gimpl {
namespace ie {
// The whole-image inference kernel: runs the network once per call,
// one Mat per network input, one Mat per network output.
struct Infer: public cv::detail::KernelTag {
    using API = cv::GInferBase;
    static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
    static KImpl kernel() { return KImpl{outMeta, run}; }

    // Derives output GMatDescs from the network's output layers, and in
    // the same pass configures IE input layers (precision, layout,
    // resize preprocessing) based on the framework-side input metadata.
    static cv::GMetaArgs outMeta(const ade::Graph &gr,
                                 const ade::NodeHandle &nh,
                                 const cv::GMetaArgs &in_metas,
                                 const cv::GArgs &/*in_args*/) {
        // Specify network's output layer metadata to the framework
        // Also specify the input information to the IE from the framework
        // NB: Have no clue if network's input [dimensions] may ever define
        // its output dimensions. It seems possible with OpenCV DNN APIs
        cv::GMetaArgs result;

        GConstGIEModel gm(gr);
        const auto &uu = gm.metadata(nh).get<IEUnit>();

        // Initialize input information
        // Note our input layers list order matches the API order and so
        // meta order.
        GAPI_Assert(uu.params.input_names.size() == in_metas.size()
                    && "Known input layers count doesn't match input meta count");

        for (auto &&it : ade::util::zip(ade::util::toRange(uu.params.input_names),
                                        ade::util::toRange(in_metas))) {
            auto &&ii = uu.inputs.at(std::get<0>(it));
            const auto & mm = std::get<1>(it);

            GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm)
                        && "Non-GMat inputs are not supported");

            const auto &meta = util::get<cv::GMatDesc>(mm);
            ii->setPrecision(toIE(meta.depth));
            ii->setLayout(meta.isND() ? IE::Layout::NCHW : IE::Layout::NHWC);
            ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
        }

        // FIXME: It would be nice here to have an exact number of network's
        // input/output parameters. Probably GCall should store it here for us.
        // It doesn't, as far as I know..
        for (const auto &out_name : uu.params.output_names) {
            // NOTE: our output_names vector follows the API order
            // of this operation's outputs
            const IE::DataPtr& ie_out = uu.outputs.at(out_name);
            const IE::SizeVector dims = ie_out->getTensorDesc().getDims();

            // Reuse the dims queried above instead of calling
            // getTensorDesc().getDims() a second time (the local used
            // to be computed but left unused).
            cv::GMatDesc outm(toCV(ie_out->getPrecision()),
                              toCV(dims));
            result.emplace_back(outm);
        }
        return result;
    }

    // Feeds every input Mat to IE (zero-copy via wrapIE), runs a single
    // synchronous Infer() and copies every output blob back into the
    // corresponding output Mat.
    static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) {
        // non-generic version for now:
        // - assumes all inputs/outputs are always Mats
        for (auto i : ade::util::iota(uu.params.num_in)) {
            // TODO: Ideally we shouldn't do SetBlob() but GetBlob() instead,
            // and redirect our data producers to this memory
            // (A memory dialog comes to the picture again)
            const cv::Mat this_mat = to_ocv(ctx.inMat(i));
            IE::Blob::Ptr this_blob = wrapIE(this_mat);
            iec.this_request.SetBlob(uu.params.input_names[i], this_blob);
        }
        iec.this_request.Infer();
        for (auto i : ade::util::iota(uu.params.num_out)) {
            // TODO: Think on avoiding copying here.
            // Either we should ask IE to use our memory (what is not always the
            // best policy) or use IE-allocated buffer inside (and pass it to the graph).
            // Not a <very> big deal for classifiers and detectors,
            // but may be critical to segmentation.
            cv::gapi::own::Mat& out_mat = ctx.outMatR(i);
            IE::Blob::Ptr this_blob = iec.this_request.GetBlob(uu.params.output_names[i]);
            copyFromIE(this_blob, out_mat);
        }
    }
};
// The ROI-list inference kernel: input 0 is a vector<cv::Rect>, input 1
// is the full frame; the network is run once per ROI and each output
// layer produces a vector<cv::Mat> (one Mat per ROI).
struct InferList: public cv::detail::KernelTag {
    using API = cv::GInferListBase;
    static cv::gapi::GBackend backend() { return cv::gapi::ie::backend(); }
    static KImpl kernel() { return KImpl{outMeta, run}; }

    // Configures IE input layers from the framework-side metadata.
    // Output metadata is trivial here: every output is an (untyped) array.
    static cv::GMetaArgs outMeta(const ade::Graph &gr,
                                 const ade::NodeHandle &nh,
                                 const cv::GMetaArgs &in_metas,
                                 const cv::GArgs &/*in_args*/) {
        // Specify the input information to the IE from the framework
        // NB: Have no clue if network's input [dimensions] may ever define
        // its output dimensions. It seems possible with OpenCV DNN APIs
        GConstGIEModel gm(gr);
        const auto &uu = gm.metadata(nh).get<IEUnit>();

        // Initialize input information
        // Note our input layers list order matches the API order and so
        // meta order.
        // NB: in_metas[0] is the ROI list, hence the "- 1u" and idx = 1u below.
        GAPI_Assert(uu.params.input_names.size() == (in_metas.size() - 1u)
                    && "Known input layers count doesn't match input meta count");

        std::size_t idx = 1u;
        for (auto &&input_name : uu.params.input_names) {
            auto &&ii = uu.inputs.at(input_name);
            const auto & mm = in_metas[idx++];

            GAPI_Assert(util::holds_alternative<cv::GMatDesc>(mm)
                        && "Non-GMat inputs are not supported");

            const auto &meta = util::get<cv::GMatDesc>(mm);
            ii->setPrecision(toIE(meta.depth));
            ii->setLayout(meta.isND() ? IE::Layout::NCHW : IE::Layout::NHWC);
            ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);
        }

        // roi-list version is much easier at the moment.
        // All our outputs are vectors which don't have
        // metadata at the moment - so just create a vector of
        // "empty" array metadatas of the required size.
        return cv::GMetaArgs(uu.params.output_names.size(),
                             cv::GMetaArg{cv::empty_array_desc()});
    }

    // Wraps the frame once, then for each ROI sets a shared ROI blob as
    // the network input, infers, and appends every output blob (copied
    // into a fresh Mat) to the corresponding output vector.
    static void run(IECompiled &iec, const IEUnit &uu, IECallContext &ctx) {
        // non-generic version for now:
        // - assumes zero input is always ROI list
        // - assumes all inputs/outputs are always Mats
        GAPI_Assert(uu.params.num_in == 1); // roi list is not counted in net's inputs

        const auto& in_roi_vec = ctx.inArg<cv::detail::VectorRef>(0u).rref<cv::Rect>();
        const cv::Mat this_mat = to_ocv(ctx.inMat(1u));
        IE::Blob::Ptr this_blob = wrapIE(this_mat);

        // FIXME: This could be done ONCE at graph compile stage!
        std::vector< std::vector<int> > cached_dims(uu.params.num_out);
        for (auto i : ade::util::iota(uu.params.num_out)) {
            const IE::DataPtr& ie_out = uu.outputs.at(uu.params.output_names[i]);
            cached_dims[i] = toCV(ie_out->getTensorDesc().getDims());
            ctx.outVecR<cv::Mat>(i).clear();
            // FIXME: Shouldn't this be done automatically
            // by some resetInternalData(), etc? (Probably at the GExecutor level)
        }

        for (const auto &rc : in_roi_vec) {
            // FIXME: Assumed only 1 input
            IE::Blob::Ptr roi_blob = IE::make_shared_blob(this_blob, toIE(rc));
            iec.this_request.SetBlob(uu.params.input_names[0u], roi_blob);
            iec.this_request.Infer();

            // While input is fixed to be 1,
            // there may be still multiple outputs
            for (auto i : ade::util::iota(uu.params.num_out)) {
                std::vector<cv::Mat> &out_vec = ctx.outVecR<cv::Mat>(i);

                IE::Blob::Ptr out_blob = iec.this_request.GetBlob(uu.params.output_names[i]);

                cv::Mat out_mat(cached_dims[i], toCV(out_blob->getTensorDesc().getPrecision()));
                copyFromIE(out_blob, out_mat); // FIXME: Avoid data copy. Not sure if it is possible though
                out_vec.push_back(std::move(out_mat));
            }
        }
    }
};
} // namespace ie
} // namespace gimpl
} // namespace cv
// IE backend implementation of GBackend::Priv ///////////////////////
namespace {
// IE backend entry point: unpacks kernel/network parameters into the
// graph metadata, creates the island executable, and publishes the
// Infer/InferList kernels as auxiliary ones.
class GIEBackendImpl final: public cv::gapi::GBackend::Priv {
    virtual void unpackKernel(ade::Graph &gr,
                              const ade::NodeHandle &nh,
                              const cv::GKernelImpl &ii) override {
        using namespace cv::gimpl;
        // FIXME: Introduce a DNNBackend interface which'd specify
        // the framework for this???
        GIEModel gm(gr);
        // NetworkParams was attached to this node earlier by the
        // bindNetParams pass; unpack it into an IEUnit here.
        const auto &np = gm.metadata(nh).get<NetworkParams>();
        const auto &pp = cv::util::any_cast<cv::gapi::ie::detail::ParamDesc>(np.opaque);
        const auto &ki = cv::util::any_cast<KImpl>(ii.opaque);
        gm.metadata(nh).set(IEUnit{pp});
        gm.metadata(nh).set(IECallable{ki.run});
        gm.metadata(nh).set(CustomMetaFunction{ki.customMetaFunc});
    }

    virtual EPtr compile(const ade::Graph &graph,
                         const cv::GCompileArgs &,
                         const std::vector<ade::NodeHandle> &nodes) const override {
        return EPtr{new cv::gimpl::ie::GIEExecutable(graph, nodes)};
    }

    virtual cv::gapi::GKernelPackage auxiliaryKernels() const override {
        return cv::gapi::kernels< cv::gimpl::ie::Infer
                                , cv::gimpl::ie::InferList
                                >();
    }
};
}
// Returns the (process-wide singleton) IE backend handle.
cv::gapi::GBackend cv::gapi::ie::backend() {
    static cv::gapi::GBackend this_backend(std::make_shared<GIEBackendImpl>());
    return this_backend;
}
// Wraps an IE blob's memory into a cv::Mat header WITHOUT copying;
// the returned Mat aliases the blob's buffer, so the blob must outlive it.
cv::Mat cv::gapi::ie::util::to_ocv(InferenceEngine::Blob::Ptr blob) {
    const auto& tdesc = blob->getTensorDesc();
    return cv::Mat(toCV(tdesc.getDims()),
                   toCV(tdesc.getPrecision()),
                   blob->buffer().as<uint8_t*>());
}
// Public wrapper over the internal dims conversion (size_t -> int).
std::vector<int> cv::gapi::ie::util::to_ocv(const InferenceEngine::SizeVector &dims) {
    return toCV(dims);
}
// Public wrapper over the internal zero-copy Mat -> IE blob conversion.
// The Mat must outlive the returned blob.
InferenceEngine::Blob::Ptr cv::gapi::ie::util::to_ie(cv::Mat &blob) {
    return wrapIE(blob);
}
#endif // HAVE_INF_ENGINE

View File

@ -0,0 +1,89 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2018 Intel Corporation
#ifndef OPENCV_GAPI_GIEBACKEND_HPP
#define OPENCV_GAPI_GIEBACKEND_HPP
#ifdef HAVE_INF_ENGINE
#include <ade/util/algorithm.hpp> // type_list_index
////////////////////////////////////////////////////////////////////////////////
// FIXME: Suppress deprecation warnings for OpenVINO 2019R2+
// BEGIN {{{
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#ifdef _MSC_VER
#pragma warning(disable: 4996) // was declared deprecated
#endif
#if defined(__GNUC__)
#pragma GCC visibility push(default)
#endif
#include <inference_engine.hpp>
#if defined(__GNUC__)
#pragma GCC visibility pop
#endif
// END }}}
////////////////////////////////////////////////////////////////////////////////
#include <opencv2/gapi/garg.hpp>
#include <opencv2/gapi/gproto.hpp>
#include <opencv2/gapi/infer/ie.hpp>
#include "api/gorigin.hpp"
#include "backends/common/gbackend.hpp"
#include "compiler/gislandmodel.hpp"
namespace cv {
namespace gimpl {
namespace ie {
// Everything needed to run inference on a device: the plugin, the
// network loaded into it, and a (single, reusable) infer request.
struct IECompiled {
    InferenceEngine::InferencePlugin this_plugin;
    InferenceEngine::ExecutableNetwork this_network;
    InferenceEngine::InferRequest this_request;
};
// Island executable for the IE backend. Currently restricted to islands
// with exactly one inference (OP) node; reshape is not supported.
class GIEExecutable final: public GIslandExecutable
{
    const ade::Graph &m_g;
    GModel::ConstGraph m_gm;

    // The only executable stuff in this graph
    // (assuming it is always single-op)
    ade::NodeHandle this_nh;
    IECompiled this_iec;

    // List of all resources in graph (both internal and external)
    std::vector<ade::NodeHandle> m_dataNodes;

    // Actual data of all resources in graph (both internal and external)
    Mag m_res;

    // Execution helpers: resolve a graph-level GArg into a run-time one
    GArg packArg(const GArg &arg);

public:
    GIEExecutable(const ade::Graph &graph,
                  const std::vector<ade::NodeHandle> &nodes);

    virtual inline bool canReshape() const override { return false; }
    virtual inline void reshape(ade::Graph&, const GCompileArgs&) override {
        GAPI_Assert(false); // Not implemented yet
    }

    virtual void run(std::vector<InObj> &&input_objs,
                     std::vector<OutObj> &&output_objs) override;
};
}}}
#endif // HAVE_INF_ENGINE
#endif // OPENCV_GAPI_GIEBACKEND_HPP

View File

@ -7,9 +7,6 @@
#include "precomp.hpp"
#include <functional>
#include <unordered_set>
#include <ade/util/algorithm.hpp>
#include <ade/util/range.hpp>
@ -26,8 +23,6 @@
#include "compiler/gmodel.hpp"
#include "backends/ocl/goclbackend.hpp"
#include "backends/ocl/goclimgproc.hpp"
#include "backends/ocl/goclcore.hpp"
#include "api/gbackend_priv.hpp" // FIXME: Make it part of Backend SDK!
@ -92,7 +87,7 @@ cv::gimpl::GOCLExecutable::GOCLExecutable(const ade::Graph &g,
{
m_dataNodes.push_back(nh);
const auto &desc = m_gm.metadata(nh).get<Data>();
if (desc.storage == Data::Storage::CONST)
if (desc.storage == Data::Storage::CONST_VAL)
{
auto rc = RcDesc{desc.rc, desc.shape, desc.ctor};
magazine::bindInArg(m_res, rc, m_gm.metadata(nh).get<ConstValue>().arg);

View File

@ -72,6 +72,12 @@ namespace
return combine(ocv_pkg, user_pkg_with_aux);
}
// Extract the user-supplied network package from the compile args;
// an empty package is returned when none was passed.
cv::gapi::GNetPackage getNetworkPackage(cv::GCompileArgs &args)
{
    auto net_pkg = cv::gimpl::getCompileArg<cv::gapi::GNetPackage>(args);
    if (net_pkg)
        return net_pkg.value();
    return cv::gapi::GNetPackage{};
}
cv::util::optional<std::string> getGraphDumpDirectory(cv::GCompileArgs& args)
{
auto dump_info = cv::gimpl::getCompileArg<cv::graph_dump_path>(args);
@ -87,6 +93,16 @@ namespace
return cv::util::make_optional(dump_info.value().m_dump_path);
}
}
// Collect auxiliary kernels from every backend in the container `c`
// (NN backends register their inference kernels this way) into a
// single combined kernel package.
template<typename C>
cv::gapi::GKernelPackage auxKernelsFrom(const C& c) {
    cv::gapi::GKernelPackage result;
    for (const auto &b : c) {
        result = cv::gapi::combine(result, b.priv().auxiliaryKernels());
    }
    return result;
}
} // anonymous namespace
@ -98,13 +114,28 @@ cv::gimpl::GCompiler::GCompiler(const cv::GComputation &c,
: m_c(c), m_metas(std::move(metas)), m_args(std::move(args))
{
using namespace std::placeholders;
m_all_kernels = getKernelPackage(m_args);
auto dump_path = getGraphDumpDirectory(m_args);
auto kernels_to_use = getKernelPackage(m_args);
auto networks_to_use = getNetworkPackage(m_args);
std::unordered_set<cv::gapi::GBackend> all_backends;
const auto take = [&](std::vector<cv::gapi::GBackend> &&v) {
all_backends.insert(v.begin(), v.end());
};
take(kernels_to_use.backends());
take(networks_to_use.backends());
m_all_kernels = cv::gapi::combine(kernels_to_use,
auxKernelsFrom(all_backends));
// NB: The expectation in the line above is that
// NN backends (present here via network package) always add their
// inference kernels via auxiliary...()
auto dump_path = getGraphDumpDirectory(m_args);
m_e.addPassStage("init");
m_e.addPass("init", "check_cycles", ade::passes::CheckCycles());
m_e.addPass("init", "expand_kernels", std::bind(passes::expandKernels, _1,
m_all_kernels)); // NB: package is copied
m_e.addPass("init", "expand_kernels",
std::bind(passes::expandKernels, _1,
m_all_kernels)); // NB: package is copied
m_e.addPass("init", "topo_sort", ade::passes::TopologicalSort());
m_e.addPass("init", "init_islands", passes::initIslands);
m_e.addPass("init", "check_islands", passes::checkIslands);
@ -117,8 +148,13 @@ cv::gimpl::GCompiler::GCompiler(const cv::GComputation &c,
m_all_kernels.remove(cv::gapi::compound::backend());
m_e.addPassStage("kernels");
m_e.addPass("kernels", "resolve_kernels", std::bind(passes::resolveKernels, _1,
std::ref(m_all_kernels))); // NB: and not copied here
m_e.addPass("kernels", "bind_net_params",
std::bind(passes::bindNetParams, _1,
networks_to_use));
m_e.addPass("kernels", "resolve_kernels",
std::bind(passes::resolveKernels, _1,
std::ref(m_all_kernels))); // NB: and not copied here
// (no compound backend present here)
m_e.addPass("kernels", "check_islands_content", passes::checkIslandsContent);
m_e.addPassStage("meta");
@ -142,7 +178,9 @@ cv::gimpl::GCompiler::GCompiler(const cv::GComputation &c,
dump_path.value()));
}
// Process backends at the last moment (after all G-API passes are added).
// FIXME: This should be called for "ActiveBackends" only (see metadata).
// However, ActiveBackends are known only after passes are actually executed.
// At these stage, they are not executed yet.
ade::ExecutionEngineSetupContext ectx(m_e);
auto backends = m_all_kernels.backends();
for (auto &b : backends)

View File

@ -11,6 +11,7 @@
#include <opencv2/gapi/gcommon.hpp>
#include <opencv2/gapi/gkernel.hpp>
#include <opencv2/gapi/infer.hpp>
#include <opencv2/gapi/gcomputation.hpp>
#include <ade/execution_engine/execution_engine.hpp>
@ -26,6 +27,7 @@ class GAPI_EXPORTS GCompiler
ade::ExecutionEngine m_e;
cv::gapi::GKernelPackage m_all_kernels;
cv::gapi::GNetPackage m_all_networks;
void validateInputMeta();
void validateOutProtoArgs();

View File

@ -47,7 +47,7 @@ ade::NodeHandle GModel::mkDataNode(GModel::Graph &g, const GOrigin& origin)
{
auto value = value_of(origin);
meta = descr_of(value);
storage = Data::Storage::CONST;
storage = Data::Storage::CONST_VAL;
g.metadata(data_h).set(ConstValue{value});
}
g.metadata(data_h).set(Data{origin.shape, id, meta, origin.ctor, storage});

View File

@ -22,6 +22,8 @@
// This part of the system is API-unaware by its design.
//
#include <opencv2/gapi/util/any.hpp>
#include <opencv2/gapi/garg.hpp>
#include <opencv2/gapi/gkernel.hpp>
@ -76,7 +78,8 @@ struct Data
INTERNAL, // data object is not listed in GComputation protocol
INPUT, // data object is listed in GComputation protocol as Input
OUTPUT, // data object is listed in GComputation protocol as Output
CONST, // data object is constant
CONST_VAL, // data object is constant.
// Note: CONST is sometimes defined in Win sys headers
};
Storage storage;
};
@ -142,6 +145,33 @@ struct ActiveBackends
std::unordered_set<cv::gapi::GBackend> backends;
};
// Backend-specific inference parameters for a neural network.
// Since these parameters are set on compilation stage (not
// on a construction stage), these parameters are bound lately
// to the operation node.
// NB: These parameters are not included into GModel by default
// since it is not used regularly by all parties.
struct NetworkParams
{
    static const char *name() { return "NetworkParams"; }
    // Backend-defined payload (e.g. an IE ParamDesc); unpacked by
    // the backend which recognizes its own tag.
    cv::util::any opaque;
};
// This is a custom metadata handling operator.
// Sometimes outMeta() can't be bound to input parameters only
// so several backends (today -- mainly inference) may find this useful.
// If provided, the meta inference pass uses this function instead of
// OP.k.outMeta.
struct CustomMetaFunction
{
    static const char *name() { return "CustomMetaFunction"; }
    // Signature mirrors outMeta() but with full graph/node access,
    // so the callback can read node-attached metadata (e.g. IEUnit).
    using CM = std::function< cv::GMetaArgs( const ade::Graph &,
                                             const ade::NodeHandle &,
                                             const cv::GMetaArgs &,
                                             const cv::GArgs &)>;
    CM customOutMeta;
};
namespace GModel
{
using Graph = ade::TypedGraph
@ -159,6 +189,7 @@ namespace GModel
, DataObjectCounter
, IslandModel
, ActiveBackends
, CustomMetaFunction
>;
// FIXME: How to define it based on GModel???
@ -177,6 +208,7 @@ namespace GModel
, DataObjectCounter
, IslandModel
, ActiveBackends
, CustomMetaFunction
>;
// FIXME:

View File

@ -12,6 +12,8 @@
#include <ade/passes/check_cycles.hpp>
#include <opencv2/gapi/gcompoundkernel.hpp> // compound::backend()
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/infer.hpp> // GNetPackage
#include "compiler/gmodel.hpp"
#include "compiler/passes/passes.hpp"
@ -97,7 +99,37 @@ namespace
gr.erase(subgr_out_nh);
}
}
} // anonymous namespace
// This pass, given the network package, associates every infer[list] node
// with particular inference backend and its parameters.
// Nodes whose kernel tag matches a GNetParam's tag receive a NetworkParams
// metadata entry; untagged or unmatched nodes are left untouched.
void cv::gimpl::passes::bindNetParams(ade::passes::PassContext &ctx,
                                      const gapi::GNetPackage &pkg)
{
    GModel::Graph gr(ctx.graph);
    ade::TypedGraph<NetworkParams> pgr(ctx.graph);

    for (const auto &nh : gr.nodes())
    {
        if (gr.metadata(nh).get<NodeType>().t == NodeType::OP)
        {
            auto &op = gr.metadata(nh).get<Op>();
            if (op.k.tag.empty())
                continue;

            // FIXME: What if there's more than one???
            const auto it = ade::util::find_if(pkg.networks,
                                               [&](const cv::gapi::GNetParam &p) {
                                                   return p.tag == op.k.tag;
                                               });
            if (it == std::end(pkg.networks))
                continue;

            pgr.metadata(nh).set(NetworkParams{it->params});
        }
    }
}
// This pass, given the kernel package, selects a kernel implementation
// for every operation in the graph
void cv::gimpl::passes::resolveKernels(ade::passes::PassContext &ctx,

View File

@ -49,7 +49,8 @@ void cv::gimpl::passes::inferMeta(ade::passes::PassContext &ctx, bool meta_is_in
// Prepare operation's input metadata vector
// Note that it's size is usually different from nh.inEdges.size(),
// and its element count is equal to operation's arguments count.
// and its element count is equal to operation's arguments count
// (which may contain graph-construction-time parameters like integers, etc)
GMetaArgs input_meta_args(op.args.size());
// Iterate through input edges, update input_meta_args's slots
@ -66,16 +67,22 @@ void cv::gimpl::passes::inferMeta(ade::passes::PassContext &ctx, bool meta_is_in
{
// No meta in an input argument - a fatal error
// (note graph is traversed here in topological order)
util::throw_error(std::logic_error("Fatal: input object's metadata "
"not found!"));
util::throw_error(std::logic_error("Fatal: input object's metadata "
"not found!"));
// FIXME: Add more details!!!
}
input_meta_args.at(input_port) = input_meta;
}
// Now ask kernel for it's output meta.
// Resulting out_args may have a larger size than op.outs, since some
// outputs could stay unused (unconnected)
const auto out_metas = op.k.outMeta(input_meta_args, op.args);
const auto out_metas = gr.metadata(nh).contains<CustomMetaFunction>()
? gr.metadata(nh).get<CustomMetaFunction>().customOutMeta(ctx.graph,
nh,
input_meta_args,
op.args)
: op.k.outMeta(input_meta_args, op.args);
// Walk through operation's outputs, update meta of output objects
// appropriately

View File

@ -25,6 +25,12 @@ namespace ade {
namespace cv {
// Forward declarations - internal
namespace gapi {
class GKernelPackage;
struct GNetPackage;
} // namespace gapi
namespace gimpl { namespace passes {
void dumpDot(const ade::Graph &g, std::ostream& os);
@ -44,6 +50,9 @@ void storeResultingMeta(ade::passes::PassContext &ctx);
void expandKernels(ade::passes::PassContext &ctx,
const gapi::GKernelPackage& kernels);
void bindNetParams(ade::passes::PassContext &ctx,
const gapi::GNetPackage &networks);
void resolveKernels(ade::passes::PassContext &ctx,
const gapi::GKernelPackage &kernels);

View File

@ -88,7 +88,7 @@ void cv::gimpl::GExecutor::initResource(const ade::NodeHandle &orig_nh)
const Data &d = m_gm.metadata(orig_nh).get<Data>();
if ( d.storage != Data::Storage::INTERNAL
&& d.storage != Data::Storage::CONST)
&& d.storage != Data::Storage::CONST_VAL)
return;
// INTERNALS+CONST only! no need to allocate/reset output objects
@ -105,7 +105,7 @@ void cv::gimpl::GExecutor::initResource(const ade::NodeHandle &orig_nh)
break;
case GShape::GSCALAR:
if (d.storage == Data::Storage::CONST)
if (d.storage == Data::Storage::CONST_VAL)
{
auto rc = RcDesc{d.rc, d.shape, d.ctor};
magazine::bindInArg(m_res, rc, m_gm.metadata(orig_nh).get<ConstValue>().arg);

View File

@ -31,6 +31,7 @@ TEST(GAPI_MetaDesc, MatDesc)
EXPECT_EQ(1, desc1.chan);
EXPECT_EQ(320, desc1.size.width);
EXPECT_EQ(240, desc1.size.height);
EXPECT_FALSE(desc1.isND());
cv::Mat m2(480, 640, CV_8UC3);
const auto desc2 = cv::descr_of(m2);
@ -38,6 +39,21 @@ TEST(GAPI_MetaDesc, MatDesc)
EXPECT_EQ(3, desc2.chan);
EXPECT_EQ(640, desc2.size.width);
EXPECT_EQ(480, desc2.size.height);
EXPECT_FALSE(desc2.isND());
}
// descr_of() on a multi-dimensional cv::Mat must produce an N-dimensional
// GMatDesc: depth is preserved, channels are not applicable (-1), and the
// dimension vector is carried over element-by-element.
TEST(GAPI_MetaDesc, MatDescND)
{
    const std::vector<int> mat_dims = {1,3,299,299};
    cv::Mat m(mat_dims, CV_32F);

    const auto desc = cv::descr_of(m);
    EXPECT_EQ(CV_32F, desc.depth);
    EXPECT_EQ(-1, desc.chan);
    EXPECT_TRUE(desc.isND());
    for (size_t i = 0u; i < mat_dims.size(); ++i)
    {
        EXPECT_EQ(mat_dims[i], desc.dims[i]);
    }
}
TEST(GAPI_MetaDesc, VecMatDesc)
@ -45,13 +61,13 @@ TEST(GAPI_MetaDesc, VecMatDesc)
std::vector<cv::Mat> vec1 = {
cv::Mat(240, 320, CV_8U)};
const auto desc1 = cv::descr_of(vec1);
const auto desc1 = cv::descrs_of(vec1);
EXPECT_EQ((GMatDesc{CV_8U, 1, {320, 240}}), get<GMatDesc>(desc1[0]));
std::vector<cv::UMat> vec2 = {
cv::UMat(480, 640, CV_8UC3)};
const auto desc2 = cv::descr_of(vec2);
const auto desc2 = cv::descrs_of(vec2);
EXPECT_EQ((GMatDesc{CV_8U, 3, {640, 480}}), get<GMatDesc>(desc2[0]));
}
@ -61,7 +77,7 @@ TEST(GAPI_MetaDesc, VecOwnMatDesc)
cv::gapi::own::Mat(240, 320, CV_8U, nullptr),
cv::gapi::own::Mat(480, 640, CV_8UC3, nullptr)};
const auto desc = cv::gapi::own::descr_of(vec);
const auto desc = cv::gapi::own::descrs_of(vec);
EXPECT_EQ((GMatDesc{CV_8U, 1, {320, 240}}), get<GMatDesc>(desc[0]));
EXPECT_EQ((GMatDesc{CV_8U, 3, {640, 480}}), get<GMatDesc>(desc[1]));
}
@ -72,7 +88,7 @@ TEST(GAPI_MetaDesc, AdlVecOwnMatDesc)
cv::gapi::own::Mat(240, 320, CV_8U, nullptr),
cv::gapi::own::Mat(480, 640, CV_8UC3, nullptr)};
const auto desc = descr_of(vec);
const auto desc = descrs_of(vec);
EXPECT_EQ((GMatDesc{CV_8U, 1, {320, 240}}), get<GMatDesc>(desc[0]));
EXPECT_EQ((GMatDesc{CV_8U, 3, {640, 480}}), get<GMatDesc>(desc[1]));
}
@ -93,6 +109,38 @@ TEST(GAPI_MetaDesc, Compare_Not_Equal_MatDesc)
EXPECT_TRUE(desc1 != desc2);
}
// Two ND descriptors are equal iff both the depth and the dims vector match.
TEST(GAPI_MetaDesc, Compare_Equal_MatDesc_ND)
{
    const auto desc1 = cv::GMatDesc{CV_8U, {1,3,224,224}};
    const auto desc2 = cv::GMatDesc{CV_8U, {1,3,224,224}};
    EXPECT_TRUE(desc1 == desc2);
}

// Same dims, different depth -> not equal.
TEST(GAPI_MetaDesc, Compare_Not_Equal_MatDesc_ND_1)
{
    const auto desc1 = cv::GMatDesc{CV_8U, {1,1000}};
    const auto desc2 = cv::GMatDesc{CV_32F, {1,1000}};
    EXPECT_TRUE(desc1 != desc2);
}

// Same depth, different dims -> not equal.
TEST(GAPI_MetaDesc, Compare_Not_Equal_MatDesc_ND_2)
{
    const auto desc1 = cv::GMatDesc{CV_8U, {1,1000}};
    const auto desc2 = cv::GMatDesc{CV_8U, {1,1400}};
    EXPECT_TRUE(desc1 != desc2);
}

// An ND descriptor never compares equal to a planar (chan + width/height) one.
TEST(GAPI_MetaDesc, Compare_Not_Equal_MatDesc_ND_3)
{
    const auto desc1 = cv::GMatDesc{CV_8U, {1,1000}};
    const auto desc2 = cv::GMatDesc{CV_8U, 1, {32,32}};
    EXPECT_TRUE(desc1 != desc2);
}
TEST(GAPI_MetaDesc, Compile_MatchMetaNumber_1)
{
cv::GMat in;

View File

@ -0,0 +1,281 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2019 Intel Corporation
#include "../test_precomp.hpp"
#ifdef HAVE_INF_ENGINE
#include <stdexcept>
////////////////////////////////////////////////////////////////////////////////
// FIXME: Suppress deprecation warnings for OpenVINO 2019R2+
// BEGIN {{{
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
#ifdef _MSC_VER
#pragma warning(disable: 4996) // was declared deprecated
#endif
#if defined(__GNUC__)
#pragma GCC visibility push(default)
#endif
#include <inference_engine.hpp>
#if defined(__GNUC__)
#pragma GCC visibility pop
#endif
// END }}}
////////////////////////////////////////////////////////////////////////////////
#include <ade/util/iota_range.hpp>
#include <opencv2/gapi/infer/ie.hpp>
#include <opencv2/gapi/infer/ie/util.hpp>
namespace opencv_test
{
namespace {
// FIXME: taken from DNN module
// Register Open Model Zoo / DNN test data locations (taken from the DNN
// module). The body runs at most once per process; a no-op on WINRT builds.
static void initDLDTDataPath()
{
#ifndef WINRT
    static bool already_done = false;
    if (already_done)
        return;
    already_done = true;

    if (const char* omz_path = getenv("OPENCV_OPEN_MODEL_ZOO_DATA_PATH"))
    {
        cvtest::addDataSearchPath(omz_path);
    }
    if (const char* dnn_path = getenv("OPENCV_DNN_TEST_DATA_PATH"))
    {
        // The dnn path itself is added too - G-API uses some images there directly
        cvtest::addDataSearchPath(dnn_path);
        cvtest::addDataSearchPath(dnn_path + std::string("/omz_intel_models"));
    }
#endif // WINRT
}
// FIXME: taken from the DNN module
// Element-wise comparison of two arrays (taken from the DNN module):
// checks the mean absolute difference (L1 norm / total) and the maximum
// absolute difference (Linf norm) against the given thresholds.
void normAssert(cv::InputArray ref, cv::InputArray test,
                const char *comment /*= ""*/,
                double l1 = 0.00001, double lInf = 0.0001)
{
    const double avg_diff = cvtest::norm(ref, test, cv::NORM_L1) / ref.getMat().total();
    EXPECT_LE(avg_diff, l1) << comment;

    const double max_diff = cvtest::norm(ref, test, cv::NORM_INF);
    EXPECT_LE(max_diff, lInf) << comment;
}
} // anonymous namespace
// TODO: Probably DNN/IE part can be further parametrized with a template
// NOTE: here ".." is used to leave the default "gapi/" search scope
// Infer the age-gender network on a raw NCHW float tensor and compare
// G-API's results against the ones obtained from Inference Engine directly
// (the DNN module is intentionally not involved here).
TEST(TestAgeGenderIE, InferBasicTensor)
{
    initDLDTDataPath();

    const std::string path = "Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013";
    const auto topology_path = findDataFile(path + ".xml", false);
    const auto weights_path = findDataFile(path + ".bin", false);

    // Load IE network, initialize input data using that.
    namespace IE = InferenceEngine;
    cv::Mat in_mat;
    cv::Mat gapi_age, gapi_gender;

    IE::Blob::Ptr ie_age, ie_gender;
    {
        IE::CNNNetReader reader;
        reader.ReadNetwork(topology_path);
        reader.ReadWeights(weights_path);
        auto net = reader.getNetwork();

        // Derive the input tensor shape from the network itself and fill it
        // with random data; IE dims order is reversed w.r.t. cv::Mat dims.
        const auto &iedims = net.getInputsInfo().begin()->second->getDims();
        auto cvdims = cv::gapi::ie::util::to_ocv(iedims);
        std::reverse(cvdims.begin(), cvdims.end());
        in_mat.create(cvdims, CV_32F);
        cv::randu(in_mat, -1, 1);

        // Reference run: plain Inference Engine on the CPU plugin.
        auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU");
        auto plugin_net = plugin.LoadNetwork(net, {});
        auto infer_request = plugin_net.CreateInferRequest();
        infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_mat));
        infer_request.Infer();
        ie_age = infer_request.GetBlob("age_conv3");
        ie_gender = infer_request.GetBlob("prob");
    }

    // Configure & run G-API
    using AGInfo = std::tuple<cv::GMat, cv::GMat>;
    G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");

    cv::GMat in;
    cv::GMat age, gender;
    std::tie(age, gender) = cv::gapi::infer<AgeGender>(in);
    cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));

    auto pp = cv::gapi::ie::Params<AgeGender> {
        topology_path, weights_path, "CPU"
    }.cfgOutputLayers({ "age_conv3", "prob" });
    comp.apply(cv::gin(in_mat), cv::gout(gapi_age, gapi_gender),
               cv::compile_args(cv::gapi::networks(pp)));

    // Validate with IE itself (avoid DNN module dependency here)
    normAssert(cv::gapi::ie::util::to_ocv(ie_age), gapi_age, "Test age output" );
    normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output");
}
// Infer the age-gender network on an 8-bit BGR image (letting IE do the
// U8/NHWC preprocessing and the resize to the network's input size), and
// compare G-API's results with Inference Engine run directly.
TEST(TestAgeGenderIE, InferBasicImage)
{
    initDLDTDataPath();

    const std::string path = "Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013";
    const auto topology_path = findDataFile(path + ".xml", false);
    const auto weights_path = findDataFile(path + ".bin", false);

    // FIXME: Ideally it should be an image from disk
    // cv::Mat in_mat = cv::imread(findDataFile("grace_hopper_227.png"));
    cv::Mat in_mat(cv::Size(320, 240), CV_8UC3);
    cv::randu(in_mat, 0, 255);

    cv::Mat gapi_age, gapi_gender;

    // Load & run IE network
    namespace IE = InferenceEngine;
    IE::Blob::Ptr ie_age, ie_gender;
    {
        IE::CNNNetReader reader;
        reader.ReadNetwork(topology_path);
        reader.ReadWeights(weights_path);
        auto net = reader.getNetwork();

        // Configure the input to accept an interleaved 8-bit image and let
        // IE resize it to the network's input dimensions on its own.
        auto &ii = net.getInputsInfo().at("data");
        ii->setPrecision(IE::Precision::U8);
        ii->setLayout(IE::Layout::NHWC);
        ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);

        auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU");
        auto plugin_net = plugin.LoadNetwork(net, {});
        auto infer_request = plugin_net.CreateInferRequest();
        infer_request.SetBlob("data", cv::gapi::ie::util::to_ie(in_mat));
        infer_request.Infer();
        ie_age = infer_request.GetBlob("age_conv3");
        ie_gender = infer_request.GetBlob("prob");
    }

    // Configure & run G-API
    using AGInfo = std::tuple<cv::GMat, cv::GMat>;
    G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");

    cv::GMat in;
    cv::GMat age, gender;
    std::tie(age, gender) = cv::gapi::infer<AgeGender>(in);
    cv::GComputation comp(cv::GIn(in), cv::GOut(age, gender));

    auto pp = cv::gapi::ie::Params<AgeGender> {
        topology_path, weights_path, "CPU"
    }.cfgOutputLayers({ "age_conv3", "prob" });
    comp.apply(cv::gin(in_mat), cv::gout(gapi_age, gapi_gender),
               cv::compile_args(cv::gapi::networks(pp)));

    // Validate with IE itself (avoid DNN module dependency here)
    normAssert(cv::gapi::ie::util::to_ocv(ie_age), gapi_age, "Test age output" );
    normAssert(cv::gapi::ie::util::to_ocv(ie_gender), gapi_gender, "Test gender output");
}
// Infer the age-gender network on a list of ROIs within a single frame:
// the reference loop runs IE on every ROI blob manually, then G-API runs
// the same via the list-of-rects infer<> overload; results must match.
TEST(TestAgeGenderIE, InferROIList)
{
    initDLDTDataPath();

    const std::string path = "Retail/object_attributes/age_gender/dldt/age-gender-recognition-retail-0013";
    const auto topology_path = findDataFile(path + ".xml", false);
    const auto weights_path = findDataFile(path + ".bin", false);

    // FIXME: Ideally it should be an image from disk
    // cv::Mat in_mat = cv::imread(findDataFile("grace_hopper_227.png"));
    cv::Mat in_mat(cv::Size(640, 480), CV_8UC3);
    cv::randu(in_mat, 0, 255);

    std::vector<cv::Rect> rois = {
        cv::Rect(cv::Point{ 0,   0}, cv::Size{80, 120}),
        cv::Rect(cv::Point{50, 100}, cv::Size{96, 160}),
    };

    std::vector<cv::Mat> gapi_age, gapi_gender;

    // Load & run IE network
    namespace IE = InferenceEngine;
    std::vector<cv::Mat> ie_age, ie_gender;
    {
        IE::CNNNetReader reader;
        reader.ReadNetwork(topology_path);
        reader.ReadWeights(weights_path);
        auto net = reader.getNetwork();

        // Accept U8/NHWC input and let IE resize each ROI to the network size.
        auto &ii = net.getInputsInfo().at("data");
        ii->setPrecision(IE::Precision::U8);
        ii->setLayout(IE::Layout::NHWC);
        ii->getPreProcess().setResizeAlgorithm(IE::RESIZE_BILINEAR);

        auto plugin = IE::PluginDispatcher().getPluginByDevice("CPU");
        auto plugin_net = plugin.LoadNetwork(net, {});
        auto infer_request = plugin_net.CreateInferRequest();
        auto frame_blob = cv::gapi::ie::util::to_ie(in_mat);

        // Reference: run inference per-ROI, wrapping each rect as an IE::ROI
        // sub-blob of the whole-frame blob.
        for (auto &&rc : rois) {
            const auto ie_rc = IE::ROI {
                0u
                , static_cast<std::size_t>(rc.x)
                , static_cast<std::size_t>(rc.y)
                , static_cast<std::size_t>(rc.width)
                , static_cast<std::size_t>(rc.height)
            };
            infer_request.SetBlob("data", IE::make_shared_blob(frame_blob, ie_rc));
            infer_request.Infer();

            using namespace cv::gapi::ie::util;
            // clone(): GetBlob() memory is reused on the next iteration.
            ie_age.push_back(to_ocv(infer_request.GetBlob("age_conv3")).clone());
            ie_gender.push_back(to_ocv(infer_request.GetBlob("prob")).clone());
        }
    }

    // Configure & run G-API
    using AGInfo = std::tuple<cv::GMat, cv::GMat>;
    G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "test-age-gender");

    cv::GArray<cv::Rect> rr;
    cv::GMat in;
    cv::GArray<cv::GMat> age, gender;
    std::tie(age, gender) = cv::gapi::infer<AgeGender>(rr, in);
    cv::GComputation comp(cv::GIn(in, rr), cv::GOut(age, gender));

    auto pp = cv::gapi::ie::Params<AgeGender> {
        topology_path, weights_path, "CPU"
    }.cfgOutputLayers({ "age_conv3", "prob" });
    comp.apply(cv::gin(in_mat, rois), cv::gout(gapi_age, gapi_gender),
               cv::compile_args(cv::gapi::networks(pp)));

    // Validate with IE itself (avoid DNN module dependency here)
    ASSERT_EQ(2u, ie_age.size()   );
    ASSERT_EQ(2u, ie_gender.size());
    ASSERT_EQ(2u, gapi_age.size()   );
    ASSERT_EQ(2u, gapi_gender.size());

    normAssert(ie_age   [0], gapi_age   [0], "0: Test age output");
    normAssert(ie_gender[0], gapi_gender[0], "0: Test gender output");
    normAssert(ie_age   [1], gapi_age   [1], "1: Test age output");
    normAssert(ie_gender[1], gapi_gender[1], "1: Test gender output");
}
} // namespace opencv_test
#endif // HAVE_INF_ENGINE

View File

@ -23,12 +23,12 @@ namespace
{
cv::GMat unaryOp(cv::GMat m)
{
return cv::GCall(cv::GKernel{"gapi.test.unaryop", nullptr, { GShape::GMAT } }).pass(m).yield(0);
return cv::GCall(cv::GKernel{"gapi.test.unaryop", "", nullptr, { GShape::GMAT } }).pass(m).yield(0);
}
cv::GMat binaryOp(cv::GMat m1, cv::GMat m2)
{
return cv::GCall(cv::GKernel{"gapi.test.binaryOp", nullptr, { GShape::GMAT } }).pass(m1, m2).yield(0);
return cv::GCall(cv::GKernel{"gapi.test.binaryOp", "", nullptr, { GShape::GMAT } }).pass(m1, m2).yield(0);
}
std::vector<ade::NodeHandle> collectOperations(const cv::gimpl::GModel::Graph& gr)

View File

@ -12,6 +12,7 @@
namespace opencv_test
{
using Mat = cv::gapi::own::Mat;
using Dims = std::vector<int>;
TEST(OwnMat, DefaultConstruction)
{
@ -22,6 +23,7 @@ TEST(OwnMat, DefaultConstruction)
ASSERT_EQ(m.cols, 0);
ASSERT_EQ(m.type(), 0);
ASSERT_EQ(m.depth(), 0);
ASSERT_TRUE(m.dims.empty());
}
TEST(OwnMat, Create)
@ -39,6 +41,25 @@ TEST(OwnMat, Create)
ASSERT_EQ(m.channels(), 1);
ASSERT_EQ(m.elemSize(), sizeof(uint8_t));
ASSERT_EQ(m.step, sizeof(uint8_t) * m.cols);
ASSERT_TRUE(m.dims.empty());
}
// own::Mat::create() with a dimension vector must switch the matrix into
// N-dimensional mode: rows/cols/step become meaningless (0), channels()
// reports -1, and the dims vector is stored verbatim.
TEST(OwnMat, CreateND)
{
    const Dims requested = {1,1,32,32};

    Mat m;
    m.create(requested, CV_32F);

    size_t expected_total = 1u;
    for (int d : requested) expected_total *= static_cast<size_t>(d);

    ASSERT_NE(nullptr, m.data);
    ASSERT_EQ((cv::gapi::own::Size{0,0}), (cv::gapi::own::Size{m.cols, m.rows}));
    ASSERT_EQ(expected_total, m.total());
    ASSERT_EQ(CV_32F, m.type());
    ASSERT_EQ(CV_32F, m.depth());
    ASSERT_EQ(-1, m.channels());
    ASSERT_EQ(sizeof(float), m.elemSize());
    ASSERT_EQ(0u, m.step);
    ASSERT_EQ(requested, m.dims);
}
TEST(OwnMat, CreateOverload)
@ -56,7 +77,9 @@ TEST(OwnMat, CreateOverload)
ASSERT_EQ(m.channels(), 1);
ASSERT_EQ(m.elemSize(), sizeof(uint8_t));
ASSERT_EQ(m.step, sizeof(uint8_t) * m.cols);
ASSERT_TRUE(m.dims.empty());
}
TEST(OwnMat, Create3chan)
{
auto size = cv::Size{32,16};
@ -71,6 +94,7 @@ TEST(OwnMat, Create3chan)
ASSERT_EQ(m.channels(), 3);
ASSERT_EQ(m.elemSize(), 3 * sizeof(uint8_t));
ASSERT_EQ(m.step, 3* sizeof(uint8_t) * m.cols);
ASSERT_TRUE(m.dims.empty());
}
struct NonEmptyMat {
@ -91,7 +115,8 @@ namespace {
cv::Size{mat.cols, mat.rows},
mat.type(),
mat.depth(),
mat.channels()
mat.channels(),
mat.dims
);
};
@ -194,6 +219,23 @@ TEST(OwnMatConversion, WithStep)
<< (cvMat != cvMatFromOwn);
}
// Round-trip an N-dimensional cv::Mat through own::Mat (to_own / to_ocv)
// and verify the data survives bit-exactly.
//
// NB: the backing buffer must match the element type. The Mat is created
// as CV_32S (4 bytes/element), so the buffer must hold int32_t values -
// a uint8_t buffer of the same *element count* would be 4x too small and
// the Mat view would read out of bounds.
TEST(OwnMatConversion, WithND)
{
    const Dims dims = {1,3,8,8};
    std::vector<int32_t> data(dims[0]*dims[1]*dims[2]*dims[3]);
    for (size_t i = 0u; i < data.size(); i++)
    {
        data[i] = static_cast<int32_t>(i);
    }
    cv::Mat cvMat(dims, CV_32S, data.data());

    auto ownMat = to_own(cvMat);
    auto cvMatFromOwn = cv::gapi::own::to_ocv(ownMat);

    EXPECT_EQ(0, cv::countNonZero(cvMat != cvMatFromOwn))
        << cvMat << std::endl
        << (cvMat != cvMatFromOwn);
}
TEST(OwnMat, PtrWithStep)
{
constexpr int width = 8;
@ -242,6 +284,149 @@ TEST(OwnMat, CopyToWithStep)
<< (to_ocv(mat) != to_ocv(dst));
}
// Assigning an ND own::Mat over a regular (2D) one must turn the target
// into an ND matrix sharing the source's buffer and properties.
TEST(OwnMat, AssignNDtoRegular)
{
    const auto sz = cv::gapi::own::Size{32,32};
    const auto dims = Dims{1,3,224,224};

    Mat a;
    a.create(sz, CV_8U);
    const auto *old_ptr = a.data;

    // Precondition: 'a' is a plain 2D CV_8U matrix.
    ASSERT_NE(nullptr , a.data);
    ASSERT_EQ(sz , (cv::gapi::own::Size{a.cols, a.rows}));
    ASSERT_EQ(static_cast<size_t>(sz.width*sz.height), a.total());
    ASSERT_EQ(CV_8U , a.type());
    ASSERT_EQ(CV_8U , a.depth());
    ASSERT_EQ(1 , a.channels());
    ASSERT_EQ(sizeof(uint8_t), a.elemSize());
    ASSERT_EQ(static_cast<size_t>(sz.width), a.step);
    ASSERT_TRUE(a.dims.empty());

    Mat b;
    b.create(dims, CV_32F);
    a = b;

    // Postcondition: 'a' now describes b's ND buffer - rows/cols/step are
    // reset to zero and dims/total/type come from 'b'.
    ASSERT_NE(nullptr , a.data);
    ASSERT_NE(old_ptr , a.data);
    ASSERT_EQ((cv::gapi::own::Size{0,0}), (cv::gapi::own::Size{a.cols, a.rows}));
    ASSERT_EQ(static_cast<size_t>(dims[0]*dims[1]*dims[2]*dims[3]), a.total());
    ASSERT_EQ(CV_32F , a.type());
    ASSERT_EQ(CV_32F , a.depth());
    ASSERT_EQ(-1 , a.channels());
    ASSERT_EQ(sizeof(float), a.elemSize());
    ASSERT_EQ(0u , a.step);
    ASSERT_EQ(dims , a.dims);
}
// Assigning a regular (2D) own::Mat over an ND one must turn the target
// back into a regular matrix - the dims vector must be cleared.
TEST(OwnMat, AssignRegularToND)
{
    const auto sz = cv::gapi::own::Size{32,32};
    const auto dims = Dims{1,3,224,224};

    Mat a;
    a.create(dims, CV_32F);
    const auto *old_ptr = a.data;

    // Precondition: 'a' is an ND CV_32F matrix (rows/cols/step are zero).
    ASSERT_NE(nullptr , a.data);
    ASSERT_EQ((cv::gapi::own::Size{0,0}), (cv::gapi::own::Size{a.cols, a.rows}));
    ASSERT_EQ(static_cast<size_t>(dims[0]*dims[1]*dims[2]*dims[3]), a.total());
    ASSERT_EQ(CV_32F , a.type());
    ASSERT_EQ(CV_32F , a.depth());
    ASSERT_EQ(-1 , a.channels());
    ASSERT_EQ(sizeof(float), a.elemSize());
    ASSERT_EQ(0u , a.step);
    ASSERT_EQ(dims , a.dims);

    Mat b;
    b.create(sz, CV_8U);
    a = b;

    // Postcondition: 'a' is a plain 2D CV_8U matrix again; dims is empty.
    ASSERT_NE(nullptr , a.data);
    ASSERT_NE(old_ptr , a.data);
    ASSERT_EQ(sz , (cv::gapi::own::Size{a.cols, a.rows}));
    ASSERT_EQ(static_cast<size_t>(sz.width*sz.height), a.total());
    ASSERT_EQ(CV_8U , a.type());
    ASSERT_EQ(CV_8U , a.depth());
    ASSERT_EQ(1 , a.channels());
    ASSERT_EQ(sizeof(uint8_t), a.elemSize());
    ASSERT_EQ(static_cast<size_t>(sz.width), a.step);
    ASSERT_TRUE(a.dims.empty());
}
// copyTo() from an ND matrix into a regular (2D) one must reshape the
// destination into ND form while giving it its own (deep-copied) buffer.
TEST(OwnMat, CopyNDtoRegular)
{
    const auto sz = cv::gapi::own::Size{32,32};
    const auto dims = Dims{1,3,224,224};

    Mat a;
    a.create(sz, CV_8U);
    const auto *old_ptr = a.data;

    // Precondition: 'a' is a plain 2D CV_8U matrix.
    ASSERT_NE(nullptr , a.data);
    ASSERT_EQ(sz , (cv::gapi::own::Size{a.cols, a.rows}));
    ASSERT_EQ(static_cast<size_t>(sz.width*sz.height), a.total());
    ASSERT_EQ(CV_8U , a.type());
    ASSERT_EQ(CV_8U , a.depth());
    ASSERT_EQ(1 , a.channels());
    ASSERT_EQ(sizeof(uint8_t), a.elemSize());
    ASSERT_EQ(static_cast<size_t>(sz.width), a.step);
    ASSERT_TRUE(a.dims.empty());

    Mat b;
    b.create(dims, CV_32F);
    b.copyTo(a);

    // Postcondition: 'a' matches b's ND shape but owns a distinct buffer
    // (deep copy - unlike assignment, which shares the data pointer).
    ASSERT_NE(nullptr , a.data);
    ASSERT_NE(old_ptr , a.data);
    ASSERT_NE(b.data , a.data);
    ASSERT_EQ((cv::gapi::own::Size{0,0}), (cv::gapi::own::Size{a.cols, a.rows}));
    ASSERT_EQ(static_cast<size_t>(dims[0]*dims[1]*dims[2]*dims[3]), a.total());
    ASSERT_EQ(CV_32F , a.type());
    ASSERT_EQ(CV_32F , a.depth());
    ASSERT_EQ(-1 , a.channels());
    ASSERT_EQ(sizeof(float), a.elemSize());
    ASSERT_EQ(0u , a.step);
    ASSERT_EQ(dims , a.dims);
}
// copyTo() from a regular (2D) matrix into an ND one must reshape the
// destination back to 2D form, again with its own (deep-copied) buffer.
TEST(OwnMat, CopyRegularToND)
{
    const auto sz = cv::gapi::own::Size{32,32};
    const auto dims = Dims{1,3,224,224};

    Mat a;
    a.create(dims, CV_32F);
    const auto *old_ptr = a.data;

    // Precondition: 'a' is an ND CV_32F matrix (rows/cols/step are zero).
    ASSERT_NE(nullptr , a.data);
    ASSERT_EQ((cv::gapi::own::Size{0,0}), (cv::gapi::own::Size{a.cols, a.rows}));
    ASSERT_EQ(static_cast<size_t>(dims[0]*dims[1]*dims[2]*dims[3]), a.total());
    ASSERT_EQ(CV_32F , a.type());
    ASSERT_EQ(CV_32F , a.depth());
    ASSERT_EQ(-1 , a.channels());
    ASSERT_EQ(sizeof(float), a.elemSize());
    ASSERT_EQ(0u , a.step);
    ASSERT_EQ(dims , a.dims);

    Mat b;
    b.create(sz, CV_8U);
    b.copyTo(a);

    // Postcondition: 'a' is a plain 2D CV_8U matrix with a distinct buffer
    // (deep copy); the dims vector must be cleared.
    ASSERT_NE(nullptr , a.data);
    ASSERT_NE(old_ptr , a.data);
    ASSERT_NE(b.data , a.data);
    ASSERT_EQ(sz , (cv::gapi::own::Size{a.cols, a.rows}));
    ASSERT_EQ(static_cast<size_t>(sz.width*sz.height), a.total());
    ASSERT_EQ(CV_8U , a.type());
    ASSERT_EQ(CV_8U , a.depth());
    ASSERT_EQ(1 , a.channels());
    ASSERT_EQ(sizeof(uint8_t), a.elemSize());
    ASSERT_EQ(static_cast<size_t>(sz.width), a.step);
    ASSERT_TRUE(a.dims.empty());
}
TEST(OwnMat, ScalarAssign32SC1)
{
constexpr int width = 8;
@ -304,6 +489,19 @@ TEST(OwnMat, ScalarAssign8UC1)
<< cmp_result_mat << std::endl;
}
// Scalar assignment must fill every element of an N-dimensional
// own::Mat with the scalar's value.
TEST(OwnMat, ScalarAssignND)
{
    const std::vector<int> shape = {1,1000};

    Mat m;
    m.create(shape, CV_32F);
    m = cv::gapi::own::Scalar{-1};

    const float *elems = reinterpret_cast<float*>(m.data);
    const size_t count = m.total();
    for (size_t i = 0u; i != count; ++i)
    {
        EXPECT_EQ(-1.f, elems[i]);
    }
}
TEST(OwnMat, ScalarAssign8UC3)
{
constexpr auto cv_type = CV_8SC3;

View File

@ -14,6 +14,7 @@
#include <vector>
#include <opencv2/ts.hpp>
#include <opencv2/gapi.hpp>
#include <opencv2/gapi/imgproc.hpp>
#include <opencv2/gapi/core.hpp>
@ -25,5 +26,6 @@
#include <opencv2/gapi/operators.hpp>
#include <opencv2/gapi/fluid/imgproc.hpp>
#include <opencv2/gapi/fluid/core.hpp>
#include <opencv2/gapi/infer.hpp>
#endif // __OPENCV_GAPI_TEST_PRECOMP_HPP__