Merge pull request #19351 from TolyaTalamanov:at/python-backend

Alexander Alekhin 2021-03-29 21:54:44 +00:00
commit 64072e44cc
9 changed files with 697 additions and 17 deletions


@ -53,6 +53,7 @@ file(GLOB gapi_ext_hdrs
    "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/streaming/*.hpp"
    "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/plaidml/*.hpp"
    "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/util/*.hpp"
    "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/python/*.hpp"
    )

set(gapi_srcs
@ -158,6 +159,7 @@ set(gapi_srcs
    # Python bridge
    src/backends/ie/bindings_ie.cpp
    src/backends/python/gpythonbackend.cpp
    )

ocv_add_dispatched_file(backends/fluid/gfluidimgproc_func SSE4_1 AVX2)


@ -645,7 +645,7 @@ Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref
@param ddepth optional depth of the output matrix.
@sa sub, addWeighted
*/
GAPI_EXPORTS_W GMat addC(const GMat& src1, const GScalar& c, int ddepth = -1);
//! @overload
GAPI_EXPORTS GMat addC(const GScalar& c, const GMat& src1, int ddepth = -1);

@ -1945,7 +1945,7 @@ Gets dimensions from rectangle.
@param r Input rectangle.
@return Size (rectangle dimensions).
*/
GAPI_EXPORTS_W GOpaque<Size> size(const GOpaque<Rect>& r);

/** @brief Gets dimensions from MediaFrame.


@ -1168,7 +1168,7 @@ Calculates the up-right bounding rectangle of a point set.
@param src Input 2D point set, stored in std::vector<cv::Point2i>.
*/
GAPI_EXPORTS_W GOpaque<Rect> boundingRect(const GArray<Point2i>& src);
/** @overload


@ -0,0 +1,58 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2021 Intel Corporation
#ifndef OPENCV_GAPI_PYTHON_API_HPP
#define OPENCV_GAPI_PYTHON_API_HPP
#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
#include <opencv2/gapi/own/exports.hpp> // GAPI_EXPORTS
namespace cv {
namespace gapi {
namespace python {
GAPI_EXPORTS cv::gapi::GBackend backend();
struct GPythonContext
{
const cv::GArgs &ins;
const cv::GMetaArgs &in_metas;
const cv::GTypesInfo &out_info;
};
using Impl = std::function<cv::GRunArgs(const GPythonContext&)>;
class GAPI_EXPORTS GPythonKernel
{
public:
GPythonKernel() = default;
GPythonKernel(Impl run);
cv::GRunArgs operator()(const GPythonContext& ctx);
private:
Impl m_run;
};
class GAPI_EXPORTS GPythonFunctor : public cv::gapi::GFunctor
{
public:
using Meta = cv::GKernel::M;
GPythonFunctor(const char* id, const Meta &meta, const Impl& impl);
GKernelImpl impl() const override;
gapi::GBackend backend() const override;
private:
GKernelImpl impl_;
};
} // namespace python
} // namespace gapi
} // namespace cv
#endif // OPENCV_GAPI_PYTHON_API_HPP
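
The header above only declares the C++ plumbing: GPythonKernel wraps a std::function taking a GPythonContext, and GPythonFunctor binds such a kernel to an operation id. For orientation, here is a minimal sketch of the Python-side flow this backend ultimately enables, modeled on the bindings and tests added later in this commit (it assumes an OpenCV build that includes these G-API Python bindings):

import cv2 as cv
import numpy as np

# A Python implementation for the standard 'mean' operation; the backend
# invokes it with inputs already converted to native Python/numpy objects.
def custom_mean(img):
    return cv.mean(img)

g_in  = cv.GMat()
g_out = cv.gapi.mean(g_in)
comp  = cv.GComputation(g_in, g_out)

# Override the operation's implementation with the Python kernel, keyed by id.
pkg    = cv.gapi_wip_kernels((custom_mean, 'org.opencv.core.math.mean'))
in_mat = np.full((3, 3), 45, dtype=np.uint8)
actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))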


@ -3,10 +3,13 @@
#ifdef HAVE_OPENCV_GAPI
#include <opencv2/gapi/cpu/gcpukernel.hpp>
#include <opencv2/gapi/python/python.hpp>

// NB: Python wrapper replaces :: with _ for classes
using gapi_GKernelPackage = cv::gapi::GKernelPackage;
using gapi_GNetPackage = cv::gapi::GNetPackage;
using gapi_ie_PyParams = cv::gapi::ie::PyParams;
using gapi_wip_IStreamSource_Ptr = cv::Ptr<cv::gapi::wip::IStreamSource>;
using detail_ExtractArgsCallback = cv::detail::ExtractArgsCallback;
using detail_ExtractMetaCallback = cv::detail::ExtractMetaCallback;

@ -18,7 +21,7 @@ using GOpaque_int = cv::GOpaque<int>;
using GOpaque_double = cv::GOpaque<double>;
using GOpaque_float = cv::GOpaque<double>;
using GOpaque_string = cv::GOpaque<std::string>;
using GOpaque_Point2i = cv::GOpaque<cv::Point>;
using GOpaque_Point2f = cv::GOpaque<cv::Point2f>;
using GOpaque_Size = cv::GOpaque<cv::Size>;
using GOpaque_Rect = cv::GOpaque<cv::Rect>;

@ -28,7 +31,7 @@ using GArray_int = cv::GArray<int>;
using GArray_double = cv::GArray<double>;
using GArray_float = cv::GArray<double>;
using GArray_string = cv::GArray<std::string>;
using GArray_Point2i = cv::GArray<cv::Point>;
using GArray_Point2f = cv::GArray<cv::Point2f>;
using GArray_Size = cv::GArray<cv::Size>;
using GArray_Rect = cv::GArray<cv::Rect>;
@ -41,19 +44,19 @@ using GArray_GMat = cv::GArray<cv::GMat>;
// WA: Create using
using std::string;

template <>
bool pyopencv_to(PyObject* obj, std::vector<GCompileArg>& value, const ArgInfo& info)
{
    return pyopencv_to_generic_vec(obj, value, info);
}

template <>
PyObject* pyopencv_from(const std::vector<GCompileArg>& value)
{
    return pyopencv_from_generic_vec(value);
}

template <>
bool pyopencv_to(PyObject* obj, GRunArgs& value, const ArgInfo& info)
{
    return pyopencv_to_generic_vec(obj, value, info);
@ -267,10 +270,9 @@ static cv::detail::OpaqueRef extract_opaque_ref(PyObject* from, cv::detail::Opaq
        UNSUPPORTED(SCALAR);
        UNSUPPORTED(MAT);
        UNSUPPORTED(DRAW_PRIM);
#undef HANDLE_CASE
#undef UNSUPPORTED
    }

    util::throw_error(std::logic_error("Unsupported type for GOpaqueT"));
}

@ -302,8 +304,7 @@ static cv::detail::VectorRef extract_vector_ref(PyObject* from, cv::detail::Opaq
#undef HANDLE_CASE
#undef UNSUPPORTED
    }
    util::throw_error(std::logic_error("Unsupported type for GArrayT"));
}
static cv::GRunArg extract_run_arg(const cv::GTypeInfo& info, PyObject* item)

@ -340,6 +341,7 @@ static cv::GRunArg extract_run_arg(const cv::GTypeInfo& info, PyObject* item)
        }
        case cv::GShape::GFRAME:
        {
            // NB: Isn't supported yet.
            break;
        }
    }

@ -391,7 +393,6 @@ static cv::GMetaArg extract_meta_arg(const cv::GTypeInfo& info, PyObject* item)
            break;
        }
    }
    util::throw_error(std::logic_error("Unsupported output shape"));
}

@ -409,6 +410,134 @@ static cv::GMetaArgs extract_meta_args(const cv::GTypesInfo& info, PyObject* py_
    return metas;
}
inline PyObject* extract_opaque_value(const cv::GArg& value)
{
GAPI_Assert(value.kind != cv::detail::ArgKind::GOBJREF);
#define HANDLE_CASE(T, O) case cv::detail::OpaqueKind::CV_##T: \
{ \
return pyopencv_from(value.get<O>()); \
}
#define UNSUPPORTED(T) case cv::detail::OpaqueKind::CV_##T: break
switch (value.opaque_kind)
{
HANDLE_CASE(BOOL, bool);
HANDLE_CASE(INT, int);
HANDLE_CASE(DOUBLE, double);
HANDLE_CASE(FLOAT, float);
HANDLE_CASE(STRING, std::string);
HANDLE_CASE(POINT, cv::Point);
HANDLE_CASE(POINT2F, cv::Point2f);
HANDLE_CASE(SIZE, cv::Size);
HANDLE_CASE(RECT, cv::Rect);
HANDLE_CASE(SCALAR, cv::Scalar);
HANDLE_CASE(MAT, cv::Mat);
UNSUPPORTED(UNKNOWN);
UNSUPPORTED(UINT64);
UNSUPPORTED(DRAW_PRIM);
#undef HANDLE_CASE
#undef UNSUPPORTED
}
util::throw_error(std::logic_error("Unsupported kernel input type"));
}
static cv::GRunArgs run_py_kernel(PyObject* kernel,
const cv::gapi::python::GPythonContext &ctx)
{
const auto& ins = ctx.ins;
const auto& in_metas = ctx.in_metas;
const auto& out_info = ctx.out_info;
PyGILState_STATE gstate;
gstate = PyGILState_Ensure();
cv::GRunArgs outs;
try
{
int in_idx = 0;
PyObject* args = PyTuple_New(ins.size());
for (size_t i = 0; i < ins.size(); ++i)
{
// NB: If the meta is monostate, the object isn't associated with a G-API type.
// If its kind matches one of the supported opaque types, convert it from C++ to
// Python; otherwise (CV_UNKNOWN) it already holds a PyObject*, so pass it as-is.
if (cv::util::holds_alternative<cv::util::monostate>(in_metas[i]))
{
PyTuple_SetItem(args, i,
ins[i].opaque_kind != cv::detail::OpaqueKind::CV_UNKNOWN ? extract_opaque_value(ins[i])
: ins[i].get<PyObject*>());
continue;
}
switch (in_metas[i].index())
{
case cv::GMetaArg::index_of<cv::GMatDesc>():
PyTuple_SetItem(args, i, pyopencv_from(ins[i].get<cv::Mat>()));
break;
case cv::GMetaArg::index_of<cv::GScalarDesc>():
PyTuple_SetItem(args, i, pyopencv_from(ins[i].get<cv::Scalar>()));
break;
case cv::GMetaArg::index_of<cv::GOpaqueDesc>():
PyTuple_SetItem(args, i, pyopencv_from(ins[i].get<cv::detail::OpaqueRef>()));
break;
case cv::GMetaArg::index_of<cv::GArrayDesc>():
PyTuple_SetItem(args, i, pyopencv_from(ins[i].get<cv::detail::VectorRef>()));
break;
case cv::GMetaArg::index_of<cv::GFrameDesc>():
util::throw_error(std::logic_error("GFrame isn't supported for custom operation"));
break;
}
++in_idx;
}
PyObject* result = PyObject_CallObject(kernel, args);
outs = out_info.size() == 1 ? cv::GRunArgs{extract_run_arg(out_info[0], result)}
: extract_run_args(out_info, result);
}
catch (...)
{
PyGILState_Release(gstate);
throw;
}
PyGILState_Release(gstate);
return outs;
}
// FIXME: For now it's impossible to obtain the meta function from the operation,
// because a kernel is connected to its operation only by id (string).
static GMetaArgs empty_meta(const cv::GMetaArgs &, const cv::GArgs &) {
return {};
}
static PyObject* pyopencv_cv_gapi_kernels(PyObject* , PyObject* py_args, PyObject*)
{
using namespace cv;
gapi::GKernelPackage pkg;
Py_ssize_t size = PyTuple_Size(py_args);
for (int i = 0; i < size; ++i)
{
PyObject* pair = PyTuple_GetItem(py_args, i);
PyObject* kernel = PyTuple_GetItem(pair, 0);
std::string id;
if (!pyopencv_to(PyTuple_GetItem(pair, 1), id, ArgInfo("id", false)))
{
PyErr_SetString(PyExc_TypeError, "Failed to obtain: kernel id must be a string");
return NULL;
}
Py_INCREF(kernel);
gapi::python::GPythonFunctor f(id.c_str(),
empty_meta,
std::bind(run_py_kernel,
kernel,
std::placeholders::_1));
pkg.include(f);
}
return pyopencv_from(pkg);
}
static PyObject* pyopencv_cv_gin(PyObject*, PyObject* py_args, PyObject*)
{
    Py_INCREF(py_args);
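
To summarize the glue above: pyopencv_cv_gapi_kernels expects one (callable, id) pair per argument and wraps each callable into a GPythonFunctor with empty_meta, while run_py_kernel converts every input according to its meta (GMat to numpy array, GScalar to a 4-element tuple, GOpaque/GArray to their Python counterparts) before the call. A hedged sketch of the expected call shape, mirroring the tests below:

import cv2 as cv

def custom_add(img1, img2, dtype):
    # GMat inputs arrive as numpy arrays; trailing scalar parameters (dtype) as plain values.
    return cv.add(img1, img2)

def custom_mean(img):
    # A single GScalar output can be returned directly (here cv.mean's 4-tuple).
    return cv.mean(img)

# Each argument is a (callable, operation-id) pair; the id must be a string.
pkg = cv.gapi_wip_kernels((custom_add,  'org.opencv.core.math.add'),
                          (custom_mean, 'org.opencv.core.math.mean'))
# pkg is a G-API kernel package; pass it to GComputation.apply via cv.compile_args(pkg).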


@ -15,6 +15,59 @@ pkgs = [
         # ('plaidml', cv.gapi.core.plaidml.kernels())
       ]
# Test output GMat.
def custom_add(img1, img2, dtype):
return cv.add(img1, img2)
# Test output GScalar.
def custom_mean(img):
return cv.mean(img)
# Test output tuple of GMat's.
def custom_split3(img):
# NB: cv.split returns a list, but G-API requires a tuple in the multiple-output case.
return tuple(cv.split(img))
# Test output GOpaque.
def custom_size(img):
# NB: Take only H, W, because the operation should return cv::Size which is 2D.
return img.shape[:2]
# Test output GArray.
def custom_goodFeaturesToTrack(img, max_corners, quality_lvl,
min_distance, mask, block_sz,
use_harris_detector, k):
features = cv.goodFeaturesToTrack(img, max_corners, quality_lvl,
min_distance, mask=mask,
blockSize=block_sz,
useHarrisDetector=use_harris_detector, k=k)
# NB: The operation output is cv::GArray<cv::Point2f>, which on the Python side
# must look like [(1.2, 3.4), (5.2, 3.2)]: cv::Point2f maps to a tuple and
# cv::GArray<> maps to a list. cv.goodFeaturesToTrack returns an np.array of
# shape (n_features, 1, 2), so convert it to a list of n_features tuples.
features = list(map(tuple, features.reshape(features.shape[0], -1)))
return features
# Test input scalar.
def custom_addC(img, sc, dtype):
# NB: dtype is ignored in this implementation.
# Moreover, the G-API kernel receives the scalar as a 4-element tuple whose
# last element is zero; drop it so that broadcasting works.
return img + np.array(sc, dtype=np.uint8)[:-1]
# Test input opaque.
def custom_sizeR(rect):
# NB: rect is a tuple (x, y, w, h)
return (rect[2], rect[3])
# Test input array.
def custom_boundingRect(array):
# NB: OpenCV expects a numpy array of shape (n_points, 2);
# G-API passes the points as a list of n_points tuples.
return cv.boundingRect(np.array(array))
class gapi_sample_pipelines(NewOpenCVTests):

@ -40,5 +93,182 @@ class gapi_sample_pipelines(NewOpenCVTests):
                             'Failed on ' + pkg_name + ' backend')
def test_custom_mean(self):
img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
in_mat = cv.imread(img_path)
# OpenCV
expected = cv.mean(in_mat)
# G-API
g_in = cv.GMat()
g_out = cv.gapi.mean(g_in)
comp = cv.GComputation(g_in, g_out)
pkg = cv.gapi_wip_kernels((custom_mean, 'org.opencv.core.math.mean'))
actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
# Comparison
self.assertEqual(expected, actual)
def test_custom_add(self):
sz = (3, 3)
in_mat1 = np.full(sz, 45, dtype=np.uint8)
in_mat2 = np.full(sz, 50 , dtype=np.uint8)
# OpenCV
expected = cv.add(in_mat1, in_mat2)
# G-API
g_in1 = cv.GMat()
g_in2 = cv.GMat()
g_out = cv.gapi.add(g_in1, g_in2)
comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_out))
pkg = cv.gapi_wip_kernels((custom_add, 'org.opencv.core.math.add'))
actual = comp.apply(cv.gin(in_mat1, in_mat2), args=cv.compile_args(pkg))
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
def test_custom_size(self):
sz = (100, 150, 3)
in_mat = np.full(sz, 45, dtype=np.uint8)
# OpenCV
expected = (100, 150)
# G-API
g_in = cv.GMat()
g_sz = cv.gapi.streaming.size(g_in)
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_sz))
pkg = cv.gapi_wip_kernels((custom_size, 'org.opencv.streaming.size'))
actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
def test_custom_goodFeaturesToTrack(self):
# G-API
img_path = self.find_file('cv/face/david2.jpg', [os.environ.get('OPENCV_TEST_DATA_PATH')])
in_mat = cv.cvtColor(cv.imread(img_path), cv.COLOR_RGB2GRAY)
# NB: goodFeaturesToTrack configuration
max_corners = 50
quality_lvl = 0.01
min_distance = 10
block_sz = 3
use_harris_detector = True
k = 0.04
mask = None
# OpenCV
expected = cv.goodFeaturesToTrack(in_mat, max_corners, quality_lvl,
min_distance, mask=mask,
blockSize=block_sz, useHarrisDetector=use_harris_detector, k=k)
# G-API
g_in = cv.GMat()
g_out = cv.gapi.goodFeaturesToTrack(g_in, max_corners, quality_lvl,
min_distance, mask, block_sz, use_harris_detector, k)
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_out))
pkg = cv.gapi_wip_kernels((custom_goodFeaturesToTrack, 'org.opencv.imgproc.feature.goodFeaturesToTrack'))
actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))
# NB: OpenCV and G-API produce different output types here:
# OpenCV - a numpy array of shape (num_points, 1, 2),
# G-API  - a list of num_points tuples.
# Comparison
self.assertEqual(0.0, cv.norm(expected.flatten(),
np.array(actual, dtype=np.float32).flatten(), cv.NORM_INF))
def test_custom_addC(self):
sz = (3, 3, 3)
in_mat = np.full(sz, 45, dtype=np.uint8)
sc = (50, 10, 20)
# Numpy reference, make array from sc to keep uint8 dtype.
expected = in_mat + np.array(sc, dtype=np.uint8)
# G-API
g_in = cv.GMat()
g_sc = cv.GScalar()
g_out = cv.gapi.addC(g_in, g_sc)
comp = cv.GComputation(cv.GIn(g_in, g_sc), cv.GOut(g_out))
pkg = cv.gapi_wip_kernels((custom_addC, 'org.opencv.core.math.addC'))
actual = comp.apply(cv.gin(in_mat, sc), args=cv.compile_args(pkg))
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
def test_custom_sizeR(self):
# x, y, w, h
roi = (10, 15, 100, 150)
expected = (100, 150)
# G-API
g_r = cv.GOpaqueT(cv.gapi.CV_RECT)
g_sz = cv.gapi.streaming.size(g_r)
comp = cv.GComputation(cv.GIn(g_r), cv.GOut(g_sz))
pkg = cv.gapi_wip_kernels((custom_sizeR, 'org.opencv.streaming.sizeR'))
actual = comp.apply(cv.gin(roi), args=cv.compile_args(pkg))
# cv.norm works with tuples ?
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
def test_custom_boundingRect(self):
points = [(0,0), (0,1), (1,0), (1,1)]
# OpenCV
expected = cv.boundingRect(np.array(points))
# G-API
g_pts = cv.GArrayT(cv.gapi.CV_POINT)
g_br = cv.gapi.boundingRect(g_pts)
comp = cv.GComputation(cv.GIn(g_pts), cv.GOut(g_br))
pkg = cv.gapi_wip_kernels((custom_boundingRect, 'org.opencv.imgproc.shape.boundingRectVector32S'))
actual = comp.apply(cv.gin(points), args=cv.compile_args(pkg))
# cv.norm works with tuples ?
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
def test_multiple_custom_kernels(self):
sz = (3, 3, 3)
in_mat1 = np.full(sz, 45, dtype=np.uint8)
in_mat2 = np.full(sz, 50 , dtype=np.uint8)
# OpenCV
expected = cv.mean(cv.split(cv.add(in_mat1, in_mat2))[1])
# G-API
g_in1 = cv.GMat()
g_in2 = cv.GMat()
g_sum = cv.gapi.add(g_in1, g_in2)
g_b, g_r, g_g = cv.gapi.split3(g_sum)
g_mean = cv.gapi.mean(g_b)
comp = cv.GComputation(cv.GIn(g_in1, g_in2), cv.GOut(g_mean))
pkg = cv.gapi_wip_kernels((custom_add , 'org.opencv.core.math.add'),
(custom_mean , 'org.opencv.core.math.mean'),
(custom_split3, 'org.opencv.core.transform.split3'))
actual = comp.apply(cv.gin(in_mat1, in_mat2), args=cv.compile_args(pkg))
self.assertEqual(0.0, cv.norm(expected, actual, cv.NORM_INF))
if __name__ == '__main__':
    NewOpenCVTests.bootstrap()


@ -199,6 +199,5 @@ class test_gapi_streaming(NewOpenCVTests):
                if proc_num_frames == max_num_frames:
                    break;

if __name__ == '__main__':
    NewOpenCVTests.bootstrap()


@ -0,0 +1,261 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
//
// Copyright (C) 2021 Intel Corporation
#include <ade/util/zip_range.hpp> // zip_range, indexed
#include <opencv2/gapi/util/throw.hpp> // throw_error
#include <opencv2/gapi/python/python.hpp>
#include "api/gbackend_priv.hpp"
#include "backends/common/gbackend.hpp"
cv::gapi::python::GPythonKernel::GPythonKernel(cv::gapi::python::Impl run)
: m_run(run)
{
}
cv::GRunArgs cv::gapi::python::GPythonKernel::operator()(const cv::gapi::python::GPythonContext& ctx)
{
return m_run(ctx);
}
cv::gapi::python::GPythonFunctor::GPythonFunctor(const char* id,
const cv::gapi::python::GPythonFunctor::Meta &meta,
const cv::gapi::python::Impl& impl)
: gapi::GFunctor(id), impl_{GPythonKernel{impl}, meta}
{
}
cv::GKernelImpl cv::gapi::python::GPythonFunctor::impl() const
{
return impl_;
}
cv::gapi::GBackend cv::gapi::python::GPythonFunctor::backend() const
{
return cv::gapi::python::backend();
}
namespace {
struct PythonUnit
{
static const char *name() { return "PythonUnit"; }
cv::gapi::python::GPythonKernel kernel;
};
using PythonModel = ade::TypedGraph
< cv::gimpl::Op
, PythonUnit
>;
using ConstPythonModel = ade::ConstTypedGraph
< cv::gimpl::Op
, PythonUnit
>;
class GPythonExecutable final: public cv::gimpl::GIslandExecutable
{
virtual void run(std::vector<InObj> &&,
std::vector<OutObj> &&) override;
virtual bool allocatesOutputs() const override { return true; }
// Return an empty RMat: the input is reused, so there is no need
// to allocate and copy a full 4K image here.
virtual cv::RMat allocate(const cv::GMatDesc&) const override { return {}; }
virtual bool canReshape() const override { return true; }
virtual void reshape(ade::Graph&, const cv::GCompileArgs&) override {
// Do nothing here
}
public:
GPythonExecutable(const ade::Graph &,
const std::vector<ade::NodeHandle> &);
const ade::Graph& m_g;
cv::gimpl::GModel::ConstGraph m_gm;
cv::gapi::python::GPythonKernel m_kernel;
ade::NodeHandle m_op;
cv::GTypesInfo m_out_info;
cv::GMetaArgs m_in_metas;
cv::gimpl::Mag m_res;
};
static cv::GArg packArg(cv::gimpl::Mag& m_res, const cv::GArg &arg)
{
// No API placeholders allowed at this point
// FIXME: this check has to be done somewhere in compilation stage.
GAPI_Assert( arg.kind != cv::detail::ArgKind::GMAT
&& arg.kind != cv::detail::ArgKind::GSCALAR
&& arg.kind != cv::detail::ArgKind::GARRAY
&& arg.kind != cv::detail::ArgKind::GOPAQUE
&& arg.kind != cv::detail::ArgKind::GFRAME);
if (arg.kind != cv::detail::ArgKind::GOBJREF)
{
// All other cases - pass as-is, with no transformations to GArg contents.
return arg;
}
GAPI_Assert(arg.kind == cv::detail::ArgKind::GOBJREF);
// Wrap associated CPU object (either host or an internal one)
// FIXME: object can be moved out!!! GExecutor faced that.
const cv::gimpl::RcDesc &ref = arg.get<cv::gimpl::RcDesc>();
switch (ref.shape)
{
case cv::GShape::GMAT: return cv::GArg(m_res.slot<cv::Mat>() [ref.id]);
case cv::GShape::GSCALAR: return cv::GArg(m_res.slot<cv::Scalar>()[ref.id]);
// Note: .at() is intentional for GArray and GOpaque as objects MUST be already there
// (and constructed by either bindIn/Out or resetInternal)
case cv::GShape::GARRAY: return cv::GArg(m_res.slot<cv::detail::VectorRef>().at(ref.id));
case cv::GShape::GOPAQUE: return cv::GArg(m_res.slot<cv::detail::OpaqueRef>().at(ref.id));
case cv::GShape::GFRAME: return cv::GArg(m_res.slot<cv::MediaFrame>().at(ref.id));
default:
cv::util::throw_error(std::logic_error("Unsupported GShape type"));
break;
}
}
static void writeBack(cv::GRunArg& arg, cv::GRunArgP& out)
{
switch (arg.index())
{
case cv::GRunArg::index_of<cv::Mat>():
{
auto& rmat = *cv::util::get<cv::RMat*>(out);
rmat = cv::make_rmat<cv::gimpl::RMatAdapter>(cv::util::get<cv::Mat>(arg));
break;
}
case cv::GRunArg::index_of<cv::Scalar>():
{
*cv::util::get<cv::Scalar*>(out) = cv::util::get<cv::Scalar>(arg);
break;
}
case cv::GRunArg::index_of<cv::detail::OpaqueRef>():
{
auto& oref = cv::util::get<cv::detail::OpaqueRef>(arg);
cv::util::get<cv::detail::OpaqueRef>(out).mov(oref);
break;
}
case cv::GRunArg::index_of<cv::detail::VectorRef>():
{
auto& vref = cv::util::get<cv::detail::VectorRef>(arg);
cv::util::get<cv::detail::VectorRef>(out).mov(vref);
break;
}
default:
GAPI_Assert(false && "Unsupported output type");
}
}
void GPythonExecutable::run(std::vector<InObj> &&input_objs,
std::vector<OutObj> &&output_objs)
{
const auto &op = m_gm.metadata(m_op).get<cv::gimpl::Op>();
for (auto& it : input_objs) cv::gimpl::magazine::bindInArg(m_res, it.first, it.second);
using namespace std::placeholders;
cv::GArgs inputs;
ade::util::transform(op.args,
std::back_inserter(inputs),
std::bind(&packArg, std::ref(m_res), _1));
cv::gapi::python::GPythonContext ctx{inputs, m_in_metas, m_out_info};
auto outs = m_kernel(ctx);
for (auto&& it : ade::util::zip(outs, output_objs))
{
writeBack(std::get<0>(it), std::get<1>(it).second);
}
}
class GPythonBackendImpl final: public cv::gapi::GBackend::Priv
{
virtual void unpackKernel(ade::Graph &graph,
const ade::NodeHandle &op_node,
const cv::GKernelImpl &impl) override
{
PythonModel gm(graph);
const auto &kernel = cv::util::any_cast<cv::gapi::python::GPythonKernel>(impl.opaque);
gm.metadata(op_node).set(PythonUnit{kernel});
}
virtual EPtr compile(const ade::Graph &graph,
const cv::GCompileArgs &,
const std::vector<ade::NodeHandle> &nodes) const override
{
return EPtr{new GPythonExecutable(graph, nodes)};
}
virtual bool controlsMerge() const override
{
return true;
}
virtual bool allowsMerge(const cv::gimpl::GIslandModel::Graph &,
const ade::NodeHandle &,
const ade::NodeHandle &,
const ade::NodeHandle &) const override
{
return false;
}
};
GPythonExecutable::GPythonExecutable(const ade::Graph& g,
const std::vector<ade::NodeHandle>& nodes)
: m_g(g), m_gm(m_g)
{
using namespace cv::gimpl;
const auto is_op = [this](const ade::NodeHandle &nh)
{
return m_gm.metadata(nh).get<NodeType>().t == NodeType::OP;
};
auto it = std::find_if(nodes.begin(), nodes.end(), is_op);
GAPI_Assert(it != nodes.end() && "No operators found for this island?!");
ConstPythonModel cag(m_g);
m_op = *it;
m_kernel = cag.metadata(m_op).get<PythonUnit>().kernel;
// Ensure this is the only op in the graph
if (std::any_of(it+1, nodes.end(), is_op))
{
cv::util::throw_error
(std::logic_error
("Internal error: Python subgraph has multiple operations"));
}
m_out_info.reserve(m_op->outEdges().size());
for (const auto &e : m_op->outEdges())
{
const auto& out_data = m_gm.metadata(e->dstNode()).get<cv::gimpl::Data>();
m_out_info.push_back(cv::GTypeInfo{out_data.shape, out_data.kind, out_data.ctor});
}
const auto& op = m_gm.metadata(m_op).get<cv::gimpl::Op>();
m_in_metas.resize(op.args.size());
GAPI_Assert(m_op->inEdges().size() > 0);
for (const auto &in_eh : m_op->inEdges())
{
const auto& input_port = m_gm.metadata(in_eh).get<Input>().port;
const auto& input_nh = in_eh->srcNode();
const auto& input_meta = m_gm.metadata(input_nh).get<Data>().meta;
m_in_metas.at(input_port) = input_meta;
}
}
} // anonymous namespace
cv::gapi::GBackend cv::gapi::python::backend()
{
static cv::gapi::GBackend this_backend(std::make_shared<GPythonBackendImpl>());
return this_backend;
}
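
GPythonExecutable above packs the island's inputs into a GPythonContext, invokes the Python callable through GPythonKernel, and writeBack() routes each result to its graph output (Mat, Scalar, OpaqueRef or VectorRef). A hedged end-to-end sketch of that path with a multi-output custom kernel, modeled on the tests in this commit:

import cv2 as cv
import numpy as np

def custom_split3(img):
    # A kernel with several outputs returns a tuple, one element per output;
    # writeBack() then moves each element into the corresponding output slot.
    return tuple(cv.split(img))

def custom_mean(img):
    return cv.mean(img)

g_in = cv.GMat()
g_b, g_g, g_r = cv.gapi.split3(g_in)
g_mean = cv.gapi.mean(g_b)
comp = cv.GComputation(cv.GIn(g_in), cv.GOut(g_mean))

pkg = cv.gapi_wip_kernels((custom_split3, 'org.opencv.core.transform.split3'),
                          (custom_mean,   'org.opencv.core.math.mean'))
in_mat = np.full((4, 4, 3), 42, dtype=np.uint8)
actual = comp.apply(cv.gin(in_mat), args=cv.compile_args(pkg))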


@ -2199,6 +2199,7 @@ static PyMethodDef special_methods[] = {
#endif
#ifdef HAVE_OPENCV_GAPI
    {"GIn", CV_PY_FN_WITH_KW(pyopencv_cv_GIn), "GIn(...) -> GInputProtoArgs"},
    {"gapi_wip_kernels", CV_PY_FN_WITH_KW(pyopencv_cv_gapi_kernels), "kernels(...) -> GKernelPackage"},
    {"GOut", CV_PY_FN_WITH_KW(pyopencv_cv_GOut), "GOut(...) -> GOutputProtoArgs"},
    {"gin", CV_PY_FN_WITH_KW(pyopencv_cv_gin), "gin(...) -> ExtractArgsCallback"},
    {"descr_of", CV_PY_FN_WITH_KW(pyopencv_cv_descr_of), "descr_of(...) -> ExtractMetaCallback"},