diff --git a/cmake/OpenCVPackaging.cmake b/cmake/OpenCVPackaging.cmake
index 96aaacc5d9..e90aabb893 100644
--- a/cmake/OpenCVPackaging.cmake
+++ b/cmake/OpenCVPackaging.cmake
@@ -123,7 +123,7 @@ endif()
set(STD_OPENCV_LIBS opencv-data)
set(STD_OPENCV_DEV libopencv-dev)
-foreach(module 3d calib core dnn features flann gapi highgui
+foreach(module 3d calib core dnn features flann highgui
imgcodecs imgproc ml objdetect
photo stereo stitching ts video videoio)
if(HAVE_opencv_${module})
diff --git a/cmake/vars/OPENCV_DISABLE_THREAD_SUPPORT.cmake b/cmake/vars/OPENCV_DISABLE_THREAD_SUPPORT.cmake
index 8f3a4c0048..7608375d61 100644
--- a/cmake/vars/OPENCV_DISABLE_THREAD_SUPPORT.cmake
+++ b/cmake/vars/OPENCV_DISABLE_THREAD_SUPPORT.cmake
@@ -21,7 +21,6 @@ ocv_update(BUILD_opencv_java OFF)
# <[thread|mutex|condition_variable|future]>` and linkage into
# `libpthread` to work.
ocv_update(BUILD_opencv_objdetect OFF)
-ocv_update(BUILD_opencv_gapi OFF)
ocv_update(BUILD_opencv_dnn OFF)
set(OPJ_USE_THREAD "OFF" CACHE INTERNAL "")
diff --git a/doc/tutorials/gapi/anisotropic_segmentation/pics/massif_export_gapi.png b/doc/tutorials/gapi/anisotropic_segmentation/pics/massif_export_gapi.png
deleted file mode 100644
index f378ea870e..0000000000
Binary files a/doc/tutorials/gapi/anisotropic_segmentation/pics/massif_export_gapi.png and /dev/null differ
diff --git a/doc/tutorials/gapi/anisotropic_segmentation/pics/massif_export_gapi_fluid.png b/doc/tutorials/gapi/anisotropic_segmentation/pics/massif_export_gapi_fluid.png
deleted file mode 100644
index 11bb83bf4c..0000000000
Binary files a/doc/tutorials/gapi/anisotropic_segmentation/pics/massif_export_gapi_fluid.png and /dev/null differ
diff --git a/doc/tutorials/gapi/anisotropic_segmentation/pics/massif_export_ocv.png b/doc/tutorials/gapi/anisotropic_segmentation/pics/massif_export_ocv.png
deleted file mode 100644
index 60b5492da8..0000000000
Binary files a/doc/tutorials/gapi/anisotropic_segmentation/pics/massif_export_ocv.png and /dev/null differ
diff --git a/doc/tutorials/gapi/anisotropic_segmentation/pics/result.jpg b/doc/tutorials/gapi/anisotropic_segmentation/pics/result.jpg
deleted file mode 100644
index e69cd690aa..0000000000
Binary files a/doc/tutorials/gapi/anisotropic_segmentation/pics/result.jpg and /dev/null differ
diff --git a/doc/tutorials/gapi/anisotropic_segmentation/pics/segm.gif b/doc/tutorials/gapi/anisotropic_segmentation/pics/segm.gif
deleted file mode 100644
index a984d9da36..0000000000
Binary files a/doc/tutorials/gapi/anisotropic_segmentation/pics/segm.gif and /dev/null differ
diff --git a/doc/tutorials/gapi/anisotropic_segmentation/pics/segm_fluid.gif b/doc/tutorials/gapi/anisotropic_segmentation/pics/segm_fluid.gif
deleted file mode 100644
index 2180b434d4..0000000000
Binary files a/doc/tutorials/gapi/anisotropic_segmentation/pics/segm_fluid.gif and /dev/null differ
diff --git a/doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown b/doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown
deleted file mode 100644
index 64b68e644c..0000000000
--- a/doc/tutorials/gapi/anisotropic_segmentation/porting_anisotropic_segmentation.markdown
+++ /dev/null
@@ -1,404 +0,0 @@
-# Porting anisotropic image segmentation on G-API {#tutorial_gapi_anisotropic_segmentation}
-
-@prev_tutorial{tutorial_gapi_interactive_face_detection}
-@next_tutorial{tutorial_gapi_face_beautification}
-
-[TOC]
-
-# Introduction {#gapi_anisotropic_intro}
-
-In this tutorial you will learn:
-* How an existing algorithm can be transformed into a G-API
- computation (graph);
-* How to inspect and profile G-API graphs;
-* How to customize graph execution without changing its code.
-
-This tutorial is based on @ref
-tutorial_anisotropic_image_segmentation_by_a_gst.
-
-# Quick start: using OpenCV backend {#gapi_anisotropic_start}
-
-Before we start, let's review the original algorithm implementation:
-
-@include cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp
-
-## Examining calcGST() {#gapi_anisotropic_calcgst}
-
-The function calcGST() is clearly an image processing pipeline:
-* It is just a sequence of operations over a number of cv::Mat;
-* No logic (conditionals) or loops are involved in the code;
-* All functions operate on 2D images (like cv::Sobel, cv::multiply,
-cv::boxFilter, cv::sqrt, etc.).
-
-Considering the above, calcGST() is a great candidate to start
-with. In the original code, its prototype is defined like this:
-
-@snippet cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp calcGST_proto
-
-With G-API, we can define it as follows:
-
-@snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi.cpp calcGST_proto
-
-It is important to understand that the new G-API-based version of
-calcGST() will just produce a compute graph, in contrast to its
-original version, which actually calculates the values. This is a
-principal difference -- G-API-based functions like this are used to
-construct graphs, not to process the actual data.
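-
-To make the contrast concrete, here is a minimal sketch (not part of
-this tutorial's sources) showing that calling a G-API function only
-records a node for a future graph -- no pixels are touched at this
-point:
-
-```
-#include <opencv2/gapi.hpp>
-#include <opencv2/gapi/imgproc.hpp>
-
-void build_only()
-{
-    cv::GMat in;  // an empty "placeholder" object, no real image behind it
-    cv::GMat out = cv::gapi::boxFilter(in, -1, cv::Size(3,3));
-    // `out` is just a recorded operation -- no filtering has happened yet
-    (void)out;
-}
-```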
-
-Let's start implementing calcGST() with the calculation of the \f$J\f$
-matrix. This is what the original code looks like:
-
-@snippet cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp calcJ_header
-
-Here we need to declare output objects for every new operation (see
-img as the result of cv::Mat::convertTo, and imgDiffX and others as
-the results of cv::Sobel and cv::multiply).
-
-The G-API analogue is listed below:
-
-@snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi.cpp calcGST_header
-
-This snippet demonstrates the following syntactic differences between
-G-API and traditional OpenCV:
-* All standard G-API functions are by default placed in the "cv::gapi"
-namespace;
-* G-API operations _return_ their results -- there's no need to pass
-extra "output" parameters to the functions.
-
-Note -- this code also uses `auto` -- the types of intermediate objects
-like `img`, `imgDiffX`, and so on are inferred automatically by the
-C++ compiler. In this example, the types are determined by the G-API
-operation return values, which are all cv::GMat.
-
-G-API standard kernels try to follow OpenCV API conventions whenever
-possible -- so cv::gapi::sobel takes the same arguments as
-cv::Sobel, cv::gapi::mul follows cv::multiply, and so on (except
-that they have a return value).
-
-The rest of the calcGST() function can be implemented trivially in the
-same way. Below is its full source code:
-
-@snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi.cpp calcGST
-
-## Running G-API graph {#gapi_anisotropic_running}
-
-After calcGST() is defined in the G-API language, we can construct a
-graph based on it and finally run it -- pass an input image and obtain
-the result. Before we do that, let's have a look at the original
-code:
-
-@snippet cpp/tutorial_code/ImgProc/anisotropic_image_segmentation/anisotropic_image_segmentation.cpp main_extra
-
-G-API-based functions like calcGST() can't be applied to input data
-directly, since they are _construction_ code, not _processing_ code.
-In order to _run_ computations, a special object of class
-cv::GComputation needs to be created. This object wraps our G-API code
-(which is a composition of G-API data and operations) into a callable
-object, similar to C++11
-[std::function<>](https://en.cppreference.com/w/cpp/utility/functional/function).
-
-The cv::GComputation class has a number of constructors which can be
-used to define a graph. Generally, the user needs to pass graph
-boundaries -- the _input_ and _output_ objects on which a GComputation
-is defined. Then G-API analyzes the call flow from _outputs_ to
-_inputs_ and reconstructs the graph with the operations in between the
-specified boundaries. This may sound complex; however, in fact the
-code looks like this:
-
-@snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi.cpp main
-
-Note that this code differs slightly from the original: forming up
-the resulting image is also a part of the pipeline (done with
-cv::gapi::addWeighted).
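-
-For reference, here is a self-contained sketch of the same pattern,
-with a made-up one-operation graph (not the tutorial's calcGST()):
-
-```
-#include <opencv2/gapi.hpp>
-#include <opencv2/gapi/imgproc.hpp>
-#include <opencv2/imgcodecs.hpp>
-
-int main()
-{
-    // Graph boundaries: one input, one output
-    cv::GMat in;
-    cv::GMat out = cv::gapi::gaussianBlur(in, cv::Size(3,3), /*sigmaX*/ 1.);
-    cv::GComputation pipeline(in, out); // G-API reconstructs the graph in between
-
-    // The computation is now a callable object working on real data
-    cv::Mat input = cv::imread("input.png"), output;
-    pipeline.apply(input, output);
-    return 0;
-}
-```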
-
-The result of this G-API pipeline matches the original bit-exactly
-(given the same input image):
-
-
-
-## G-API initial version: full listing {#gapi_anisotropic_ocv}
-
-Below is the full listing of the initial anisotropic image
-segmentation port on G-API:
-
-@snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi.cpp full_sample
-
-# Inspecting the initial version {#gapi_anisotropic_inspect}
-
-Now that we have an initial version of our algorithm working with
-G-API, we can use it to inspect and learn how G-API works. This
-chapter covers two aspects: understanding the graph structure, and
-memory profiling.
-
-## Understanding the graph structure {#gapi_anisotropic_inspect_graph}
-
-G-API stands for "Graph API", but did you notice any graphs in the
-above example? That was one of the initial design goals -- G-API was
-designed with expressions in mind to make the adoption and porting
-process more straightforward. People _usually_ don't think in terms of
-_Nodes_ and _Edges_ when writing ordinary code, and so G-API, while
-being a Graph API, doesn't force its users to do that.
-
-However, a graph is still built implicitly when a cv::GComputation
-object is defined. It may be useful to inspect what the resulting
-graph looks like, to check whether it is generated correctly and
-really represents our algorithm. It is also useful to learn the
-structure of the graph to see if it has any redundancies.
-
-G-API allows dumping generated graphs to `.dot` files, which can then
-be visualized with [Graphviz](https://www.graphviz.org/), popular
-open-source graph visualization software.
-
-
-
-In order to dump our graph to a `.dot` file, set `GRAPH_DUMP_PATH` to a
-file name before running the application, e.g.:
-
- $ GRAPH_DUMP_PATH=segm.dot ./bin/example_tutorial_porting_anisotropic_image_segmentation_gapi
-
-Now this file can be visualized with a `dot` command like this:
-
- $ dot segm.dot -Tpng -o segm.png
-
-or viewed interactively with `xdot` (please refer to your
-distribution/operating system documentation on how to install these
-packages).
-
-
-
-The above diagram demonstrates a number of interesting aspects of
-G-API's internal algorithm representation:
-1. The underlying G-API graph is a bipartite graph: it consists of
-   _Operation_ and _Data_ nodes such that a _Data_ node can only be
-   connected to an _Operation_ node, an _Operation_ node can only be
-   connected to a _Data_ node, and nodes of a single kind are never
-   connected directly.
-2. The graph is directed -- every edge in the graph has a direction.
-3. The graph "begins" and "ends" with _Data_ nodes.
-4. A _Data_ node can have only a single writer and multiple readers.
-5. An _Operation_ node may have multiple inputs, though every input
-   must have a unique _port number_ (among inputs).
-6. An _Operation_ node may have multiple outputs, and every output
-   must have a unique _port number_ (among outputs).
-
-## Measuring memory footprint {#gapi_anisotropic_memory_ocv}
-
-Let's measure and compare the memory footprint of the algorithm in its
-two versions: G-API-based and OpenCV-based. At the moment, the G-API
-version is also OpenCV-based, since it falls back to OpenCV functions
-internally.
-
-On GNU/Linux, application memory footprint can be profiled with
-[Valgrind](http://valgrind.org/). On Debian/Ubuntu systems it can be
-installed like this (assuming you have administrator privileges):
-
- $ sudo apt-get install valgrind massif-visualizer
-
-Once installed, we can collect memory profiles easily for our two
-algorithm versions:
-
- $ valgrind --tool=massif --massif-out-file=ocv.out ./bin/example_tutorial_anisotropic_image_segmentation
- ==6101== Massif, a heap profiler
- ==6101== Copyright (C) 2003-2015, and GNU GPL'd, by Nicholas Nethercote
- ==6101== Using Valgrind-3.11.0 and LibVEX; rerun with -h for copyright info
- ==6101== Command: ./bin/example_tutorial_anisotropic_image_segmentation
- ==6101==
- ==6101==
- $ valgrind --tool=massif --massif-out-file=gapi.out ./bin/example_tutorial_porting_anisotropic_image_segmentation_gapi
- ==6117== Massif, a heap profiler
- ==6117== Copyright (C) 2003-2015, and GNU GPL'd, by Nicholas Nethercote
- ==6117== Using Valgrind-3.11.0 and LibVEX; rerun with -h for copyright info
- ==6117== Command: ./bin/example_tutorial_porting_anisotropic_image_segmentation_gapi
- ==6117==
- ==6117==
-
-Once done, we can inspect the collected profiles with
-[Massif Visualizer](https://github.com/KDE/massif-visualizer)
-(installed in the above step).
-
-Below is the visualized memory profile of the original OpenCV version
-of the algorithm:
-
-
-
-We see that memory is allocated as the application
-executes, reaching its peak in the calcGST() function; then the
-footprint drops as calcGST() completes its execution and all temporary
-buffers are freed. Massif reports a peak memory consumption of 7.6 MiB.
-
-Now let's have a look at the profile of the G-API version:
-
-
-
-Once the G-API computation is created and its execution starts, G-API
-allocates all required memory at once, and the memory profile then
-remains flat until the program terminates. Massif reports a peak
-memory consumption of 11.4 MiB.
-
-A reader may ask a fair question at this point -- is G-API that bad?
-What is the reason for using it, then?
-
-Fortunately, it is not. The reason we see increased memory consumption
-here is that the default, naive OpenCV-based backend is used to
-execute this graph. This backend serves mostly for quick prototyping
-and debugging of algorithms before offload/further optimization.
-
-This backend doesn't utilize any complex memory management strategies
-yet, since that is not its purpose at the moment. In the following
-chapter, we'll learn about the Fluid backend and see how the same
-G-API code can run in a completely different model (with the footprint
-shrunk to a number of kilobytes).
-
-# Backends and kernels {#gapi_anisotropic_backends}
-
-This chapter covers how a G-API computation can be executed in a
-special way -- e.g. offloaded to another device, or scheduled with
-special intelligence. G-API is designed to make its graphs portable --
-meaning that once a graph is defined in G-API terms, no changes
-should be required if we want to run it on a CPU, on a GPU, or on
-both devices at once. [G-API High-level overview](@ref gapi_hld) and
-[G-API Kernel API](@ref gapi_kernel_api) shed more light on the
-technical details which make this possible. In this chapter, we will
-utilize the G-API Fluid backend to make our graph cache-efficient on
-the CPU.
-
-G-API defines a _backend_ as the lower-level entity which knows how to
-run kernels. Backends may have (and, in fact, do have) different
-_Kernel APIs_ which are used to program and integrate kernels for
-those backends. In this context, a _kernel_ is an implementation of an
-_operation_, which is defined on the top API level (see the
-G_TYPED_KERNEL() macro).
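-
-To make the operation/kernel distinction concrete, below is a hedged
-sketch of an _operation_ (the interface) and a matching OpenCV (CPU)
-backend _kernel_ (one implementation); the names and the operation
-itself are illustrative, not part of this sample:
-
-```
-#include <opencv2/gapi.hpp>
-#include <opencv2/gapi/cpu/gcpukernel.hpp>
-
-// The operation: an interface, defined on the top API level
-G_TYPED_KERNEL(GDouble, <cv::GMat(cv::GMat)>, "sample.custom.double") {
-    static cv::GMatDesc outMeta(cv::GMatDesc in) { return in; } // output format == input format
-};
-
-// A kernel: one possible implementation of GDouble, for the OpenCV (CPU) backend
-GAPI_OCV_KERNEL(GOCVDouble, GDouble) {
-    static void run(const cv::Mat &in, cv::Mat &out) { out = in * 2; }
-};
-```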
-
-A backend is aware of device and platform specifics, and executes its
-kernels with those specifics in mind. For example, there may be a
-[Halide](http://halide-lang.org/) backend which allows writing
-(implementing) G-API operations in the Halide language and then
-generates functional Halide code for the portions of a G-API graph
-which map well there.
-
-## Running a graph with a Fluid backend {#gapi_anisotropic_fluid}
-
-OpenCV 4.0 is bundled with two G-API backends -- the default "OpenCV"
-which we just used, and a special "Fluid" backend.
-
-The Fluid backend reorganizes the execution to save memory and to
-achieve near-perfect cache locality, implementing a so-called
-"streaming" model of execution.
-
-In order to start using Fluid kernels, we first need to include the
-appropriate header files (which are not included by default):
-
-@snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi_fluid.cpp fluid_includes
-
-Once these headers are included, we can form a new _kernel package_
-and pass it to G-API:
-
-@snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi_fluid.cpp kernel_pkg
-
-In G-API, kernels (or operation implementations) are objects. Kernels
-are organized into collections, or _kernel packages_, represented by
-the class cv::GKernelPackage. The main purpose of a kernel package is
-to capture which kernels we would like to use in our graph, and it is
-then passed as a _graph compilation option_:
-
-@snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi_fluid.cpp kernel_pkg_use
-
-Traditional OpenCV is logically divided into modules, with every
-module providing a set of functions. In G-API, there are also
-"modules" which are represented as kernel packages provided by a
-particular backend. In this example, we pass Fluid kernel packages to
-G-API to utilize the appropriate Fluid functions in our graph.
-
-Kernel packages are combinable -- in the above example, we take the
-"Core" and "ImgProc" Fluid kernel packages and combine them into a
-single one. See the documentation reference on cv::gapi::combine.
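-
-A hedged sketch of such a combination (the package accessors are the
-real Fluid ones; the commented usage line assumes a `pipeline` and
-data objects from the earlier listings):
-
-```
-#include <opencv2/gapi/fluid/core.hpp>
-#include <opencv2/gapi/fluid/imgproc.hpp>
-
-const auto fluid_kernels = cv::gapi::combine(cv::gapi::core::fluid::kernels(),
-                                             cv::gapi::imgproc::fluid::kernels());
-// ... then pass the package as a graph compilation option:
-// pipeline.apply(in_mat, out_mat, cv::compile_args(fluid_kernels));
-```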
-
-If no kernel packages are specified in the options, G-API uses the
-_default_ package, which consists of default OpenCV implementations;
-thus, G-API graphs are executed via OpenCV functions by default. The
-OpenCV backend provides broader functional coverage than any other
-backend. If a kernel package is specified, as in this example, it is
-combined with the default one. This means that user-specified
-implementations will replace the default implementations in case of a
-conflict.
-
-
-
-
-## Troubleshooting and customization {#gapi_anisotropic_trouble}
-
-After the above modifications, the app (in OpenCV 4.0) should crash
-with a message like this:
-
-```
-$ ./bin/example_tutorial_porting_anisotropic_image_segmentation_gapi_fluid
-terminate called after throwing an instance of 'std::logic_error'
- what(): .../modules/gapi/src/backends/fluid/gfluidimgproc.cpp:436: Assertion kernelSize.width == 3 && kernelSize.height == 3 in function run failed
-
-Aborted (core dumped)
-```
-
-The Fluid backend has a number of limitations in OpenCV 4.0 (see this
-[wiki page](https://github.com/opencv/opencv/wiki/Graph-API) for a
-more up-to-date status). In particular, the Box filter used in this
-sample supports only a static 3x3 kernel size.
-
-We can overcome this problem easily by preventing G-API from using the
-Fluid version of the Box filter kernel in this sample. This can be
-done by removing the appropriate kernel from the kernel package we've
-just created:
-
-@snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi_fluid.cpp kernel_hotfix
-
-Now this kernel package doesn't have _any_ implementation of the Box
-filter kernel interface (specified as a template parameter). As
-described above, G-API will fall back to OpenCV to run this kernel.
-The resulting code with this change looks like this:
-
-@snippet cpp/tutorial_code/gapi/porting_anisotropic_image_segmentation/porting_anisotropic_image_segmentation_gapi_fluid.cpp kernel_pkg_proper
-
-Let's examine the memory profile for this sample after switching to
-the Fluid backend. Now it looks like this:
-
-
-
-Now the tool reports 4.7 MiB -- and we just changed a few lines in our
-code, without modifying the graph itself! It is a ~2.4X improvement
-over the previous G-API result, and a ~1.6X improvement over the
-original OpenCV version.
-
-Let's also examine what the internal representation of the graph looks
-like now. Dumping the graph to `.dot` results in a visualization like
-this:
-
-
-
-This graph doesn't differ structurally from its previous version (in
-terms of operations and data objects), though a changed layout (on the
-left side of the dump) is easily noticeable.
-
-The visualization reflects how G-API deals with mixed graphs, also
-called _heterogeneous_ graphs. The majority of operations in this
-graph are implemented with Fluid backend, but Box filters are executed
-by the OpenCV backend. One can easily see that the graph is partitioned
-(with rectangles). G-API groups connected operations based on their
-affinity, forming _subgraphs_ (or _islands_ in G-API terminology), and
-our top-level graph becomes a composition of multiple smaller
-subgraphs. Every backend determines how its subgraph (island) is
-executed, so the Fluid backend optimizes out memory where possible,
-while the six intermediate buffers accessed by the OpenCV Box filters
-are allocated fully and can't be optimized out.
-
-
-
-
-
-# Conclusion {#gapi_tutor_conclusion}
-
-This tutorial demonstrates what G-API is and what its key design
-concepts are, how an algorithm can be ported to G-API, and
-how to utilize graph model benefits after that.
-
-In OpenCV 4.0, G-API is still in its inception stage -- it is more a
-foundation for all future work, though ready for use even now.
-
-In the future, this tutorial will be extended with new chapters on
-custom kernel programming, parallelism, and more.
diff --git a/doc/tutorials/gapi/face_beautification/face_beautification.markdown b/doc/tutorials/gapi/face_beautification/face_beautification.markdown
deleted file mode 100644
index 1ceb416c99..0000000000
--- a/doc/tutorials/gapi/face_beautification/face_beautification.markdown
+++ /dev/null
@@ -1,442 +0,0 @@
-# Implementing a face beautification algorithm with G-API {#tutorial_gapi_face_beautification}
-
-@prev_tutorial{tutorial_gapi_anisotropic_segmentation}
-
-[TOC]
-
-# Introduction {#gapi_fb_intro}
-
-In this tutorial you will learn:
-* Basics of a sample face beautification algorithm;
-* How to infer different networks inside a pipeline with G-API;
-* How to run a G-API pipeline on a video stream.
-
-## Prerequisites {#gapi_fb_prerec}
-
-This sample requires:
-- PC with GNU/Linux or Microsoft Windows (Apple macOS is supported but
- was not tested);
-- OpenCV 4.2 or later built with Intel® Distribution of [OpenVINO™
- Toolkit](https://docs.openvinotoolkit.org/) (building with [Intel®
- TBB](https://www.threadingbuildingblocks.org/intel-tbb-tutorial) is
- a plus);
-- The following topologies from OpenVINO™ Toolkit [Open Model
- Zoo](https://github.com/opencv/open_model_zoo):
- - `face-detection-adas-0001`;
- - `facial-landmarks-35-adas-0002`.
-
-## Face beautification algorithm {#gapi_fb_algorithm}
-
-We will implement a simple face beautification algorithm using a
-combination of modern Deep Learning techniques and traditional
-Computer Vision. The general idea behind the algorithm is to make
-the face skin smoother while preserving face features like eye or
-mouth contrast. The algorithm identifies parts of the face using DNN
-inference, applies different filters to the parts found, and then
-combines them into the final result using basic image arithmetic:
-
-\dot
-strict digraph Pipeline {
- node [shape=record fontname=Helvetica fontsize=10 style=filled color="#4c7aa4" fillcolor="#5b9bd5" fontcolor="white"];
- edge [color="#62a8e7"];
- ordering="out";
- splines=ortho;
- rankdir=LR;
-
- input [label="Input"];
- fd [label="Face\ndetector"];
- bgMask [label="Generate\nBG mask"];
- unshMask [label="Unsharp\nmask"];
- bilFil [label="Bilateral\nfilter"];
- shMask [label="Generate\nsharp mask"];
- blMask [label="Generate\nblur mask"];
- mul_1 [label="*" fontsize=24 shape=circle labelloc=b];
- mul_2 [label="*" fontsize=24 shape=circle labelloc=b];
- mul_3 [label="*" fontsize=24 shape=circle labelloc=b];
-
- subgraph cluster_0 {
- style=dashed
- fontsize=10
- ld [label="Landmarks\ndetector"];
- label="for each face"
- }
-
- sum_1 [label="+" fontsize=24 shape=circle];
- out [label="Output"];
-
- temp_1 [style=invis shape=point width=0];
- temp_2 [style=invis shape=point width=0];
- temp_3 [style=invis shape=point width=0];
- temp_4 [style=invis shape=point width=0];
- temp_5 [style=invis shape=point width=0];
- temp_6 [style=invis shape=point width=0];
- temp_7 [style=invis shape=point width=0];
- temp_8 [style=invis shape=point width=0];
- temp_9 [style=invis shape=point width=0];
-
- input -> temp_1 [arrowhead=none]
- temp_1 -> fd -> ld
- ld -> temp_4 [arrowhead=none]
- temp_4 -> bgMask
- bgMask -> mul_1 -> sum_1 -> out
-
- temp_4 -> temp_5 -> temp_6 [arrowhead=none constraint=none]
- ld -> temp_2 -> temp_3 [style=invis constraint=none]
-
- temp_1 -> {unshMask, bilFil}
- fd -> unshMask [style=invis constraint=none]
- unshMask -> bilFil [style=invis constraint=none]
-
- bgMask -> shMask [style=invis constraint=none]
- shMask -> blMask [style=invis constraint=none]
- mul_1 -> mul_2 [style=invis constraint=none]
- temp_5 -> shMask -> mul_2
- temp_6 -> blMask -> mul_3
-
- unshMask -> temp_2 -> temp_5 [style=invis]
- bilFil -> temp_3 -> temp_6 [style=invis]
-
- mul_2 -> temp_7 [arrowhead=none]
- mul_3 -> temp_8 [arrowhead=none]
-
- temp_8 -> temp_7 [arrowhead=none constraint=none]
- temp_7 -> sum_1 [constraint=none]
-
- unshMask -> mul_2 [constraint=none]
- bilFil -> mul_3 [constraint=none]
- temp_1 -> mul_1 [constraint=none]
-}
-\enddot
-
-Briefly, the algorithm is described as follows:
-- Input image \f$I\f$ is passed to unsharp mask and bilateral filters
- (\f$U\f$ and \f$L\f$ respectively);
-- Input image \f$I\f$ is passed to an SSD-based face detector;
-- SSD result (a \f$[1 \times 1 \times 200 \times 7]\f$ blob) is parsed
- and converted to an array of faces;
-- Every face is passed to a landmarks detector;
-- Based on landmarks found for every face, three image masks are
- generated:
- - A background mask \f$b\f$ -- indicating which areas from the
- original image to keep as-is;
- - A face part mask \f$p\f$ -- identifying regions to preserve
- (sharpen);
- - A face skin mask \f$s\f$ -- identifying regions to blur;
-- The final result \f$O\f$ is a composition of features above
- calculated as \f$O = b*I + p*U + s*L\f$.
-
-Generating face element masks based on a limited set of features (just
-35 per face, including all its parts) is not trivial and is described
-in the sections below.
-
-# Constructing a G-API pipeline {#gapi_fb_pipeline}
-
-## Declaring Deep Learning topologies {#gapi_fb_decl_nets}
-
-This sample uses two DNN detectors. Every network takes one input
-and produces one output. In G-API, networks are defined with the
-G_API_NET() macro:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp net_decl
-
-To get more information, see
-[Declaring Deep Learning topologies](@ref gapi_ifd_declaring_nets)
-described in the "Face Analytics pipeline" tutorial.
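-
-As a reminder, such declarations look roughly like this (the type
-names and tag strings here are illustrative, not the sample's actual
-ones):
-
-```
-#include <opencv2/gapi/infer.hpp>
-
-G_API_NET(FaceDetector,  <cv::GMat(cv::GMat)>, "sample.face-detector");
-G_API_NET(LandmDetector, <cv::GMat(cv::GMat)>, "sample.landmarks-detector");
-```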
-
-## Describing the processing graph {#gapi_fb_ppline}
-
-The code below generates a graph for the algorithm above:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp ppl
-
-The resulting graph is a mixture of G-API's standard operations,
-user-defined operations (namespace `custom::`), and DNN inference.
-The generic function `cv::gapi::infer<>()` allows triggering inference
-within the pipeline; the networks to infer with are specified as
-template parameters. The sample code uses two versions of
-`cv::gapi::infer<>()`:
-- A frame-oriented one is used to detect faces on the input frame.
-- An ROI-list-oriented one is used to run landmarks inference on a
-  list of faces -- this version produces an array of landmarks for
-  every face.
-
-More on this in "Face Analytics pipeline"
-([Building a GComputation](@ref gapi_ifd_gcomputation) section).
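-
-Schematically, the two flavors look like this (a sketch assuming the
-network types declared above; `custom::GetFaces` is a hypothetical
-post-processing operation that turns the detector blob into a list of
-rectangles):
-
-```
-namespace custom {
-G_TYPED_KERNEL(GetFaces, <cv::GArray<cv::Rect>(cv::GMat)>, "sample.get-faces") {
-    static cv::GArrayDesc outMeta(const cv::GMatDesc&) { return cv::empty_array_desc(); }
-};
-} // namespace custom
-
-cv::GMat in;
-cv::GMat blob = cv::gapi::infer<FaceDetector>(in);               // frame-oriented
-cv::GArray<cv::Rect> faces = custom::GetFaces::on(blob);         // hypothetical post-processing
-cv::GArray<cv::GMat> landms = cv::gapi::infer<LandmDetector>(faces, in); // ROI-list-oriented
-```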
-
-## Unsharp mask in G-API {#gapi_fb_unsh}
-
-The unsharp mask \f$U\f$ for image \f$I\f$ is defined as:
-
-\f[U = I - s * L(M(I)),\f]
-
-where \f$M()\f$ is a median filter, \f$L()\f$ is the Laplace operator,
-and \f$s\f$ is a strength coefficient. While G-API doesn't provide
-this function out-of-the-box, it is expressed naturally with the
-existing G-API operations:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp unsh
-
-Note that the code snippet above is a regular C++ function defined
-with G-API types. Users can write functions like this to simplify
-graph construction; when called, such a function just adds the
-relevant nodes to the pipeline it is used in.
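-
-For orientation, the function has essentially this shape (a sketch:
-`custom::GLaplacian` stands for a Laplacian operation which we assume
-is available as a custom kernel; parameter names are illustrative):
-
-```
-cv::GMat unsharpMask(const cv::GMat &src, int ksize, double strength)
-{
-    cv::GMat median  = cv::gapi::medianBlur(src, ksize);             // M(I)
-    cv::GMat laplace = custom::GLaplacian::on(median, CV_8U);        // L(M(I)), assumed custom op
-    return cv::gapi::addWeighted(src, 1.0, laplace, -strength, 0.0); // U = I - s*L(M(I))
-}
-```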
-
-# Custom operations {#gapi_fb_proc}
-
-The face beautification graph uses custom operations
-extensively. This chapter focuses on the most interesting kernels;
-refer to [G-API Kernel API](@ref gapi_kernel_api) for general
-information on defining operations and implementing kernels in G-API.
-
-## Face detector post-processing {#gapi_fb_face_detect}
-
-A face detector output is converted to an array of faces with the
-following kernel:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp vec_ROI
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp fd_pp
-
-## Facial landmarks post-processing {#gapi_fb_landm_detect}
-
-The algorithm infers locations of face elements (like the eyes, the
-mouth and the head contour itself) using a generic facial landmarks
-detector from OpenVINO™ Open Model Zoo. However, the detected
-landmarks as-is are not enough to generate masks --- this operation
-requires regions of interest on the face represented by closed
-contours, so some interpolation is applied to get them. This landmark
-processing and interpolation is performed by the following kernel:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp ld_pp_cnts
-
-The kernel takes two arrays of denormalized landmark coordinates and
-returns an array of closed contours of face elements and an array of
-closed face contours; in other words, the first output is an array of
-contours of image areas to be sharpened, and the second is an array of
-contours of areas to be smoothed.
-
-Here and below `Contour` is a vector of points.
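-
-In code form, that is simply:
-
-```
-using Contour = std::vector<cv::Point>;
-```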
-
-### Getting an eye contour {#gapi_fb_ld_eye}
-
-Eye contours are estimated with the following function:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp ld_pp_incl
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp ld_pp_eye
-
-Briefly, this function restores the bottom side of an eye by a
-half-ellipse based on two points at the left and right eye
-corners. In fact, `cv::ellipse2Poly()` is used to approximate the eye
-region, and the function only defines the ellipse parameters based on
-just two points:
-- The ellipse center and the \f$X\f$ half-axis calculated from the two
-  eye points;
-- The \f$Y\f$ half-axis calculated according to the assumption that an
-  average eye width is \f$1/3\f$ of its length;
-- The start and the end angles, which are 0 and 180 (refer to the
-  `cv::ellipse()` documentation);
-- The angle delta: how many points to produce in the contour;
-- The inclination angle of the axes.
-
-The use of `atan2()` instead of just `atan()` in the function
-`custom::getLineInclinationAngleDegrees()` is essential, as it allows
-returning a negative value depending on the signs of `x` and `y`, so
-we can get the right angle even in the case of an upside-down face
-arrangement (if we put the points in the right order, of course).
-
-### Getting a forehead contour {#gapi_fb_ld_fhd}
-
-The function approximates the forehead contour:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp ld_pp_fhd
-
-As we have only jaw points in our detected landmarks, we have to get a
-half-ellipse based on three points of the jaw: the leftmost, the
-rightmost and the lowest one. The jaw width is assumed to be equal to
-the forehead width, and the latter is calculated using the left and
-right points. As for the \f$Y\f$ axis, we have no points to derive it
-directly; instead, we assume that the forehead height is about
-\f$2/3\f$ of the jaw height, which can be figured out from the face
-center (the midpoint between the left and right points) and the lowest
-jaw point.
-
-## Drawing masks {#gapi_fb_masks_drw}
-
-When we have all the contours needed, we are able to draw masks:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp msk_ppline
-
-The steps to get the masks are:
-* the "sharp" mask calculation:
- * fill the contours that should be sharpened;
- * blur that to get the "sharp" mask (`mskSharpG`);
-* the "bilateral" mask calculation:
- * fill all the face contours fully;
- * blur that;
- * subtract areas which intersect with the "sharp" mask --- and get the
- "bilateral" mask (`mskBlurFinal`);
-* the background mask calculation:
- * add the two previous masks;
- * set all non-zero pixels of the result to 255 (via `cv::gapi::threshold()`);
- * invert the output (via `cv::gapi::bitwise_not`) to get the background
- mask (`mskNoFaces`) -- see the sketch after this list.
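-
-A minimal sketch of that last step (the mask names follow the list
-above):
-
-```
-cv::GMat sum        = cv::gapi::add(mskSharpG, mskBlurFinal);
-cv::GMat allFaces   = cv::gapi::threshold(sum, 0, 255, cv::THRESH_BINARY);
-cv::GMat mskNoFaces = cv::gapi::bitwise_not(allFaces);
-```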
-
-# Configuring and running the pipeline {#gapi_fb_comp_args}
-
-Once the graph is fully expressed, we can finally compile it and run
-it on real data. G-API graph compilation is the stage where the G-API
-framework actually understands which kernels and networks to use. This
-configuration happens via G-API compilation arguments.
-
-## DNN parameters {#gapi_fb_comp_args_net}
-
-This sample uses the OpenVINO™ Toolkit Inference Engine backend for DL
-inference, which is configured in the following way:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp net_param
-
-Every `cv::gapi::ie::Params<>` object is related to the network
-specified in its template argument. We should pass there the network
-type we defined with `G_API_NET()` at the beginning of the tutorial.
-
-Network parameters are then wrapped in `cv::gapi::NetworkPackage`:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp netw
-
-More details in "Face Analytics Pipeline"
-([Configuring the pipeline](@ref gapi_ifd_configuration) section).
-
-## Kernel packages {#gapi_fb_comp_args_kernels}
-
-In this example we use a lot of custom kernels; in addition, we use
-the Fluid backend to optimize memory usage for G-API's standard
-kernels where applicable. The resulting kernel package is formed like
-this:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp kern_pass_1
-
-## Compiling the streaming pipeline {#gapi_fb_compiling}
-
-G-API optimizes execution for video streams when compiled in the
-"Streaming" mode.
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp str_comp
-
-More on this in "Face Analytics Pipeline"
-([Configuring the pipeline](@ref gapi_ifd_configuration) section).
-
-## Running the streaming pipeline {#gapi_fb_running}
-
-In order to run the G-API streaming pipeline, all we need to do is
-specify the input video source, call
-`cv::GStreamingCompiled::start()`, and then fetch the pipeline
-processing results:
-
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp str_src
-@snippet cpp/tutorial_code/gapi/face_beautification/face_beautification.cpp str_loop
-
-Once results are ready and can be pulled from the pipeline, we display
-them on the screen and handle GUI events.
-
-See [Running the pipeline](@ref gapi_ifd_running) section
-in the "Face Analytics Pipeline" tutorial for more details.
-
-# Conclusion {#gapi_fb_cncl}
-
-The tutorial has two goals: to show the use of brand-new features of
-G-API introduced in OpenCV 4.2, and to give a basic understanding of a
-sample face beautification algorithm.
-
-The result of the algorithm application:
-
-
-
-On the test machine (Intel® Core™ i7-8700) the G-API-optimized video
-pipeline outperforms its serial (non-pipelined) version by a factor of
-**2.7** -- meaning that for such a non-trivial graph, proper
-pipelining can bring an almost 3x increase in performance.
-
-
diff --git a/doc/tutorials/gapi/face_beautification/pics/example.jpg b/doc/tutorials/gapi/face_beautification/pics/example.jpg
deleted file mode 100644
index 99a538f78c..0000000000
Binary files a/doc/tutorials/gapi/face_beautification/pics/example.jpg and /dev/null differ
diff --git a/doc/tutorials/gapi/interactive_face_detection/interactive_face_detection.markdown b/doc/tutorials/gapi/interactive_face_detection/interactive_face_detection.markdown
deleted file mode 100644
index 27916b4176..0000000000
--- a/doc/tutorials/gapi/interactive_face_detection/interactive_face_detection.markdown
+++ /dev/null
@@ -1,355 +0,0 @@
-# Face analytics pipeline with G-API {#tutorial_gapi_interactive_face_detection}
-
-@next_tutorial{tutorial_gapi_anisotropic_segmentation}
-
-[TOC]
-
-# Overview {#gapi_ifd_intro}
-
-In this tutorial you will learn:
-* How to integrate Deep Learning inference in a G-API graph;
-* How to run a G-API graph on a video stream and obtain data from it.
-
-# Prerequisites {#gapi_ifd_prereq}
-
-This sample requires:
-- PC with GNU/Linux or Microsoft Windows (Apple macOS is supported but
- was not tested);
-- OpenCV 4.2 or later built with Intel® Distribution of [OpenVINO™
- Toolkit](https://docs.openvinotoolkit.org/) (building with [Intel®
- TBB](https://www.threadingbuildingblocks.org/intel-tbb-tutorial) is
- a plus);
-- The following topologies from OpenVINO™ Toolkit [Open Model
- Zoo](https://github.com/opencv/open_model_zoo):
- - `face-detection-adas-0001`;
- - `age-gender-recognition-retail-0013`;
- - `emotions-recognition-retail-0003`.
-
-# Introduction: why G-API {#gapi_ifd_why}
-
-Many computer vision algorithms run on a video stream rather than on
-individual images. Stream processing usually consists of multiple
-steps -- like decode, preprocessing, detection, tracking,
-classification (on detected objects), and visualization -- forming a
-*video processing pipeline*. Moreover, many of the steps of such a
-pipeline can run in parallel -- modern platforms have different
-hardware blocks on the same chip, like decoders and GPUs, and extra
-accelerators can be plugged in as extensions, like the Intel® Movidius™
-Neural Compute Stick for deep learning offload.
-
-Given this manifold of options and the variety of video analytics
-algorithms, managing such pipelines effectively quickly becomes a
-problem. Of course, it can be done manually, but this approach doesn't
-scale: if a change is required in the algorithm (e.g. a new pipeline
-step is added), or if it is ported to a new platform with different
-capabilities, the whole pipeline needs to be re-optimized.
-
-Starting with version 4.2, OpenCV offers a solution to this
-problem. OpenCV G-API can now manage Deep Learning inference (a
-cornerstone of any modern analytics pipeline) together with
-traditional Computer Vision, as well as video capturing/decoding, all
-in a single pipeline. G-API takes care of the pipelining itself -- so
-if the algorithm or platform changes, the execution model adapts to it
-automatically.
-
-# Pipeline overview {#gapi_ifd_overview}
-
-Our sample application is based on the ["Interactive Face Detection"]
-demo from OpenVINO™ Toolkit Open Model Zoo. A simplified pipeline
-consists of the following steps:
-1. Image acquisition and decode;
-2. Detection with preprocessing;
-3. Classification with preprocessing for every detected object with
- two networks;
-4. Visualization.
-
-\dot
-digraph pipeline {
- node [shape=record fontname=Helvetica fontsize=10 style=filled color="#4c7aa4" fillcolor="#5b9bd5" fontcolor="white"];
- edge [color="#62a8e7"];
- splines=ortho;
-
- rankdir = LR;
- subgraph cluster_0 {
- color=invis;
- capture [label="Capture\nDecode"];
- resize [label="Resize\nConvert"];
- detect [label="Detect faces"];
- capture -> resize -> detect
- }
-
- subgraph cluster_1 {
- graph[style=dashed];
-
- subgraph cluster_2 {
- color=invis;
- temp_4 [style=invis shape=point width=0];
- postproc_1 [label="Crop\nResize\nConvert"];
- age_gender [label="Classify\nAge/gender"];
- postproc_1 -> age_gender [constraint=true]
- temp_4 -> postproc_1 [constraint=none]
- }
-
- subgraph cluster_3 {
- color=invis;
- postproc_2 [label="Crop\nResize\nConvert"];
- emo [label="Classify\nEmotions"];
- postproc_2 -> emo [constraint=true]
- }
- label="(for each face)";
- }
-
- temp_1 [style=invis shape=point width=0];
- temp_2 [style=invis shape=point width=0];
- detect -> temp_1 [arrowhead=none]
- temp_1 -> postproc_1
-
- capture -> {temp_4, temp_2} [arrowhead=none constraint=false]
- temp_2 -> postproc_2
-
- temp_1 -> temp_2 [arrowhead=none constraint=false]
-
- temp_3 [style=invis shape=point width=0];
- show [label="Visualize\nDisplay"];
-
- {age_gender, emo} -> temp_3 [arrowhead=none]
- temp_3 -> show
-}
-\enddot
-
-# Constructing a pipeline {#gapi_ifd_constructing}
-
-Constructing a G-API graph for a video streaming case does not differ
-much from a [regular usage](@ref gapi_example) of G-API -- it is still
-about defining graph *data* (with cv::GMat, cv::GScalar, and
-cv::GArray) and *operations* over it. Inference also becomes an
-operation in the graph, but it is defined in a slightly different way.
-
-## Declaring Deep Learning topologies {#gapi_ifd_declaring_nets}
-
-In contrast with traditional CV functions (see [core] and [imgproc]),
-where G-API declares distinct operations for every function, inference
-in G-API is a single generic operation cv::gapi::infer<>. As usual, it
-is just an interface and it can be implemented in a number of ways
-under the hood. In OpenCV 4.2, only an OpenVINO™ Inference
-Engine-based backend is available, with OpenCV's own DNN module-based
-backend yet to come.
-
-cv::gapi::infer<> is _parametrized_ by the details of a topology we are
-going to execute. Like operations, topologies in G-API are strongly
-typed and are defined with a special macro G_API_NET():
-
-@snippet cpp/tutorial_code/gapi/age_gender_emotion_recognition/age_gender_emotion_recognition.cpp G_API_NET
-
-Similar to how operations are defined with G_API_OP(), network
-description requires three parameters:
-1. A type name. Every defined topology is declared as a distinct C++
-   type which is used further in the program -- see below;
-2. A `std::function<>`-like API signature. G-API treats networks as
-   regular "functions" which take and return data. Here network
-   `Faces` (a detector) takes a cv::GMat and returns a cv::GMat, while
-   network `AgeGender` is known to provide two outputs (age and gender
-   blobs, respectively) -- so it has a `std::tuple<>` as its return
-   type (see the sketch after this list).
-3. A topology name -- can be any non-empty string; G-API uses
-   these names to distinguish networks internally. Names should be
-   unique within the scope of a single graph.
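-
-For instance, a two-output network is declared with a `std::tuple<>`
-return type, roughly like this (the typedef works around commas in the
-macro argument; the tag string here is illustrative):
-
-```
-using AGInfo = std::tuple<cv::GMat, cv::GMat>;
-G_API_NET(AgeGender, <AGInfo(cv::GMat)>, "sample.age-gender-recognition");
-```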
-
-## Building a GComputation {#gapi_ifd_gcomputation}
-
-Now the above pipeline is expressed in G-API like this:
-
-@snippet cpp/tutorial_code/gapi/age_gender_emotion_recognition/age_gender_emotion_recognition.cpp GComputation
-
-Every pipeline starts with declaring empty data objects -- which act
-as inputs to the pipeline. Then we call the generic cv::gapi::infer<>
-specialized to the `Faces` detection network. cv::gapi::infer<>
-inherits its signature from its template parameter -- and in this case
-it expects one input cv::GMat and produces one output cv::GMat.
-
-In this sample we use a pre-trained SSD-based network and its output
-needs to be parsed into an array of detections (object regions of
-interest, ROIs). This is done by a custom operation `custom::PostProc`,
-which returns an array of rectangles (of type `cv::GArray<cv::Rect>`)
-back to the pipeline. This operation also filters out results by a
-confidence threshold -- and these details are hidden in the kernel
-itself. Still, at the moment of graph construction we operate with
-interfaces only and don't need actual kernels to express the pipeline
--- so the implementation of this post-processing will be listed later.
-
-After the detection result output is parsed into an array of objects,
-we can run classification on any of them. G-API doesn't support syntax
-for in-graph loops like `for_each()` yet, but instead cv::gapi::infer<>
-comes with a special list-oriented overload.
-
-The user can call cv::gapi::infer<> with a cv::GArray as the first
-argument; G-API then assumes it needs to run the associated network
-on every rectangle from the given list on the given frame (the second
-argument). The result of such an operation is also a list -- a
-cv::GArray of cv::GMat.
-
-Since the `AgeGender` network itself produces two outputs, its output
-type for the list-based version of cv::gapi::infer is a tuple of
-arrays. We use `std::tie()` to decompose this output into two distinct
-objects.
-
-The `Emotions` network produces a single output, so its list-based
-inference return type is `cv::GArray<cv::GMat>`.
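-
-Schematically (a sketch assuming the network types declared above and
-the `faces`/`in` objects from the listing):
-
-```
-cv::GArray<cv::GMat> ages, genders;
-std::tie(ages, genders) = cv::gapi::infer<AgeGender>(faces, in);      // two outputs per face
-cv::GArray<cv::GMat> emotions = cv::gapi::infer<Emotions>(faces, in); // one output per face
-```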
-
-# Configuring the pipeline {#gapi_ifd_configuration}
-
-G-API strictly separates construction from configuration -- with the
-idea of keeping the algorithm code itself platform-neutral. In the above
-listings we only declared our operations and expressed the overall
-data flow, but didn't even mention that we use OpenVINO™. We only
-described *what* we do, but not *how* we do it. Keeping these two
-aspects clearly separated is the design goal for G-API.
-
-Platform-specific details arise when the pipeline is *compiled* --
-i.e. turned from a declarative form into an executable one. The way
-*how* to run things is specified via compilation arguments, and the new
-inference/streaming features are no exception to this rule.
-
-G-API is built on backends which implement interfaces (see
-[Architecture] and [Kernels] for details) -- thus cv::gapi::infer<> is
-a function which can be implemented by different backends. In OpenCV
4.2, only the OpenVINO™ Inference Engine backend for inference is
-available. Every inference backend in G-API has to provide a special
-parameterizable structure to express *backend-specific* neural network
-parameters -- and in this case, it is cv::gapi::ie::Params:
-
-@snippet cpp/tutorial_code/gapi/age_gender_emotion_recognition/age_gender_emotion_recognition.cpp Param_Cfg
-
-Here we define three parameter objects: `det_net`, `age_net`, and
-`emo_net`. Every object is a cv::gapi::ie::Params structure
-parametrization for a particular network we use. At the compilation
-stage, G-API automatically matches network parameters with their
-cv::gapi::infer<> calls in the graph using this information.
-
-Regardless of the topology, every parameter structure is constructed
-with three string arguments -- specific to the OpenVINO™ Inference
-Engine:
-1. Path to the topology's intermediate representation (.xml file);
-2. Path to the topology's model weights (.bin file);
-3. The device to run on -- "CPU", "GPU", or others -- depending on your
-OpenVINO™ Toolkit installation.
-In the sample, these arguments are taken from the command-line parser.
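-
-For example, the detector configuration might look like this (the
-paths here are illustrative; in the sample they come from the command
-line):
-
-```
-auto det_net = cv::gapi::ie::Params<Faces> {
-    "face-detection-adas-0001.xml",   // path to the topology IR
-    "face-detection-adas-0001.bin",   // path to the model weights
-    "CPU"                             // device to run on
-};
-```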
-
-Once networks are defined and custom kernels are implemented, the
-pipeline is compiled for streaming:
-
-@snippet cpp/tutorial_code/gapi/age_gender_emotion_recognition/age_gender_emotion_recognition.cpp Compile
-
-cv::GComputation::compileStreaming() triggers a special video-oriented
-form of graph compilation where G-API tries to optimize
-throughput. The result of this compilation is an object of the special
-type cv::GStreamingCompiled -- in contrast to a traditional callable
-cv::GCompiled, these objects are closer to media players in their
-semantics.
-
-@note There is no need to pass metadata arguments describing the
-format of the input video stream to
-cv::GComputation::compileStreaming() -- G-API automatically figures
-out the formats of the input vector and adjusts the pipeline to
-these formats on the fly. The user can still pass metadata there, as
-with the regular cv::GComputation::compile(), in order to fix the
-pipeline to a specific input format.
-
-# Running the pipeline {#gapi_ifd_running}
-
-Pipelining optimization is based on processing multiple input video
-frames simultaneously, running different steps of the pipeline in
-parallel. This is why it works best when the framework takes full
-control over the video stream.
-
-The idea behind the streaming API is that the user specifies an *input
-source* for the pipeline and then G-API manages its execution
-automatically until the source ends or the user interrupts the
-execution. G-API pulls new image data from the source and passes it to
-the pipeline for processing.
-
-Streaming sources are represented by the interface
-cv::gapi::wip::IStreamSource. Objects implementing this interface may
-be passed to `GStreamingCompiled` as regular inputs via the `cv::gin()`
-helper function. In OpenCV 4.2, only one streaming source is allowed
-per pipeline -- this requirement will be relaxed in the future.
-
-OpenCV comes with the great class cv::VideoCapture, and by default
-G-API ships with a stream source class based on it --
-cv::gapi::wip::GCaptureSource. Users can implement their own
-streaming sources, e.g. using [VAAPI] or other media or networking
-APIs.
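-
-Setting such a source on a compiled streaming pipeline is a one-liner
-(a sketch assuming `pipeline` is a cv::GStreamingCompiled and `input`
-holds a file path or a camera index string):
-
-```
-pipeline.setSource(cv::gin(cv::gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(input)));
-```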
-
-The sample application specifies the input source as follows:
-
-@snippet cpp/tutorial_code/gapi/age_gender_emotion_recognition/age_gender_emotion_recognition.cpp Source
-
-Please note that a GComputation may still have multiple inputs like
-cv::GMat, cv::GScalar, or cv::GArray objects. Users can pass their
-respective host-side types (cv::Mat, cv::Scalar, std::vector<>) in the
-input vector as well, but in Streaming mode these objects will create
-"endless" constant streams. Mixing a real video source stream and a
-const data stream is allowed.
-
-Running a pipeline is easy -- just call
-cv::GStreamingCompiled::start() and fetch your data with blocking
-cv::GStreamingCompiled::pull() or non-blocking
-cv::GStreamingCompiled::try_pull(); repeat until the stream ends:
-
-@snippet cpp/tutorial_code/gapi/age_gender_emotion_recognition/age_gender_emotion_recognition.cpp Run
-
-The above code may look complex, but in fact it handles two modes --
-with and without a graphical user interface (GUI):
-- When the sample is running in "headless" mode (the `--pure` option is
-  set), this code simply pulls data from the pipeline with the
-  blocking `pull()` until it ends; this is the most performant mode of
-  execution (see the sketch after this list).
-- When results are also displayed on the screen, the Window System
-  needs some time to refresh the window contents and handle
-  GUI events. In this case, the demo pulls data with the non-blocking
-  `try_pull()` until there is no more data available (which does not
-  mark the end of the stream -- it just means new data is not ready
-  yet), and only then displays the latest obtained result and refreshes
-  the screen. Reducing the time spent in the GUI with this trick
-  increases the overall performance a little bit.
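-
-The headless branch boils down to a loop like this (a simplified
-sketch; the actual output objects in the sample differ):
-
-```
-pipeline.start();
-cv::Mat frame;
-std::vector<cv::Rect> faces;
-while (pipeline.pull(cv::gout(frame, faces))) {
-    // consume the results; pull() blocks until the next frame is processed
-}
-```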
-
-# Comparison with serial mode {#gapi_ifd_comparison}
-
-The sample can also run in a serial mode for reference and
-benchmarking purposes. In this case, a regular
-cv::GComputation::compile() is used and a regular single-frame
-cv::GCompiled object is produced; the pipelining optimization is not
-applied within G-API; it is the user's responsibility to acquire image
-frames from the cv::VideoCapture object and pass them to G-API.
-
-@snippet cpp/tutorial_code/gapi/age_gender_emotion_recognition/age_gender_emotion_recognition.cpp Run_Serial
-
-On a test machine (Intel® Core™ i5-6600), with OpenCV built with
-[Intel® TBB]
-support, the detector network assigned to the CPU, and the classifiers
-to the iGPU, the pipelined sample outperforms the serial one by a
-factor of 1.36x (thus adding +36% in overall throughput).
-
-# Conclusion {#gapi_ifd_conclusion}
-
-G-API introduces a technological way to build and optimize hybrid
-pipelines. Switching to a new execution model does not require changes
-in the algorithm code expressed with G-API -- only the way the graph
-is triggered differs.
-
-# Listing: post-processing kernel {#gapi_ifd_pp}
-
-G-API gives an easy way to plug custom code into the pipeline even if
-it is running in a streaming mode and processing tensor
-data. Inference results are represented by multi-dimensional cv::Mat
-objects, so accessing them is as easy as with the regular DNN module.
-
-The OpenCV-based SSD post-processing kernel is defined and implemented in this
-sample as follows:
-
-@snippet cpp/tutorial_code/gapi/age_gender_emotion_recognition/age_gender_emotion_recognition.cpp Postproc
-
-["Interactive Face Detection"]: https://github.com/opencv/open_model_zoo/tree/master/demos/interactive_face_detection_demo
-[core]: @ref gapi_core
-[imgproc]: @ref gapi_imgproc
-[Architecture]: @ref gapi_hld
-[Kernels]: @ref gapi_kernel_api
-[VAAPI]: https://01.org/vaapi
diff --git a/doc/tutorials/gapi/oak_devices/oak_devices.markdown b/doc/tutorials/gapi/oak_devices/oak_devices.markdown
deleted file mode 100644
index 6046cdef25..0000000000
--- a/doc/tutorials/gapi/oak_devices/oak_devices.markdown
+++ /dev/null
@@ -1,26 +0,0 @@
-Using DepthAI Hardware / OAK depth sensors {#tutorial_gapi_oak_devices}
-=======================================================================
-
-@tableofcontents
-
-@prev_tutorial{tutorial_gapi_face_beautification}
-
-
-
-Depth sensors compatible with the Luxonis DepthAI library are supported through the OpenCV Graph API (or G-API) module. RGB images and some other output formats can be retrieved using the familiar interface of the G-API module.
-
-In order to use a DepthAI sensor with OpenCV, you should do the following preliminary steps:
--# Install the Luxonis DepthAI library [depthai-core](https://github.com/luxonis/depthai-core).
-
--# Configure OpenCV with DepthAI library support by setting the `WITH_OAK` flag in CMake. If the DepthAI library is found in the install folders, OpenCV will be built with depthai-core (see the `WITH_OAK` status in the CMake log).
-
--# Build OpenCV.
-
-Source code
------------
-
-You can find source code showing how to process heterogeneous graphs in `modules/gapi/samples/oak_basic_infer.cpp` in the OpenCV source tree.
-
-@add_toggle_cpp
- @include modules/gapi/samples/oak_basic_infer.cpp
-@end_toggle
diff --git a/doc/tutorials/gapi/oak_devices/pics/oak.jpg b/doc/tutorials/gapi/oak_devices/pics/oak.jpg
deleted file mode 100644
index 9ad30ce1e9..0000000000
Binary files a/doc/tutorials/gapi/oak_devices/pics/oak.jpg and /dev/null differ
diff --git a/doc/tutorials/gapi/table_of_content_gapi.markdown b/doc/tutorials/gapi/table_of_content_gapi.markdown
deleted file mode 100644
index 1b33172b9e..0000000000
--- a/doc/tutorials/gapi/table_of_content_gapi.markdown
+++ /dev/null
@@ -1,53 +0,0 @@
-# Graph API (gapi module) {#tutorial_table_of_content_gapi}
-
-In this section you will learn about graph-based image processing and
-how the G-API module can be used for that.
-
-- @subpage tutorial_gapi_interactive_face_detection
-
- *Languages:* C++
-
- *Compatibility:* \> OpenCV 4.2
-
- *Author:* Dmitry Matveev
-
- This tutorial illustrates how to build a hybrid video processing
- pipeline with G-API where Deep Learning and image processing are
- combined effectively to maximize the overall throughput. This
- sample requires Intel® distribution of OpenVINO™ Toolkit version
- 2019R2 or later.
-
-- @subpage tutorial_gapi_anisotropic_segmentation
-
- *Languages:* C++
-
- *Compatibility:* \> OpenCV 4.0
-
- *Author:* Dmitry Matveev
-
- This is an end-to-end tutorial where an existing sample algorithm
- is ported to G-API, covering the basic intuition behind this
- transition process, and examining the benefits which a graph model
- brings there.
-
-- @subpage tutorial_gapi_face_beautification
-
- *Languages:* C++
-
- *Compatibility:* \> OpenCV 4.2
-
- *Author:* Orest Chura
-
- In this tutorial we build a complex hybrid Computer Vision/Deep
- Learning video processing pipeline with G-API.
-
-
-- @subpage tutorial_gapi_oak_devices
-
- *Languages:* C++
-
- *Compatibility:* \> OpenCV 4.6
-
- *Author:* Alessandro de Oliveira Faria (A.K.A. CABELO)
-
- In this tutorial we show how to use the Luxonis DepthAI library with G-API.
diff --git a/doc/tutorials/introduction/env_reference/env_reference.markdown b/doc/tutorials/introduction/env_reference/env_reference.markdown
index ee725f8542..c902a95c46 100644
--- a/doc/tutorials/introduction/env_reference/env_reference.markdown
+++ b/doc/tutorials/introduction/env_reference/env_reference.markdown
@@ -301,26 +301,6 @@ Some external dependencies can be detached into a dynamic library, which will be
| OPENCV_TEST_CAMERA_%d_FPS | num | | fps to set for N-th camera (0-based index) (waitAny_V4L test) |
-## gapi
-| name | type | default | description |
-|------|------|---------|-------------|
-| ⭐ GRAPH_DUMP_PATH | file path | | dump graph (dot format) |
-| PIPELINE_MODELS_PATH | dir path | | pipeline_modeling_tool sample application uses this var |
-| OPENCV_GAPI_INFERENCE_ENGINE_CORE_LIFETIME_WORKAROUND | bool | true (Windows, Apple), false (others) | similar to OPENCV_DNN_INFERENCE_ENGINE_CORE_LIFETIME_WORKAROUND |
-
-### gapi tests/samples
-| name | type | default | description |
-|------|------|---------|-------------|
-| PLAIDML_DEVICE | string | | specific to PlaidML backend test |
-| PLAIDML_TARGET | string | | specific to PlaidML backend test |
-| OPENCV_GAPI_ONNX_MODEL_PATH | dir path | | search location for ONNX models test |
-| OPENCV_TEST_FREETYPE_FONT_PATH | file path | | location of TrueType font for one of tests |
-
-### Links:
-* https://github.com/opencv/opencv/wiki/Using-G-API-with-OpenVINO-Toolkit
-* https://github.com/opencv/opencv/wiki/Using-G-API-with-MS-ONNX-Runtime
-
-
## highgui
| name | type | default | description |
diff --git a/doc/tutorials/tutorials.markdown b/doc/tutorials/tutorials.markdown
index ffa5d5d3c6..48f1292971 100644
--- a/doc/tutorials/tutorials.markdown
+++ b/doc/tutorials/tutorials.markdown
@@ -9,7 +9,6 @@ OpenCV Tutorials {#tutorial_root}
- @subpage tutorial_table_of_content_objdetect - INSERT OBJDETECT MODULE INFO
- @subpage tutorial_table_of_content_features - feature detectors, descriptors and matching framework
- @subpage tutorial_table_of_content_dnn - infer neural networks using built-in _dnn_ module
-- @subpage tutorial_table_of_content_gapi - graph-based approach to computer vision algorithms building
- @subpage tutorial_table_of_content_other - other modules (stitching, video, photo)
- @subpage tutorial_table_of_content_ios - running OpenCV on an iDevice
- @subpage tutorial_table_of_content_3d - 3d objects processing and visualisation
diff --git a/modules/gapi/CMakeLists.txt b/modules/gapi/CMakeLists.txt
deleted file mode 100644
index 6757c853bd..0000000000
--- a/modules/gapi/CMakeLists.txt
+++ /dev/null
@@ -1,440 +0,0 @@
-# FIXME: Rework standalone build in more generic manner
-# (Restructure directories, add common pass, etc)
-if(NOT DEFINED OPENCV_INITIAL_PASS)
- cmake_minimum_required(VERSION 3.13)
- project(gapi_standalone)
- include("cmake/standalone.cmake")
- return()
-endif()
-
-if(NOT TARGET ade)
- # can't build G-API because of the above reasons
- ocv_module_disable(gapi)
- return()
-endif()
-
-if(TARGET ocv.3rdparty.openvino)
- # TODO: remove OPENCV_GAPI_INF_ENGINE option
- set(initial_value ON)
- if(DEFINED OPENCV_GAPI_INF_ENGINE)
- set(initial_value ${OPENCV_GAPI_INF_ENGINE})
- message(WARNING "OPENCV_GAPI_INF_ENGINE option is deprecated. Use OPENCV_GAPI_WITH_OPENVINO option instead.")
- endif()
- ocv_option(OPENCV_GAPI_WITH_OPENVINO "G-API: Enable OpenVINO Toolkit support" ${initial_value})
-endif()
-
-set(the_description "OpenCV G-API Core Module")
-
-ocv_add_module(gapi
- REQUIRED
- opencv_imgproc
- OPTIONAL
- opencv_video opencv_stereo
- WRAP
- python
-)
-
-if(MSVC)
- if(MSVC_VERSION LESS 1910)
- # Disable obsolete warning C4503 popping up on MSVC << 15 2017
- # https://docs.microsoft.com/en-us/cpp/error-messages/compiler-warnings/compiler-warning-level-1-c4503?view=vs-2019
- # and IE deprecated code warning C4996
- ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4503 /wd4996)
- endif()
- if((MSVC_VERSION LESS 1920) OR ARM OR AARCH64) # MSVS 2015/2017 on x86 and ARM
- ocv_warnings_disable(CMAKE_CXX_FLAGS /wd4702) # 'unreachable code'
- endif()
-endif()
-
-file(GLOB gapi_ext_hdrs
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/*.h"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/cpu/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/fluid/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/gpu/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/infer/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/oak/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/ocl/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/own/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/plaidml/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/python/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/render/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/s11n/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/streaming/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/streaming/gstreamer/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/streaming/onevpl/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/plaidml/*.hpp"
- "${CMAKE_CURRENT_LIST_DIR}/include/opencv2/${name}/util/*.hpp"
- )
-
-set(gapi_srcs
- # Front-end part
- src/api/grunarg.cpp
- src/api/gorigin.cpp
- src/api/gmat.cpp
- src/api/garray.cpp
- src/api/gopaque.cpp
- src/api/gscalar.cpp
- src/api/gframe.cpp
- src/api/gkernel.cpp
- src/api/gbackend.cpp
- src/api/gcommon.cpp
- src/api/gproto.cpp
- src/api/gnode.cpp
- src/api/gcall.cpp
- src/api/gcomputation.cpp
- src/api/operators.cpp
- src/api/kernels_core.cpp
- src/api/kernels_imgproc.cpp
- src/api/kernels_video.cpp
- src/api/kernels_nnparsers.cpp
- src/api/kernels_ot.cpp
- src/api/kernels_streaming.cpp
- src/api/kernels_stereo.cpp
- src/api/render.cpp
- src/api/render_ocv.cpp
- src/api/ginfer.cpp
- src/api/media.cpp
- src/api/rmat.cpp
-
- # Compiler part
- src/compiler/gmodel.cpp
- src/compiler/gmodelbuilder.cpp
- src/compiler/gislandmodel.cpp
- src/compiler/gcompiler.cpp
- src/compiler/gcompiled.cpp
- src/compiler/gstreaming.cpp
- src/compiler/passes/helpers.cpp
- src/compiler/passes/dump_dot.cpp
- src/compiler/passes/islands.cpp
- src/compiler/passes/meta.cpp
- src/compiler/passes/kernels.cpp
- src/compiler/passes/exec.cpp
- src/compiler/passes/transformations.cpp
- src/compiler/passes/pattern_matching.cpp
- src/compiler/passes/perform_substitution.cpp
- src/compiler/passes/streaming.cpp
- src/compiler/passes/intrin.cpp
-
- # Executor
- src/executor/gabstractexecutor.cpp
- src/executor/gabstractstreamingexecutor.cpp
- src/executor/gexecutor.cpp
- src/executor/gtbbexecutor.cpp
- src/executor/gthreadedexecutor.cpp
- src/executor/gstreamingexecutor.cpp
- src/executor/gasync.cpp
- src/executor/thread_pool.cpp
-
- # CPU Backend (currently built-in)
- src/backends/cpu/gcpubackend.cpp
- src/backends/cpu/gcpukernel.cpp
- src/backends/cpu/gcpuimgproc.cpp
- src/backends/cpu/gcpustereo.cpp
- src/backends/cpu/gcpuvideo.cpp
- src/backends/cpu/gcpucore.cpp
- src/backends/cpu/gcpuot.cpp
- src/backends/cpu/gnnparsers.cpp
-
- # Fluid Backend (also built-in, FIXME:move away)
- src/backends/fluid/gfluidbuffer.cpp
- src/backends/fluid/gfluidbackend.cpp
- src/backends/fluid/gfluidimgproc.cpp
- src/backends/fluid/gfluidimgproc_func.dispatch.cpp
- src/backends/fluid/gfluidcore.cpp
- src/backends/fluid/gfluidcore_func.dispatch.cpp
-
- # OAK Backend (optional)
- src/backends/oak/goak.cpp
- src/backends/oak/goakbackend.cpp
- src/backends/oak/goak_memory_adapters.cpp
-
- # OCL Backend (currently built-in)
- src/backends/ocl/goclbackend.cpp
- src/backends/ocl/goclkernel.cpp
- src/backends/ocl/goclimgproc.cpp
- src/backends/ocl/goclcore.cpp
-
- # IE Backend. FIXME: should be included by CMake
- # if and only if IE support is enabled
- src/backends/ie/giebackend.cpp
- src/backends/ie/giebackend/giewrapper.cpp
-
- # OV Backend. FIXME: should be included by CMake
- # if and only if OV support is enabled
- src/backends/ov/govbackend.cpp
-
- # ONNX backend
- src/backends/onnx/gonnxbackend.cpp
- src/backends/onnx/dml_ep.cpp
- src/backends/onnx/coreml_ep.cpp
-
- # Render backend
- src/backends/render/grenderocv.cpp
- src/backends/render/ft_render.cpp
-
- # PlaidML Backend
- src/backends/plaidml/gplaidmlcore.cpp
- src/backends/plaidml/gplaidmlbackend.cpp
-
- # Common backend code
- src/backends/common/gmetabackend.cpp
- src/backends/common/gcompoundbackend.cpp
- src/backends/common/gcompoundkernel.cpp
-
- # Serialization API and routines
- src/api/s11n.cpp
- src/backends/common/serialization.cpp
-
- # Streaming backend
- src/backends/streaming/gstreamingbackend.cpp
-
- # Python bridge
- src/backends/ie/bindings_ie.cpp
- src/backends/onnx/bindings_onnx.cpp
- src/backends/ov/bindings_ov.cpp
- src/backends/python/gpythonbackend.cpp
-
- # Queue Streaming source
- src/streaming/queue_source.cpp
-
- # OpenVPL Streaming source
- src/streaming/onevpl/source.cpp
- src/streaming/onevpl/source_priv.cpp
- src/streaming/onevpl/file_data_provider.cpp
- src/streaming/onevpl/cfg_params.cpp
- src/streaming/onevpl/cfg_params_parser.cpp
- src/streaming/onevpl/utils.cpp
- src/streaming/onevpl/default.cpp
- src/streaming/onevpl/data_provider_interface_exception.cpp
- src/streaming/onevpl/accelerators/surface/base_frame_adapter.cpp
- src/streaming/onevpl/accelerators/surface/cpu_frame_adapter.cpp
- src/streaming/onevpl/accelerators/surface/dx11_frame_adapter.cpp
- src/streaming/onevpl/accelerators/surface/surface.cpp
- src/streaming/onevpl/accelerators/surface/surface_pool.cpp
- src/streaming/onevpl/accelerators/utils/shared_lock.cpp
- src/streaming/onevpl/accelerators/accel_policy_cpu.cpp
- src/streaming/onevpl/accelerators/accel_policy_dx11.cpp
- src/streaming/onevpl/accelerators/accel_policy_va_api.cpp
- src/streaming/onevpl/accelerators/dx11_alloc_resource.cpp
- src/streaming/onevpl/engine/engine_session.cpp
- src/streaming/onevpl/engine/processing_engine_base.cpp
- src/streaming/onevpl/engine/decode/decode_engine_legacy.cpp
- src/streaming/onevpl/engine/decode/decode_session.cpp
- src/streaming/onevpl/engine/transcode/transcode_engine_legacy.cpp
- src/streaming/onevpl/engine/transcode/transcode_session.cpp
- src/streaming/onevpl/engine/preproc/preproc_engine.cpp
- src/streaming/onevpl/engine/preproc/preproc_session.cpp
- src/streaming/onevpl/engine/preproc/preproc_dispatcher.cpp
- src/streaming/onevpl/engine/preproc_engine_interface.cpp
- src/streaming/onevpl/demux/async_mfp_demux_data_provider.cpp
- src/streaming/onevpl/data_provider_dispatcher.cpp
-
- src/streaming/onevpl/cfg_param_device_selector.cpp
- src/streaming/onevpl/device_selector_interface.cpp
-
- # GStreamer Streaming source
- src/streaming/gstreamer/gstreamer_pipeline_facade.cpp
- src/streaming/gstreamer/gstreamerpipeline.cpp
- src/streaming/gstreamer/gstreamersource.cpp
- src/streaming/gstreamer/gstreamer_buffer_utils.cpp
- src/streaming/gstreamer/gstreamer_media_adapter.cpp
- src/streaming/gstreamer/gstreamerenv.cpp
-
- # Utils (ITT tracing)
- src/utils/itt.cpp
- )
-
-file(GLOB_RECURSE gapi_3rdparty_srcs
- "${CMAKE_CURRENT_LIST_DIR}/src/3rdparty/vasot/src/*.cpp"
-)
-
-ocv_add_dispatched_file(backends/fluid/gfluidimgproc_func SSE4_1 AVX2)
-ocv_add_dispatched_file(backends/fluid/gfluidcore_func SSE4_1 AVX2)
-
-ocv_list_add_prefix(gapi_srcs "${CMAKE_CURRENT_LIST_DIR}/")
-
-# For IDE users
-ocv_source_group("Src" FILES ${gapi_srcs} ${gapi_3rdparty_srcs})
-ocv_source_group("Include" FILES ${gapi_ext_hdrs})
-
-ocv_set_module_sources(HEADERS ${gapi_ext_hdrs} SOURCES ${gapi_srcs} ${gapi_3rdparty_srcs})
-ocv_module_include_directories("${CMAKE_CURRENT_LIST_DIR}/src")
-
-# VAS Object Tracking includes
-ocv_module_include_directories(${CMAKE_CURRENT_LIST_DIR}/src/3rdparty/vasot/include)
-
-ocv_create_module()
-
-ocv_target_link_libraries(${the_module} PRIVATE ade)
-
-if(TARGET ocv.3rdparty.openvino AND OPENCV_GAPI_WITH_OPENVINO)
- ocv_target_link_libraries(${the_module} PRIVATE ocv.3rdparty.openvino)
- ocv_install_used_external_targets(ocv.3rdparty.openvino)
-endif()
-
-if(HAVE_TBB)
- ocv_target_link_libraries(${the_module} PRIVATE tbb)
-endif()
-
-# TODO: Consider support of ITT in G-API standalone mode.
-if(CV_TRACE AND HAVE_ITT)
- ocv_target_compile_definitions(${the_module} PRIVATE -DOPENCV_WITH_ITT=1)
- ocv_module_include_directories(${ITT_INCLUDE_DIRS})
- ocv_target_link_libraries(${the_module} PRIVATE ${ITT_LIBRARIES})
-endif()
-
-set(__test_extra_deps "")
-if(TARGET ocv.3rdparty.openvino AND OPENCV_GAPI_WITH_OPENVINO)
- list(APPEND __test_extra_deps ocv.3rdparty.openvino)
-endif()
-ocv_add_accuracy_tests(${__test_extra_deps})
-
-# FIXME: test binary is linked with ADE directly since ADE symbols
-# are not exported from libopencv_gapi.so in any form - thus
-# there're two copies of ADE code in memory when tests run (!)
-# src/ is specified to include dirs for INTERNAL tests only.
-if(TARGET opencv_test_gapi)
- target_include_directories(opencv_test_gapi PRIVATE "${CMAKE_CURRENT_LIST_DIR}/src")
- target_link_libraries(opencv_test_gapi PRIVATE ade)
-endif()
-
-if(HAVE_TBB AND TARGET opencv_test_gapi)
- ocv_target_link_libraries(opencv_test_gapi PRIVATE tbb)
-endif()
-
-if(HAVE_FREETYPE)
- ocv_target_compile_definitions(${the_module} PRIVATE -DHAVE_FREETYPE)
- if(TARGET opencv_test_gapi)
- ocv_target_compile_definitions(opencv_test_gapi PRIVATE -DHAVE_FREETYPE)
- endif()
- ocv_target_link_libraries(${the_module} PRIVATE ${FREETYPE_LIBRARIES})
- ocv_target_include_directories(${the_module} PRIVATE ${FREETYPE_INCLUDE_DIRS})
-endif()
-
-if(HAVE_OAK)
- ocv_target_compile_definitions(${the_module} PRIVATE -DHAVE_OAK)
- if(TARGET opencv_test_gapi)
- ocv_target_compile_definitions(opencv_test_gapi PRIVATE -DHAVE_OAK)
- endif()
- ocv_target_link_libraries(${the_module} PRIVATE depthai::core)
-endif()
-
-if(HAVE_PLAIDML)
- ocv_target_compile_definitions(${the_module} PRIVATE -DHAVE_PLAIDML)
- if(TARGET opencv_test_gapi)
- ocv_target_compile_definitions(opencv_test_gapi PRIVATE -DHAVE_PLAIDML)
- endif()
- ocv_target_link_libraries(${the_module} PRIVATE ${PLAIDML_LIBRARIES})
- ocv_target_include_directories(${the_module} SYSTEM PRIVATE ${PLAIDML_INCLUDE_DIRS})
-endif()
-
-if(HAVE_GAPI_ONEVPL)
- if(TARGET opencv_test_gapi)
- ocv_target_compile_definitions(opencv_test_gapi PRIVATE -DHAVE_ONEVPL)
- ocv_target_link_libraries(opencv_test_gapi PRIVATE ${VPL_IMPORTED_TARGETS})
- if(MSVC)
- target_compile_options(opencv_test_gapi PUBLIC "/wd4201")
- endif()
- if(HAVE_D3D11 AND HAVE_OPENCL)
- ocv_target_include_directories(opencv_test_gapi SYSTEM PRIVATE ${OPENCL_INCLUDE_DIRS})
- endif()
- endif()
-
- ocv_target_compile_definitions(${the_module} PRIVATE -DHAVE_ONEVPL)
- ocv_target_link_libraries(${the_module} PRIVATE ${VPL_IMPORTED_TARGETS})
-
- if(HAVE_DIRECTX AND HAVE_D3D11)
- ocv_target_link_libraries(${the_module} PRIVATE d3d11 dxgi)
- endif()
- if(WIN32)
- ocv_target_link_libraries(${the_module} PRIVATE mf mfuuid mfplat shlwapi mfreadwrite)
- endif()
- if(HAVE_D3D11 AND HAVE_OPENCL)
- ocv_target_include_directories(${the_module} SYSTEM PRIVATE ${OPENCL_INCLUDE_DIRS})
- endif()
-
- if(UNIX AND HAVE_VA)
- ocv_target_include_directories(${the_module} SYSTEM PRIVATE ${VA_INCLUDE_DIR})
- ocv_target_link_libraries(${the_module} PRIVATE ${VA_LIBRARIES})
- if(TARGET opencv_test_gapi)
- ocv_target_include_directories(opencv_test_gapi SYSTEM PRIVATE ${VA_INCLUDE_DIR})
- ocv_target_link_libraries(opencv_test_gapi PRIVATE ${VA_LIBRARIES})
- endif()
- endif()
-endif()
-
-ocv_option(OPENCV_GAPI_GSTREAMER "Build G-API with GStreamer support" HAVE_GSTREAMER)
-if(HAVE_GSTREAMER AND OPENCV_GAPI_GSTREAMER)
- if(TARGET opencv_test_gapi)
- ocv_target_compile_definitions(opencv_test_gapi PRIVATE -DHAVE_GSTREAMER)
- ocv_target_link_libraries(opencv_test_gapi PRIVATE ocv.3rdparty.gstreamer)
- endif()
- ocv_target_compile_definitions(${the_module} PRIVATE -DHAVE_GSTREAMER)
- ocv_target_link_libraries(${the_module} PRIVATE ocv.3rdparty.gstreamer)
-endif()
-
-if(WIN32)
- # Required for htonl/ntohl on Windows
- ocv_target_link_libraries(${the_module} PRIVATE wsock32 ws2_32)
-endif()
-
-if(HAVE_DIRECTML)
- ocv_target_compile_definitions(${the_module} PRIVATE HAVE_DIRECTML=1)
-endif()
-
-if(HAVE_ONNX)
- ocv_target_link_libraries(${the_module} PRIVATE ${ONNX_LIBRARY})
- ocv_target_compile_definitions(${the_module} PRIVATE HAVE_ONNX=1)
- if(HAVE_ONNX_DML)
- ocv_target_compile_definitions(${the_module} PRIVATE HAVE_ONNX_DML=1)
- endif()
- if(TARGET opencv_test_gapi)
- ocv_target_compile_definitions(opencv_test_gapi PRIVATE HAVE_ONNX=1)
- ocv_target_link_libraries(opencv_test_gapi PRIVATE ${ONNX_LIBRARY})
- endif()
-endif()
-
-ocv_install_3rdparty_licenses(vasot "${CMAKE_CURRENT_SOURCE_DIR}/src/3rdparty/vasot/LICENSE.txt")
-
-ocv_add_perf_tests()
-ocv_add_samples()
-
-# Required for sample with inference on host
-if(TARGET example_gapi_onevpl_infer_with_advanced_device_selection)
- if(TARGET ocv.3rdparty.openvino AND OPENCV_GAPI_WITH_OPENVINO)
- ocv_target_link_libraries(example_gapi_onevpl_infer_with_advanced_device_selection PRIVATE ocv.3rdparty.openvino)
- endif()
- if(HAVE_DIRECTX AND HAVE_D3D11)
- ocv_target_link_libraries(example_gapi_onevpl_infer_with_advanced_device_selection PRIVATE d3d11 dxgi)
- endif()
- if(HAVE_D3D11 AND HAVE_OPENCL)
- ocv_target_include_directories(example_gapi_onevpl_infer_with_advanced_device_selection SYSTEM PRIVATE ${OPENCL_INCLUDE_DIRS})
- endif()
- if(UNIX AND HAVE_VA)
- message(STATUS "GAPI VPL samples with VAAPI")
- ocv_target_include_directories(example_gapi_onevpl_infer_with_advanced_device_selection SYSTEM PRIVATE ${VA_INCLUDE_DIR})
- ocv_target_link_libraries(example_gapi_onevpl_infer_with_advanced_device_selection PRIVATE ${VA_LIBRARIES})
- endif()
-endif()
-
-if(TARGET example_gapi_pipeline_modeling_tool)
- if(WIN32)
- ocv_target_link_libraries(example_gapi_pipeline_modeling_tool winmm.lib)
- endif()
-endif()
-
-# perf test dependencies postprocessing
-if(HAVE_GAPI_ONEVPL)
- # NB: TARGET opencv_perf_gapi doesn't exist before `ocv_add_perf_tests`
- # src/ is specified to include dirs for INTERNAL tests only.
- if(TARGET opencv_perf_gapi)
- target_include_directories(opencv_perf_gapi PRIVATE "${CMAKE_CURRENT_LIST_DIR}/src")
- ocv_target_compile_definitions(opencv_perf_gapi PRIVATE -DHAVE_ONEVPL)
- ocv_target_link_libraries(opencv_perf_gapi PRIVATE ${VPL_IMPORTED_TARGETS})
- if(HAVE_D3D11 AND HAVE_OPENCL)
- ocv_target_include_directories(opencv_perf_gapi SYSTEM PRIVATE ${OPENCL_INCLUDE_DIRS})
- endif()
- endif()
-endif()
diff --git a/modules/gapi/cmake/DownloadADE.cmake b/modules/gapi/cmake/DownloadADE.cmake
deleted file mode 100644
index 8ddaadb511..0000000000
--- a/modules/gapi/cmake/DownloadADE.cmake
+++ /dev/null
@@ -1,51 +0,0 @@
-set(ade_src_dir "${OpenCV_BINARY_DIR}/3rdparty/ade")
-set(ade_filename "v0.1.2e.zip")
-set(ade_subdir "ade-0.1.2e")
-set(ade_md5 "962ce79e0b95591f226431f7b5f152cd")
-ocv_download(FILENAME ${ade_filename}
- HASH ${ade_md5}
- URL
- "${OPENCV_ADE_URL}"
- "$ENV{OPENCV_ADE_URL}"
- "https://github.com/opencv/ade/archive/"
- DESTINATION_DIR ${ade_src_dir}
- ID ADE
- STATUS res
- UNPACK RELATIVE_URL)
-
-if (NOT res)
- return()
-endif()
-
-set(ADE_root "${ade_src_dir}/${ade_subdir}/sources/ade")
-file(GLOB_RECURSE ADE_sources "${ADE_root}/source/*.cpp")
-file(GLOB_RECURSE ADE_include "${ADE_root}/include/ade/*.hpp")
-add_library(ade STATIC ${OPENCV_3RDPARTY_EXCLUDE_FROM_ALL}
- ${ADE_include}
- ${ADE_sources}
-)
-
-# https://github.com/opencv/ade/issues/32
-if(CV_CLANG AND CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 13.1)
- ocv_warnings_disable(CMAKE_CXX_FLAGS -Wdeprecated-copy)
-endif()
-
-target_include_directories(ade PUBLIC $<BUILD_INTERFACE:${ADE_root}/include>)
-set_target_properties(ade PROPERTIES
- POSITION_INDEPENDENT_CODE True
- OUTPUT_NAME ade
- DEBUG_POSTFIX "${OPENCV_DEBUG_POSTFIX}"
- COMPILE_PDB_NAME ade
- COMPILE_PDB_NAME_DEBUG "ade${OPENCV_DEBUG_POSTFIX}"
- ARCHIVE_OUTPUT_DIRECTORY ${3P_LIBRARY_OUTPUT_PATH}
-)
-
-if(ENABLE_SOLUTION_FOLDERS)
- set_target_properties(ade PROPERTIES FOLDER "3rdparty")
-endif()
-
-if(NOT BUILD_SHARED_LIBS)
- ocv_install_target(ade EXPORT OpenCVModules ARCHIVE DESTINATION ${OPENCV_3P_LIB_INSTALL_PATH} COMPONENT dev OPTIONAL)
-endif()
-
-ocv_install_3rdparty_licenses(ade "${ade_src_dir}/${ade_subdir}/LICENSE")
diff --git a/modules/gapi/cmake/init.cmake b/modules/gapi/cmake/init.cmake
deleted file mode 100644
index dd4b0bccfa..0000000000
--- a/modules/gapi/cmake/init.cmake
+++ /dev/null
@@ -1,49 +0,0 @@
-OCV_OPTION(WITH_ADE "Enable ADE framework (required for Graph API module)" ON)
-
-OCV_OPTION(WITH_FREETYPE "Enable FreeType framework" OFF)
-OCV_OPTION(WITH_PLAIDML "Include PlaidML2 support" OFF)
-OCV_OPTION(WITH_OAK "Include OpenCV AI Kit support" OFF)
-
-if(NOT WITH_ADE)
- return()
-endif()
-
-if(ade_DIR)
- # if ade_DIR is set, use ADE-supplied CMake script
- # to set up variables to the prebuilt ADE
- find_package(ade 0.1.0)
-endif()
-
-if(NOT TARGET ade)
- # if ade_DIR is not set, try to use automatically
- # downloaded one (if there is any)
- include("${CMAKE_CURRENT_LIST_DIR}/DownloadADE.cmake")
-endif()
-
-if(WITH_FREETYPE)
- ocv_check_modules(FREETYPE freetype2)
- if (FREETYPE_FOUND)
- set(HAVE_FREETYPE TRUE)
- endif()
-endif()
-
-if(WITH_PLAIDML)
- find_package(PlaidML2 CONFIG QUIET)
- if (PLAIDML_FOUND)
- set(HAVE_PLAIDML TRUE)
- endif()
-endif()
-
-if(WITH_GAPI_ONEVPL)
- find_package(VPL)
- if(VPL_FOUND)
- set(HAVE_GAPI_ONEVPL TRUE)
- endif()
-endif()
-
-if(WITH_OAK)
- find_package(depthai QUIET)
- if(depthai_FOUND)
- set(HAVE_OAK TRUE)
- endif()
-endif()
diff --git a/modules/gapi/cmake/standalone.cmake b/modules/gapi/cmake/standalone.cmake
deleted file mode 100644
index f81c1c8a85..0000000000
--- a/modules/gapi/cmake/standalone.cmake
+++ /dev/null
@@ -1,62 +0,0 @@
-if("${CMAKE_BUILD_TYPE}" STREQUAL "")
- set(CMAKE_BUILD_TYPE "Release")
-endif()
-
-if (NOT TARGET ade )
- find_package(ade 0.1.0 REQUIRED)
-endif()
-
-if (WITH_GAPI_ONEVPL)
- find_package(VPL)
- if(VPL_FOUND)
- set(HAVE_GAPI_ONEVPL TRUE)
- endif()
-endif()
-
-set(FLUID_TARGET fluid)
-set(FLUID_ROOT "${CMAKE_CURRENT_LIST_DIR}/../")
-
-file(GLOB FLUID_includes "${FLUID_ROOT}/include/opencv2/*.hpp"
- "${FLUID_ROOT}/include/opencv2/gapi/g*.hpp"
- "${FLUID_ROOT}/include/opencv2/gapi/util/*.hpp"
- "${FLUID_ROOT}/include/opencv2/gapi/own/*.hpp"
- "${FLUID_ROOT}/include/opencv2/gapi/fluid/*.hpp")
-file(GLOB FLUID_sources "${FLUID_ROOT}/src/api/g*.cpp"
- "${FLUID_ROOT}/src/api/rmat.cpp"
- "${FLUID_ROOT}/src/api/media.cpp"
- "${FLUID_ROOT}/src/compiler/*.cpp"
- "${FLUID_ROOT}/src/compiler/passes/*.cpp"
- "${FLUID_ROOT}/src/executor/*.cpp"
- "${FLUID_ROOT}/src/backends/fluid/*.cpp"
- "${FLUID_ROOT}/src/backends/streaming/*.cpp"
- "${FLUID_ROOT}/src/backends/common/*.cpp")
-
-add_library(${FLUID_TARGET} STATIC ${FLUID_includes} ${FLUID_sources})
-
-target_include_directories(${FLUID_TARGET}
- PUBLIC $<BUILD_INTERFACE:${FLUID_ROOT}/include>
- PRIVATE ${FLUID_ROOT}/src)
-
-target_compile_definitions(${FLUID_TARGET} PUBLIC GAPI_STANDALONE
-# This preprocessor definition resolves symbol clash when
-# standalone fluid meets gapi ocv module in one application
- PUBLIC cv=fluidcv)
-
-set_target_properties(${FLUID_TARGET} PROPERTIES POSITION_INDEPENDENT_CODE True)
-set_property(TARGET ${FLUID_TARGET} PROPERTY CXX_STANDARD 11)
-
-if(MSVC)
- target_compile_options(${FLUID_TARGET} PUBLIC "/wd4251")
- target_compile_options(${FLUID_TARGET} PUBLIC "/wd4275")
- target_compile_definitions(${FLUID_TARGET} PRIVATE _CRT_SECURE_NO_DEPRECATE)
- # Disable obsolete warning C4503 popping up on MSVC << 2017
- # https://docs.microsoft.com/en-us/cpp/error-messages/compiler-warnings/compiler-warning-level-1-c4503?view=vs-2019
- set_target_properties(${FLUID_TARGET} PROPERTIES COMPILE_FLAGS "/wd4503")
-endif()
-
-target_link_libraries(${FLUID_TARGET} PRIVATE ade)
-
-if(WIN32)
- # Required for htonl/ntohl on Windows
- target_link_libraries(${FLUID_TARGET} PRIVATE wsock32 ws2_32)
-endif()
diff --git a/modules/gapi/doc/00-root.markdown b/modules/gapi/doc/00-root.markdown
deleted file mode 100644
index cb99495c1b..0000000000
--- a/modules/gapi/doc/00-root.markdown
+++ /dev/null
@@ -1,125 +0,0 @@
-# Graph API {#gapi}
-
-# Introduction {#gapi_root_intro}
-
-OpenCV Graph API (or G-API) is a new OpenCV module targeted to make
-regular image processing fast and portable. These two goals are
-achieved by introducing a new graph-based model of execution.
-
-G-API is a special module in OpenCV -- in contrast with the majority
-of other main modules, this one acts as a framework rather than some
-specific CV algorithm. G-API provides means to define CV operations,
-construct graphs (in form of expressions) using it, and finally
-implement and run the operations for a particular backend.
-
-@note G-API is a new module and is now in active development. Its API
-is volatile at the moment and there may be minor but
-compatibility-breaking changes in the future.
-
-# Contents
-
-G-API documentation is organized into the following chapters:
-
-- @subpage gapi_purposes
-
- The motivation behind G-API and its goals.
-
-- @subpage gapi_hld
-
- General overview of G-API architecture and its major internal
- components.
-
-- @subpage gapi_kernel_api
-
-  Learn how to introduce new operations in G-API and implement them for
- various backends.
-
-- @subpage gapi_impl
-
- Low-level implementation details of G-API, for those who want to
- contribute.
-
-- API Reference: functions and classes
-
- - @subpage gapi_ref
-
- Core G-API classes, data types, backends, etc.
-
- - @subpage gapi_core
-
- Core G-API operations - arithmetic, boolean, and other matrix
- operations;
-
- - @subpage gapi_imgproc
-
- Image processing functions: color space conversions, various
- filters, etc.
-
- - @subpage gapi_video
-
- Video processing functionality.
-
- - @subpage gapi_draw
-
- Drawing and composition functionality
-
-# API Example {#gapi_example}
-
-A very basic example of a G-API pipeline is shown below:
-
-@include modules/gapi/samples/api_example.cpp
-
-
-
-G-API is a separate OpenCV module so its header files have to be
-included explicitly. The first four lines of `main()` create and
-initialize OpenCV's standard video capture object, which fetches
-video frames from either an attached camera or a specified file.
-
-The G-API pipeline is constructed next. In fact, it is a series of G-API
-operation calls on cv::GMat data. The important aspect of G-API is
-that this code block is just a declaration of actions, not the
-actions themselves. No processing happens at this point; G-API only
-tracks which operations form the pipeline and how they are connected. G-API
-_Data objects_ (here it is cv::GMat) are used to connect operations to
-each other. `in` is an _empty_ cv::GMat signalling that it is the
-beginning of the computation.
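-
-For illustration, such a declarative block may look like the following
-(a simplified sketch in the spirit of the included sample; the exact
-operations are arbitrary):
-
-```cpp
-#include <opencv2/gapi.hpp>
-#include <opencv2/gapi/imgproc.hpp>
-
-cv::GMat in;                                              // empty object: the graph input
-cv::GMat gray    = cv::gapi::BGR2Gray(in);                // declare a color conversion
-cv::GMat blurred = cv::gapi::blur(gray, cv::Size(5, 5));  // declare a blur of its result
-cv::GMat out     = cv::gapi::Canny(blurred, 32, 128);     // declare edge detection
-// Nothing is computed at this point -- only the dependencies are recorded.
-```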
-
-After G-API code is written, it is captured into a call graph with
-the instantiation of a cv::GComputation object. This object takes
-input/output data references (in this example, `in` and `out`
-cv::GMat objects, respectively) as parameters and reconstructs the
-call graph based on all the data flow between `in` and `out`.
-
-cv::GComputation is a thin object in the sense that it just captures which
-operations form a computation. However, it can be used to execute
-computations -- in the following processing loop, every captured frame (a
-cv::Mat `input_frame`) is passed to cv::GComputation::apply().
-
-
-
-cv::GComputation::apply() is a polymorphic method which accepts a
-variadic number of arguments. Since this computation is defined on one
-input and one output, a special overload of cv::GComputation::apply() is
-used to pass input data and get output data.
-
-Internally, cv::GComputation::apply() compiles the captured graph for
-the given input parameters and executes the compiled graph on data
-immediately.
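-
-For example, a per-frame processing loop may look like this (a sketch;
-`cc` stands for the cv::GComputation object and `cap` for the
-cv::VideoCapture object, both assumed to be set up as in the sample):
-
-```cpp
-void process_stream(cv::GComputation &cc, cv::VideoCapture &cap)
-{
-    cv::Mat input_frame, output_frame;
-    while (cap.read(input_frame)) {
-        cc.apply(input_frame, output_frame); // recompiles only if the input format changes
-        cv::imshow("output", output_frame);  // needs the highgui module
-        if (cv::waitKey(30) >= 0) break;
-    }
-}
-```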
-
-There are a number of important concepts that can be outlined with this example:
-* Graph declaration and graph execution are distinct steps;
-* Graph is built implicitly from a sequence of G-API expressions;
-* G-API supports function-like calls -- e.g. cv::gapi::resize(), and
-  operators, e.g. operator|() which is used to compute bitwise OR;
-* G-API syntax aims to look pure: every operation call within a graph
- yields a new result, thus forming a directed acyclic graph (DAG);
-* Graph declaration is not bound to any data -- real data objects
-  (cv::Mat) come into the picture after the graph is already declared.
-
-
-
-See [tutorials and porting examples](@ref tutorial_table_of_content_gapi)
-to learn more on various G-API features and concepts.
-
-
diff --git a/modules/gapi/doc/01-background.markdown b/modules/gapi/doc/01-background.markdown
deleted file mode 100644
index 08014d1f67..0000000000
--- a/modules/gapi/doc/01-background.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
-# Why Graph API? {#gapi_purposes}
-
-# Motivation behind G-API {#gapi_intro_why}
-
-G-API module brings graph-based model of execution to OpenCV. This
-chapter briefly describes how this new model can help software
-developers in two aspects: optimizing and porting image processing
-algorithms.
-
-## Optimizing with Graph API {#gapi_intro_opt}
-
-Traditionally OpenCV provided a lot of stand-alone image processing
-functions (see modules `core` and `imgproc`). Many of those functions
-are well-optimized (e.g. vectorized for specific CPUs, parallelized, etc.)
-but still the out-of-the-box optimization scope has been limited to a
-single function only -- optimizing a whole algorithm built atop of those
-functions was the responsibility of the programmer.
-
-OpenCV 3.0 introduced _Transparent API_ (or _T-API_) which made it
-possible to offload OpenCV function calls transparently to OpenCL devices
-and save on Host/Device data transfers with cv::UMat -- and it was a great
-step forward. However, T-API is a dynamic API -- user code still remains
-unconstrained and OpenCL kernels are enqueued in arbitrary order, thus
-eliminating further pipeline-level optimization potential.
-
-G-API brings an implicit graph model to OpenCV 4.0. The graph model captures
-all operations and their data dependencies in a pipeline and so provides
-the G-API framework with extra information to do pipeline-level
-optimizations.
-
-The cornerstone of graph-based optimizations is _Tiling_. Tiling
-allows breaking the processing into smaller parts and reorganizing
-operations to enable data parallelism, improve data locality, and save
-memory footprint. Data locality is an especially important aspect of
-software optimization due to the different costs of memory access on modern
-computer architectures -- the more data is reused in the first-level
-cache, the more efficient the pipeline is.
-
-Definitely, the aforementioned techniques can be applied manually --
-but this requires extra skills and knowledge of the target platform, and
-the algorithm implementation changes irrevocably -- becoming more
-specific, less flexible, and harder to extend and maintain.
-
-G-API takes this responsibility and complexity away from the user and does
-the majority of the work by itself, keeping the algorithm code clean of
-device and optimization details. This approach has its own limitations,
-though, as the graph model is a _constrained_ model and not every
-algorithm can be represented as a graph, so the G-API scope is limited
-to regular image processing only -- various filters, arithmetic,
-binary operations, and well-defined geometrical transformations.
-
-## Porting with Graph API {#gapi_intro_port}
-
-The essence of G-API is declaring a sequence of operations to run, and
-then executing that sequence. G-API is a constrained API, so it puts a
-number of limitations on which operations can form a pipeline and
-which data these operations may exchange with each other.
-
-This formalization in fact helps to make an algorithm portable. G-API
-clearly separates operation _interfaces_ from their _implementations_.
-
-One operation (_kernel_) may have multiple implementations even for a
-single device (e.g., OpenCV-based "reference" implementation and a
-tiled optimized implementation, both running on CPU). Graphs (or
-_Computations_ in G-API terms) are built only using operation
-interfaces, not implementations -- thus the same graph can be executed
-on different devices (and, of course, using different optimization
-techniques) with little-to-no changes in the graph itself.
-
-G-API supports plugins (_Backends_) which aggregate logic and
-intelligence about the best way to execute on a particular
-platform. Once a pipeline is built with G-API, it can be parametrized
-to use any of the backends (or a combination of them) and so a graph
-can be ported easily to a new platform, as illustrated below.
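-
-For instance, re-targeting a graph to another set of kernels is a matter
-of compile arguments only. A minimal sketch, assuming a cv::GComputation
-`cc` and input/output cv::Mat objects defined elsewhere:
-
-```cpp
-#include <opencv2/gapi.hpp>
-#include <opencv2/gapi/fluid/core.hpp>  // cv::gapi::core::fluid::kernels()
-
-void run_both_ways(cv::GComputation &cc, const cv::Mat &in_mat, cv::Mat &out_mat)
-{
-    cc.apply(in_mat, out_mat);  // default (OpenCV "reference") kernels
-    cc.apply(in_mat, out_mat,   // the very same graph, now with Fluid core kernels
-             cv::compile_args(cv::gapi::core::fluid::kernels()));
-}
-```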
-
-@sa @ref gapi_hld
diff --git a/modules/gapi/doc/10-hld-overview.md b/modules/gapi/doc/10-hld-overview.md
deleted file mode 100644
index 6de6efa921..0000000000
--- a/modules/gapi/doc/10-hld-overview.md
+++ /dev/null
@@ -1,160 +0,0 @@
-# High-level design overview {#gapi_hld}
-
-[TOC]
-
-# G-API High-level design overview
-
-G-API is a heterogeneous framework and provides a unified API to
-program image processing pipelines with a number of supported
-backends.
-
-The key design idea is to keep pipeline code itself platform-neutral
-while specifying which kernels to use and which devices to utilize
-using extra parameters at graph compile (configuration) time. This
-requirement has led to the following architecture:
-
-![G-API architecture: API, Graph Compiler, and Backends layers](pics/gapi_scheme.png)
-
-There are three layers in this architecture:
-* **API Layer** -- this is the top layer, which implements G-API
- public interface, its building blocks and semantics.
-  When a user constructs a pipeline with G-API, they interact with this
-  layer directly, and the entities the user operates on (like cv::GMat
- or cv::GComputation) are provided by this layer.
-* **Graph Compiler Layer** -- this is the intermediate layer which
- unrolls user computation into a graph and then applies a number of
- transformations to it (e.g. optimizations). This layer is built atop
- of [ADE Framework](@ref gapi_detail_ade).
-* **Backends Layer** -- this is the lowest level layer, which lists a
- number of _Backends_. In contrast with the above two layers,
- backends are highly coupled with low-level platform details, with
-  each backend standing for a particular platform. A backend operates on a
- processed graph (coming from the graph compiler) and executes this
- graph optimally for a specific platform or device.
-
-# API layer {#gapi_api_layer}
-
-The API layer is what the user interacts with when defining and using a
-pipeline (a Computation in G-API terms). The API layer defines a set of
-G-API _dynamic_ objects which can be used as inputs, outputs, and
-intermediate data objects within a graph:
-* cv::GMat
-* cv::GScalar
-* cv::GArray (template class)
-
-API layer specifies a list of Operations which are defined on these
-data objects -- so-called kernels. See G-API [core](@ref gapi_core)
-and [imgproc](@ref gapi_imgproc) namespaces for details on which
-operations G-API provides by default.
-
-G-API is not limited to these operations only -- users can define
-their own kernels easily using a special macro G_TYPED_KERNEL().
-
-The API layer is also responsible for marshalling and storing operation
-parameters on pipeline creation. In addition to the aforementioned
-G-API dynamic objects, operations may also accept arbitrary
-parameters (more on this [here](@ref gapi_detail_params)), so the API
-layer captures their values and stores them internally until the moment of
-execution.
-
-Finally, cv::GComputation and cv::GCompiled are the remaining
-important components of API layer. The former wraps a series of G-API
-expressions into an object (graph), and the latter is a product of
-graph _compilation_ (see [this chapter](@ref gapi_detail_compiler) for
-details).
-
-# Graph compiler layer {#gapi_compiler}
-
-Every G-API computation is compiled before it executes. The compilation
-process is triggered in two ways:
-* _implicitly_, when cv::GComputation::apply() is used. In this case,
- graph compilation is then immediately followed by execution.
-* _explicitly_, when cv::GComputation::compile() is used. In this case,
- a cv::GCompiled object is returned which then can be invoked as a
- C++ functor.
-
-The first way is recommended for cases when the input data format is not
-known in advance -- e.g. when it comes from an arbitrary input file.
-The second way is recommended for deployment (production) scenarios
-where input data characteristics are usually predefined.
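-
-A sketch of both ways (a simplified, self-contained example; the file
-name and the blur operation are arbitrary):
-
-```cpp
-#include <opencv2/gapi.hpp>
-#include <opencv2/gapi/imgproc.hpp>
-#include <opencv2/imgcodecs.hpp>
-
-int main()
-{
-    cv::GMat in;
-    cv::GMat out = cv::gapi::blur(in, cv::Size(3, 3));
-    cv::GComputation cc(in, out);
-
-    cv::Mat in_mat = cv::imread("input.png"), out_mat;
-
-    // Implicit: compile for the actual input format and execute at once
-    cc.apply(in_mat, out_mat);
-
-    // Explicit: compile once for a known input format, then invoke as a functor
-    cv::GCompiled compiled = cc.compile(cv::descr_of(in_mat));
-    compiled(in_mat, out_mat);
-    return 0;
-}
-```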
-
-The graph compilation process is built atop the ADE Framework. Initially, a
-bipartite graph is generated from the expressions captured by the API layer.
-This graph contains nodes of two types: _Data_ and _Operations_. A graph
-always starts and ends with Data node(s), with Operation nodes
-in-between. Every Operation node has inputs and outputs, and both are Data
-nodes.
-
-After the initial graph is generated, it is actually processed by a
-number of graph transformations, called _passes_. ADE Framework acts
-as a compiler pass management engine, and passes are written
-specifically for G-API.
-
-There are different passes which check graph validity, refine details
-on operations and data, organize nodes into clusters ("Islands") based
-on affinity or user-specified regioning[TBD], and more. Backends also
-are able to inject backend-specific passes into the compilation
-process, see more on this in the [dedicated chapter](@ref gapi_detail_meta).
-
-The result of graph compilation is a compiled object, represented by the
-class cv::GCompiled. A new cv::GCompiled object is always created regardless
-of whether the compilation request was explicit or implicit (see
-above). Actual graph execution happens within cv::GCompiled and is
-determined by backends which participated in the graph compilation.
-
-@sa cv::GComputation::apply(), cv::GComputation::compile(), cv::GCompiled
-
-# Backends layer {#gapi_backends}
-
-The above diagram lists two backends, _OpenCV_ and _Fluid_. _OpenCV_
-is so-called "reference backend", which implements G-API operations
-using plain old OpenCV functions. This backend is useful for
-prototyping on a familiar development system. _Fluid_ is a plugin for
-cache-efficient execution on CPU -- it implements a different
-execution policy and operates with its own, special kernels. The Fluid
-backend makes it possible to achieve a smaller memory footprint and better
-memory locality when running on CPU.
-
-There may be more backends available, e.g. Halide, OpenCL, etc. --
-G-API provides a uniform internal API to develop backends, so any
-enthusiast or company is free to scale G-API to a new platform or
-accelerator. In terms of OpenCV infrastructure, every new backend is a
-new distinct OpenCV module, which extends G-API when built as a part
-of OpenCV.
-
-# Graph execution {#gapi_compiled}
-
-The way a graph is executed is defined by the backends selected for
-compilation. In fact, every backend builds its own execution script as
-the final stage of graph compilation process, when an executable
-(compiled) object is being generated. For example, in OpenCV backend,
-this script is just a topologically-sorted sequence of OpenCV
-functions to call; for Fluid backend, it is a similar thing -- a
-topologically sorted list of _Agents_ processing lines of input on
-every iteration.
-
-Graph execution is triggered in two ways:
-* via cv::GComputation::apply(), with graph compiled in-place exactly
- for the given input data;
-* via cv::GCompiled::operator()(), when the graph has been precompiled.
-
-Both methods are polymorphic and take a variadic number of arguments,
-with validity checks performed at runtime. If the number, shapes, or
-formats of the passed data objects differ from what is expected, a runtime
-exception is thrown. G-API also provides _typed_ wrappers to move
-these checks to the compile time -- see `cv::GComputationT<>`.
-
-G-API graph execution is declared stateless -- it means that a
-compiled functor (cv::GCompiled) acts like a pure C++ function and
-provides the same result for the same set of input arguments.
-
-Both execution methods take \f$N+M\f$ parameters, where \f$N\f$ is a
-number of inputs, and \f$M\f$ is a number of outputs on which a
-cv::GComputation is defined. Note that while G-API types (cv::GMat,
-etc) are used in definition, the execution methods accept OpenCV's
-traditional data types (like cv::Mat) which hold actual data -- see
-table in [parameter marshalling](@ref gapi_detail_params).
-
-@sa @ref gapi_impl, @ref gapi_kernel_api
diff --git a/modules/gapi/doc/20-kernel-api.markdown b/modules/gapi/doc/20-kernel-api.markdown
deleted file mode 100644
index 9a7cf39f67..0000000000
--- a/modules/gapi/doc/20-kernel-api.markdown
+++ /dev/null
@@ -1,188 +0,0 @@
-# Kernel API {#gapi_kernel_api}
-
-[TOC]
-
-# G-API Kernel API
-
-The core idea behind G-API is portability -- a pipeline built with
-G-API must be portable (or at least portable in principle). This means
-that either it works out of the box when compiled for a new platform,
-_or_ G-API provides the necessary tools to make it run there, with
-little-to-no changes in the algorithm itself.
-
-This idea can be achieved by separating kernel interface from its
-implementation. Once a pipeline is built using kernel interfaces, it
-becomes implementation-neutral -- the implementation details
-(i.e. which kernels to use) are supplied at a separate stage (graph
-compilation).
-
-Kernel-implementation hierarchy may look like:
-
-@dot Kernel API/implementation hierarchy example
-digraph {
- rankdir=BT;
- node [shape=record];
-
- ki_a [label="{ interface\nA}"];
- ki_b [label="{ interface\nB}"];
-
- {rank=same; ki_a ki_b};
-
- "CPU::A" -> ki_a [dir="forward"];
- "OpenCL::A" -> ki_a [dir="forward"];
- "Halide::A" -> ki_a [dir="forward"];
-
- "CPU::B" -> ki_b [dir="forward"];
- "OpenCL::B" -> ki_b [dir="forward"];
- "Halide::B" -> ki_b [dir="forward"];
-}
-@enddot
-
-A pipeline itself then can be expressed only in terms of `A`, `B`, and
-so on, and choosing which implementation to use in execution becomes
-an external parameter.
-
-# Defining a kernel {#gapi_defining_kernel}
-
-G-API provides a macro to define a new kernel interface --
-G_TYPED_KERNEL():
-
-@snippet samples/cpp/tutorial_code/gapi/doc_snippets/kernel_api_snippets.cpp filter2d_api
-
-This macro is a shortcut to a new type definition. It takes three
-arguments to register a new type, and requires type body to be present
-(see [below](@ref gapi_kernel_supp_info)). The macro arguments are:
-1. Kernel interface name -- also serves as a name of new type defined
- with this macro;
-2. Kernel signature -- an `std::function<>`-like signature which defines
- API of the kernel;
-3. Kernel's unique name -- used to identify the kernel when its type
-   information is stripped within the system.
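-
-Spelled out, the `filter2d_api` snippet referenced above boils down to
-something of this shape (a simplified sketch with fewer parameters than
-the real cv::filter2D; the type name and identifier are arbitrary):
-
-```cpp
-#include <opencv2/gapi.hpp>
-
-G_TYPED_KERNEL(GFilter2D,
-               <cv::GMat(cv::GMat, int, cv::Mat)>,  // kernel signature
-               "org.sample.custom.filter2d")        // unique name
-{
-    // The mandatory outMeta() -- see the next section for details:
-    static cv::GMatDesc outMeta(cv::GMatDesc in, int ddepth, cv::Mat) {
-        return in.withDepth(ddepth);
-    }
-};
-```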
-
-A kernel declaration may be seen as a function declaration -- in both cases
-a new entity must then be used according to the way it was defined.
-
-The kernel signature defines the kernel's usage syntax -- which parameters
-it takes during graph construction. Implementations can also use this
-signature to derive backend-specific callback signatures from it (see
-next chapter).
-
-A kernel may accept values of any type, and G-API _dynamic_ types are
-handled in a special way. All other types are opaque to G-API and
-passed to the kernel in `outMeta()` or in execution callbacks as-is.
-
-A kernel's return value can _only_ be of a G-API dynamic type -- cv::GMat,
-cv::GScalar, or `cv::GArray`. If an operation has more than one
-output, it should be wrapped into an `std::tuple<>` (which can contain
-only the mentioned G-API types). Arbitrary-output-number operations are
-not supported.
-
-Once a kernel is defined, it can be used in pipelines with the special,
-G-API-supplied method "::on()". This method has the same signature as
-defined in kernel, so this code:
-
-@snippet samples/cpp/tutorial_code/gapi/doc_snippets/kernel_api_snippets.cpp filter2d_on
-
-is a perfectly legal construction. This example has some verbosity,
-though, so usually a kernel declaration comes with a C++ function
-wrapper ("factory method") which enables optional parameters, more
-compact syntax, Doxygen comments, etc:
-
-@snippet samples/cpp/tutorial_code/gapi/doc_snippets/kernel_api_snippets.cpp filter2d_wrap
-
-so now it can be used like:
-
-@snippet samples/cpp/tutorial_code/gapi/doc_snippets/kernel_api_snippets.cpp filter2d_wrap_call
-
-# Extra information {#gapi_kernel_supp_info}
-
-In the current version, a kernel declaration body (everything within the
-curly braces) must contain a static function `outMeta()`. This function
-establishes a functional dependency between operation's input and
-output metadata.
-
-_Metadata_ is information about the data a kernel operates on. Since
-non-G-API types are opaque to G-API, G-API cares only about `G*` data
-descriptors (i.e. dimensions and format of cv::GMat, etc).
-
-`outMeta()` is also an example of how a kernel's signature can be
-transformed into a derived callback -- note that in this example,
-the `outMeta()` signature exactly follows the kernel signature (defined
-within the macro) but differs -- where the kernel expects cv::GMat,
-`outMeta()` takes and returns cv::GMatDesc (a G-API metadata structure
-for cv::GMat).
-
-The point of `outMeta()` is to propagate metadata information within
-the computation from inputs to outputs and to infer the metadata of internal
-(intermediate, temporary) data objects. This information is required
-for further pipeline optimizations, memory allocation, and other
-operations done by the G-API framework during graph compilation.
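-
-For example, a hypothetical operation which downscales an image by a
-factor of two would report the halved output size through its
-`outMeta()` (a sketch):
-
-```cpp
-#include <opencv2/gapi.hpp>
-
-G_TYPED_KERNEL(GHalve, <cv::GMat(cv::GMat)>, "org.sample.custom.halve")
-{
-    static cv::GMatDesc outMeta(cv::GMatDesc in) {
-        // Same depth and channels, half the size -- this is what lets
-        // G-API plan buffers for the downstream operations:
-        return in.withSize(cv::Size(in.size.width / 2, in.size.height / 2));
-    }
-};
-```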
-
-
-
-# Implementing a kernel {#gapi_kernel_implementing}
-
-Once a kernel is declared, its interface can be used to implement
-versions of this kernel in different backends. This concept is
-naturally projected from object-oriented programming
-"Interface/Implementation" idiom: an interface can be implemented
-multiple times, and different implementations of a kernel should be
-substitutable with each other without breaking the algorithm
-(pipeline) logic (Liskov Substitution Principle).
-
-Every backend defines its own way to implement a kernel interface.
-This way is regular, though -- whatever the plugin is, its kernel
-implementation must be "derived" from a kernel interface type.
-
-Kernel implementations are then organized into _kernel
-packages_. Kernel packages are passed to cv::GComputation::compile()
-as compile arguments, with some hints to G-API on how to select proper
-kernels (see more on this in "Heterogeneity"[TBD]).
-
-For example, the aforementioned `Filter2D` is implemented in the
-"reference" CPU (OpenCV) plugin this way (*NOTE* -- this is a
-simplified form with improper border handling):
-
-@snippet samples/cpp/tutorial_code/gapi/doc_snippets/kernel_api_snippets.cpp filter2d_ocv
-
-Note how the CPU (OpenCV) plugin has transformed the original kernel
-signature:
-- Input cv::GMat has been substituted with cv::Mat, holding actual input
- data for the underlying OpenCV function call;
-- Output cv::GMat has been transformed into an extra output parameter, thus
- `GCPUFilter2D::run()` takes one argument more than the original
- kernel signature.
-
-The basic intuition for a kernel developer here is _not to care_ where
-those cv::Mat objects come from instead of the original cv::GMat -- and
-just follow the signature conventions defined by the plugin. G-API
-will call this method during execution and supply all the necessary
-information (and forward the original opaque data as-is).
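-
-For the hypothetical `GFilter2D` interface sketched earlier, a CPU
-(OpenCV) implementation and its submission to a graph may look like this
-(a sketch; the computation `cc` and the cv::Mat objects are assumed to
-exist elsewhere):
-
-```cpp
-#include <opencv2/gapi/cpu/gcpukernel.hpp>
-
-GAPI_OCV_KERNEL(GCPUFilter2D, GFilter2D)
-{
-    static void run(const cv::Mat &in, int ddepth, const cv::Mat &coeffs,
-                    cv::Mat &out)  // the output comes as an extra parameter
-    {
-        cv::filter2D(in, out, ddepth, coeffs);
-    }
-};
-
-void run_with_custom_kernels(cv::GComputation &cc,
-                             const cv::Mat &in_mat, cv::Mat &out_mat)
-{
-    // Kernel packages are passed to the graph via compile arguments:
-    auto pkg = cv::gapi::kernels<GCPUFilter2D>();
-    cc.apply(in_mat, out_mat, cv::compile_args(pkg));
-}
-```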
-
-# Compound kernels {#gapi_kernel_compound}
-
-Sometimes a kernel is a single entity only at the API level. It is convenient
-for users, but on a particular implementation side it would be better to
-have multiple kernels (a subgraph) doing the thing instead. An example
-is goodFeaturesToTrack() -- while in the OpenCV backend it may remain a
-single kernel, with Fluid it becomes compound -- Fluid can handle Harris
-response calculation but can't do sparse non-maxima suppression and
-point extraction to an STL vector:
-
-
-
-A compound kernel _implementation_ can be defined using a generic
-macro GAPI_COMPOUND_KERNEL():
-
-@snippet samples/cpp/tutorial_code/gapi/doc_snippets/kernel_api_snippets.cpp compound
-
-
-
-
-It is important to distinguish a compound kernel from a G-API higher-order
-function, i.e. a C++ function which looks like a kernel but in fact
-generates a subgraph. The core difference is that a compound kernel is
-an _implementation detail_ and a kernel implementation may be either
-compound or not (depending on backend capabilities), while a
-higher-order function is a "macro" in terms of G-API and so cannot act as
-an interface which then needs to be implemented by a backend.
diff --git a/modules/gapi/doc/30-implementation.markdown b/modules/gapi/doc/30-implementation.markdown
deleted file mode 100644
index cdb5df413b..0000000000
--- a/modules/gapi/doc/30-implementation.markdown
+++ /dev/null
@@ -1,29 +0,0 @@
-# Implementation details {#gapi_impl}
-
-[TOC]
-
-# G-API Implementation details
-
-@note this section is still in progress.
-
-# API layer {#gapi_detail_api}
-
-## Expression unrolling {#gapi_detail_expr}
-
-## Parameter marshalling {#gapi_detail_params}
-
-## Operations representation {#gapi_detail_operations}
-
-# Graph compiler {#gapi_detail_compiler}
-
-## ADE basics {#gapi_detail_ade}
-
-## Graph model representation {#gapi_detail_gmodel}
-
-## G-API metadata and passes {#gapi_detail_meta}
-
-# Backends {#gapi_detail_backends}
-
-## Backend scope of work {#gapi_backend_scope}
-
-## Graph transformation {#gapi_backend_pass}
diff --git a/modules/gapi/doc/pics/demo.jpg b/modules/gapi/doc/pics/demo.jpg
deleted file mode 100644
index 742d135f7a..0000000000
Binary files a/modules/gapi/doc/pics/demo.jpg and /dev/null differ
diff --git a/modules/gapi/doc/pics/gapi_scheme.png b/modules/gapi/doc/pics/gapi_scheme.png
deleted file mode 100644
index 24271e3224..0000000000
Binary files a/modules/gapi/doc/pics/gapi_scheme.png and /dev/null differ
diff --git a/modules/gapi/doc/pics/render_example.png b/modules/gapi/doc/pics/render_example.png
deleted file mode 100644
index b2675988b3..0000000000
Binary files a/modules/gapi/doc/pics/render_example.png and /dev/null differ
diff --git a/modules/gapi/doc/slides/.gitignore b/modules/gapi/doc/slides/.gitignore
deleted file mode 100644
index 309f68f98d..0000000000
--- a/modules/gapi/doc/slides/.gitignore
+++ /dev/null
@@ -1,6 +0,0 @@
-*.bbl
-*.blg
-*.sty
-*.tex
-*-converted-to.pdf
-mtheme.sty/
diff --git a/modules/gapi/doc/slides/README.md b/modules/gapi/doc/slides/README.md
deleted file mode 100644
index 3c81e24fa6..0000000000
--- a/modules/gapi/doc/slides/README.md
+++ /dev/null
@@ -1,27 +0,0 @@
-# G-API Overview
-
-This is the latest overview slide deck on G-API.
-
-## Prerequisites
-
-- [Emacs] v24 or higher;
-- [Org]-mode 8.2.10;
-- `pdflatex`;
-- `texlive-latex-recommended` ([Beamer] package);
-- `texlive-font-utils` (`epstopdf`);
-- `wget` (for `get_sty.sh`).
-
-## Building
-
-1. Download and build the [Metropolis] theme with the script:
-
-```
-$ ./get_sty.sh
-```
-
-2. Now open `gapi_overview.org` with Emacs and press `C-c C-e l P`.
-
-[Emacs]: https://www.gnu.org/software/emacs/
-[Org]: https://orgmode.org/
-[Beamer]: https://ctan.org/pkg/beamer
-[Metropolis]: https://github.com/matze/mtheme
diff --git a/modules/gapi/doc/slides/gapi_overview.org b/modules/gapi/doc/slides/gapi_overview.org
deleted file mode 100644
index 7ed85baeca..0000000000
--- a/modules/gapi/doc/slides/gapi_overview.org
+++ /dev/null
@@ -1,961 +0,0 @@
-#+TITLE: OpenCV 4.4 Graph API
-#+AUTHOR: Dmitry Matveev\newline Intel Corporation
-#+OPTIONS: H:2 toc:t num:t
-#+LATEX_CLASS: beamer
-#+LATEX_CLASS_OPTIONS: [presentation]
-#+LATEX_HEADER: \usepackage{transparent} \usepackage{listings} \usepackage{pgfplots} \usepackage{mtheme.sty/beamerthememetropolis}
-#+LATEX_HEADER: \setbeamertemplate{frame footer}{OpenCV 4.4 G-API: Overview and programming by example}
-#+BEAMER_HEADER: \subtitle{Overview and programming by example}
-#+BEAMER_HEADER: \titlegraphic{ \vspace*{3cm}\hspace*{5cm} {\transparent{0.2}\includegraphics[height=\textheight]{ocv_logo.eps}}}
-#+COLUMNS: %45ITEM %10BEAMER_ENV(Env) %10BEAMER_ACT(Act) %4BEAMER_COL(Col) %8BEAMER_OPT(Opt)
-
-* G-API: What is, why, what's for?
-
-** OpenCV evolution in one slide
-
-*** Version 1.x -- Library inception
-
-- Just a set of CV functions + helpers around (visualization, IO);
-
-*** Version 2.x -- Library rewrite
-
-- OpenCV meets C++, ~cv::Mat~ replaces ~IplImage*~;
-
-*** Version 3.0 -- Welcome Transparent API (T-API)
-
-- ~cv::UMat~ is introduced as a /transparent/ addition to
- ~cv::Mat~;
-- With ~cv::UMat~, an OpenCL kernel can be enqueued instead of
- immediately running C code;
-- ~cv::UMat~ data is kept on a /device/ until explicitly queried.
-
-** OpenCV evolution in one slide (cont'd)
-# FIXME: Learn proper page-breaking!
-
-*** Version 4.0 -- Welcome Graph API (G-API)
-
-- A new separate module (not a full library rewrite);
-- A framework (or even a /meta/-framework);
-- Usage model:
- - /Express/ an image/vision processing graph and then /execute/ it;
- - Fine-tune execution without changes in the graph;
-- Similar to Halide -- separates logic from
- platform details.
-- More than Halide:
- - Kernels can be written in unconstrained platform-native code;
- - Halide can serve as a backend (one of many).
-
-** OpenCV evolution in one slide (cont'd)
-# FIXME: Learn proper page-breaking!
-
-*** Version 4.2 -- New horizons
-
-- Introduced in-graph inference via OpenVINO™ Toolkit;
-- Introduced video-oriented Streaming execution mode;
-- Extended focus from individual image processing to the full
- application pipeline optimization.
-
-*** Version 4.4 -- More on video
-
-- Introduced a notion of stateful kernels;
- - The road to object tracking, background subtraction, etc. in the
- graph;
-- Added more video-oriented operations (feature detection, Optical
- flow).
-
-** Why G-API?
-
-*** Why introduce a new execution model?
-
-- Ultimately it is all about optimizations;
- - or at least about a /possibility/ to optimize;
-- A CV algorithm is usually not a single function call, but a
- composition of functions;
-- Different models operate at different levels of knowledge on the
- algorithm (problem) we run.
-
-** Why G-API? (cont'd)
-# FIXME: Learn proper page-breaking!
-
-*** Why introduce a new execution model?
-
-- *Traditional* -- every function can be optimized (e.g. vectorized)
-  and parallelized, the rest is up to the programmer to care about.
-- *Queue-based* -- kernels are enqueued dynamically with no guarantee
- where the end is or what is called next;
-- *Graph-based* -- nearly all information is there, some compiler
- magic can be done!
-
-** What is G-API for?
-
-*** Bring the value of graph model with OpenCV where it makes sense:
-
-- *Memory consumption* can be reduced dramatically;
-- *Memory access* can be optimized to maximize cache reuse;
-- *Parallelism* can be applied automatically where it is hard to do
- it manually;
- - It also becomes more efficient when working with graphs;
-- *Heterogeneity* gets extra benefits like:
- - Avoiding unnecessary data transfers;
- - Shadowing transfer costs with parallel host co-execution;
- - Improving system throughput with frame-level pipelining.
-
-* Programming with G-API
-
-** G-API Basics
-
-*** G-API Concepts
-
-- *Graphs* are built by applying /operations/ to /data objects/;
- - API itself has no "graphs", it is expression-based instead;
-- *Data objects* do not hold actual data, only capture /dependencies/;
-- *Operations* consume and produce data objects.
-- A graph is defined by specifying its /boundaries/ with data objects:
- - What data objects are /inputs/ to the graph?
- - What are its /outputs/?
-
-** The code is worth a thousand words
- :PROPERTIES:
- :BEAMER_opt: shrink=42
- :END:
-
-#+BEGIN_SRC C++
-#include <opencv2/gapi.hpp>                            // G-API framework header
-#include <opencv2/gapi/imgproc.hpp>                    // cv::gapi::blur()
-#include <opencv2/highgui.hpp>                         // cv::imread/imwrite
-
-int main(int argc, char *argv[]) {
- if (argc < 3) return 1;
-
- cv::GMat in; // Express the graph:
- cv::GMat out = cv::gapi::blur(in, cv::Size(3,3)); // `out` is a result of `blur` of `in`
-
- cv::Mat in_mat = cv::imread(argv[1]); // Get the real data
- cv::Mat out_mat; // Output buffer (may be empty)
-
- cv::GComputation(cv::GIn(in), cv::GOut(out)) // Declare a graph from `in` to `out`
- .apply(cv::gin(in_mat), cv::gout(out_mat)); // ...and run it immediately
-
- cv::imwrite(argv[2], out_mat); // Save the result
- return 0;
-}
-#+END_SRC
-
-** The code is worth a thousand words
- :PROPERTIES:
- :BEAMER_opt: shrink=42
- :END:
-
-*** Traditional OpenCV :B_block:BMCOL:
- :PROPERTIES:
- :BEAMER_env: block
- :BEAMER_col: 0.45
- :END:
-#+BEGIN_SRC C++
-#include <opencv2/core.hpp>
-#include <opencv2/imgproc.hpp>
-
-#include <opencv2/highgui.hpp>
-
-int main(int argc, char *argv[]) {
- using namespace cv;
- if (argc != 3) return 1;
-
- Mat in_mat = imread(argv[1]);
- Mat gx, gy;
-
- Sobel(in_mat, gx, CV_32F, 1, 0);
- Sobel(in_mat, gy, CV_32F, 0, 1);
-
- Mat mag, out_mat;
- sqrt(gx.mul(gx) + gy.mul(gy), mag);
- mag.convertTo(out_mat, CV_8U);
-
- imwrite(argv[2], out_mat);
- return 0;
-}
-#+END_SRC
-
-*** OpenCV G-API :B_block:BMCOL:
- :PROPERTIES:
- :BEAMER_env: block
- :BEAMER_col: 0.5
- :END:
-#+BEGIN_SRC C++
-#include <opencv2/gapi.hpp>
-#include <opencv2/gapi/core.hpp>
-#include <opencv2/gapi/imgproc.hpp>
-#include <opencv2/highgui.hpp>
-
-int main(int argc, char *argv[]) {
- using namespace cv;
- if (argc != 3) return 1;
-
- GMat in;
- GMat gx = gapi::Sobel(in, CV_32F, 1, 0);
- GMat gy = gapi::Sobel(in, CV_32F, 0, 1);
- GMat mag = gapi::sqrt( gapi::mul(gx, gx)
- + gapi::mul(gy, gy));
- GMat out = gapi::convertTo(mag, CV_8U);
- GComputation sobel(GIn(in), GOut(out));
-
- Mat in_mat = imread(argv[1]), out_mat;
- sobel.apply(in_mat, out_mat);
- imwrite(argv[2], out_mat);
- return 0;
-}
-#+END_SRC
-
-** The code is worth a thousand words (cont'd)
-# FIXME: sections!!!
-
-*** What have we just learned?
-
-- G-API functions mimic their traditional OpenCV ancestors;
-- No real data is required to construct a graph;
-- Graph construction and graph execution are separate steps.
-
-*** What else?
-
-- Graph is first /expressed/ and then /captured/ in an object;
-- Graph constructor defines /protocol/; user can pass vectors of
- inputs/outputs like
- #+BEGIN_SRC C++
-cv::GComputation(cv::GIn(...), cv::GOut(...))
- #+END_SRC
-- Calls to ~.apply()~ must conform to the graph's protocol
-
-** On data objects
-
-Graph *protocol* defines what arguments a computation was defined on
-(both inputs and outputs), and what the *shapes* (or types) of
-those arguments are:
-
- | *Shape*      | *Argument*       | Size                        |
- |--------------+------------------+-----------------------------|
- | ~GMat~       | ~Mat~            | Static; defined during      |
- |              |                  | graph compilation           |
- |--------------+------------------+-----------------------------|
- | ~GScalar~    | ~Scalar~         | 4 x ~double~                |
- |--------------+------------------+-----------------------------|
- | ~GArray<T>~  | ~std::vector<T>~ | Dynamic; defined at runtime |
- |--------------+------------------+-----------------------------|
- | ~GOpaque<T>~ | ~T~              | Static, ~sizeof(T)~         |
-
-~GScalar~ may be value-initialized at construction time to allow
- expressions like ~GMat a = 2*(b + 1)~.
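-
-Below is a minimal sketch of a mixed-shape protocol (the kernel choice
-and names here are illustrative):
-
-#+LaTeX: {\footnotesize
-#+BEGIN_SRC C++
-cv::GMat in;                                   // GMat input
-cv::GScalar thr;                               // GScalar input
-cv::GMat bin = cv::gapi::cmpGT(in, thr);       // GMat output
-
-cv::GComputation thresh(cv::GIn(in, thr), cv::GOut(bin));
-
-cv::Mat in_mat = cv::imread("input.png"), out_mat;
-thresh.apply(cv::gin(in_mat, cv::Scalar(127)), // run-time arguments must
-             cv::gout(out_mat));               // match the protocol 1:1
-#+END_SRC
-#+LaTeX: }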
-
-** On operations and kernels
- :PROPERTIES:
- :BEAMER_opt: shrink=22
- :END:
-
-*** :B_block:BMCOL:
- :PROPERTIES:
- :BEAMER_env: block
- :BEAMER_col: 0.45
- :END:
-
-- Graphs are built with *Operations* over virtual *Data*;
-- *Operations* define interfaces (literally);
-- *Kernels* are implementations of *Operations* (like in OOP);
-- An *Operation* is platform-agnostic, a *kernel* is not;
-- *Kernels* are implemented for *Backends*, the latter provide
- APIs to write kernels;
-- Users can /add/ their *own* operations and kernels,
- and also /redefine/ "standard" kernels their *own* way.
-
-*** :B_block:BMCOL:
- :PROPERTIES:
- :BEAMER_env: block
- :BEAMER_col: 0.45
- :END:
-
-#+BEGIN_SRC dot :file "000-ops-kernels.eps" :cmdline "-Kdot -Teps"
-digraph G {
-node [shape=box];
-rankdir=BT;
-
-Gr [label="Graph"];
-Op [label="Operation\nA"];
-{rank=same
-Impl1 [label="Kernel\nA:2"];
-Impl2 [label="Kernel\nA:1"];
-}
-
-Op -> Gr [dir=back, label="'consists of'"];
-Impl1 -> Op [];
-Impl2 -> Op [label="'is implemented by'"];
-
-node [shape=note,style=dashed];
-{rank=same
-Op;
-CommentOp [label="Abstract:\ndeclared via\nG_API_OP()"];
-}
-{rank=same
-Comment1 [label="Platform:\ndefined with\nOpenCL backend"];
-Comment2 [label="Platform:\ndefined with\nOpenCV backend"];
-}
-
-CommentOp -> Op [constraint=false, style=dashed, arrowhead=none];
-Comment1 -> Impl1 [style=dashed, arrowhead=none];
-Comment2 -> Impl2 [style=dashed, arrowhead=none];
-}
-#+END_SRC
-
-** On operations and kernels (cont'd)
-
-*** Defining an operation
-
-- A type name (every operation is a C++ type);
-- Operation signature (similar to ~std::function<>~);
-- Operation identifier (a string);
-- Metadata callback -- describes the output value format(s),
-  given the inputs and arguments;
-- Use ~OpType::on(...)~ to apply a new operation ~OpType~ when
-  constructing graphs.
-
-#+LaTeX: {\footnotesize
-#+BEGIN_SRC C++
-G_API_OP(GSqrt,<GMat(GMat)>,"org.opencv.core.math.sqrt") {
- static GMatDesc outMeta(GMatDesc in) { return in; }
-};
-#+END_SRC
-#+LaTeX: }
-
-** On operations and kernels (cont'd)
-
-*** ~GSqrt~ vs. ~cv::gapi::sqrt()~
-
-- How does a *type* relate to the *functions* from the example?
-- These functions are just wrappers over ~::on~:
- #+LaTeX: {\scriptsize
- #+BEGIN_SRC C++
- G_API_OP(GSqrt,<GMat(GMat)>,"org.opencv.core.math.sqrt") {
- static GMatDesc outMeta(GMatDesc in) { return in; }
- };
- GMat gapi::sqrt(const GMat& src) { return GSqrt::on(src); }
- #+END_SRC
- #+LaTeX: }
-- Why -- Doxygen, default parameters, 1:n mapping:
- #+LaTeX: {\scriptsize
- #+BEGIN_SRC C++
- cv::GMat custom::unsharpMask(const cv::GMat &src,
- const int sigma,
- const float strength) {
- cv::GMat blurred = cv::gapi::medianBlur(src, sigma);
- cv::GMat laplacian = cv::gapi::Laplacian(blurred, CV_8U);
- return (src - (laplacian * strength));
- }
- #+END_SRC
- #+LaTeX: }
-
-** On operations and kernels (cont'd)
-
-*** Implementing an operation
-
-- Depends on the backend and its API;
-- Common part for all backends: refer to the operation being implemented
-  using its /type/.
-
-*** OpenCV backend
-- OpenCV backend is the default one: an OpenCV kernel is a wrapped OpenCV
-  function:
- #+LaTeX: {\footnotesize
- #+BEGIN_SRC C++
- GAPI_OCV_KERNEL(GCPUSqrt, cv::gapi::core::GSqrt) {
- static void run(const cv::Mat& in, cv::Mat &out) {
- cv::sqrt(in, out);
- }
- };
- #+END_SRC
- #+LaTeX: }
-
-** On operations and kernels (cont'd)
-# FIXME!!!
-
-*** Fluid backend
-
-- Fluid backend operates with row-by-row kernels and schedules its
- execution to optimize data locality:
- #+LaTeX: {\footnotesize
- #+BEGIN_SRC C++
- GAPI_FLUID_KERNEL(GFluidSqrt, cv::gapi::core::GSqrt, false) {
- static const int Window = 1;
- static void run(const View &in, Buffer &out) {
- hal::sqrt32f(in .InLine <float>(0),
- out.OutLine<float>(0),
- out.length());
- }
- };
- #+END_SRC
- #+LaTeX: }
-- Note that ~run~'s signature differs, but is still derived from the
-  operation signature.
-
-** On operations and kernels (cont'd)
-
-*** Specifying which kernels to use
-
-- Graph execution model is defined by kernels which are available/used;
-- Kernels can be specified via the graph compilation arguments:
- #+LaTeX: {\footnotesize
- #+BEGIN_SRC C++
- #include <opencv2/gapi/fluid/core.hpp>
- #include <opencv2/gapi/fluid/imgproc.hpp>
- ...
- auto pkg = cv::gapi::combine(cv::gapi::core::fluid::kernels(),
- cv::gapi::imgproc::fluid::kernels());
- sobel.apply(in_mat, out_mat, cv::compile_args(pkg));
- #+END_SRC
- #+LaTeX: }
-- Users can combine kernels of different backends and G-API will partition
- the execution among those automatically.
-
-** Heterogeneity in G-API
- :PROPERTIES:
- :BEAMER_opt: shrink=35
- :END:
-*** Automatic subgraph partitioning in G-API
-*** :B_block:BMCOL:
- :PROPERTIES:
- :BEAMER_env: block
- :BEAMER_col: 0.18
- :END:
-
-#+BEGIN_SRC dot :file "010-hetero-init.eps" :cmdline "-Kdot -Teps"
-digraph G {
-rankdir=TB;
-ranksep=0.3;
-
-node [shape=box margin=0 height=0.25];
-A; B; C;
-
-node [shape=ellipse];
-GMat0;
-GMat1;
-GMat2;
-GMat3;
-
-GMat0 -> A -> GMat1 -> B -> GMat2;
-GMat2 -> C;
-GMat0 -> C -> GMat3
-
-subgraph cluster {style=invis; A; GMat1; B; GMat2; C};
-}
-#+END_SRC
-
-The initial graph: operations are not resolved yet.
-
-*** :B_block:BMCOL:
- :PROPERTIES:
- :BEAMER_env: block
- :BEAMER_col: 0.18
- :END:
-
-#+BEGIN_SRC dot :file "011-hetero-homo.eps" :cmdline "-Kdot -Teps"
-digraph G {
-rankdir=TB;
-ranksep=0.3;
-
-node [shape=box margin=0 height=0.25];
-A; B; C;
-
-node [shape=ellipse];
-GMat0;
-GMat1;
-GMat2;
-GMat3;
-
-GMat0 -> A -> GMat1 -> B -> GMat2;
-GMat2 -> C;
-GMat0 -> C -> GMat3
-
-subgraph cluster {style=filled;color=azure2; A; GMat1; B; GMat2; C};
-}
-#+END_SRC
-
-All operations are handled by the same backend.
-
-*** :B_block:BMCOL:
- :PROPERTIES:
- :BEAMER_env: block
- :BEAMER_col: 0.18
- :END:
-
-#+BEGIN_SRC dot :file "012-hetero-a.eps" :cmdline "-Kdot -Teps"
-digraph G {
-rankdir=TB;
-ranksep=0.3;
-
-node [shape=box margin=0 height=0.25];
-A; B; C;
-
-node [shape=ellipse];
-GMat0;
-GMat1;
-GMat2;
-GMat3;
-
-GMat0 -> A -> GMat1 -> B -> GMat2;
-GMat2 -> C;
-GMat0 -> C -> GMat3
-
-subgraph cluster_1 {style=filled;color=azure2; A; GMat1; B; }
-subgraph cluster_2 {style=filled;color=ivory2; C};
-}
-#+END_SRC
-
-~A~ & ~B~ are of backend ~1~, ~C~ is of backend ~2~.
-
-*** :B_block:BMCOL:
- :PROPERTIES:
- :BEAMER_env: block
- :BEAMER_col: 0.18
- :END:
-
-#+BEGIN_SRC dot :file "013-hetero-b.eps" :cmdline "-Kdot -Teps"
-digraph G {
-rankdir=TB;
-ranksep=0.3;
-
-node [shape=box margin=0 height=0.25];
-A; B; C;
-
-node [shape=ellipse];
-GMat0;
-GMat1;
-GMat2;
-GMat3;
-
-GMat0 -> A -> GMat1 -> B -> GMat2;
-GMat2 -> C;
-GMat0 -> C -> GMat3
-
-subgraph cluster_1 {style=filled;color=azure2; A};
-subgraph cluster_2 {style=filled;color=ivory2; B};
-subgraph cluster_3 {style=filled;color=azure2; C};
-}
-#+END_SRC
-
-~A~ & ~C~ are of backend ~1~, ~B~ is of backend ~2~.
-
-** Heterogeneity in G-API
-
-*** Heterogeneity summary
-
-- G-API automatically partitions its graph in subgraphs (called "islands")
- based on the available kernels;
-- Adjacent kernels taken from the same backend are "fused" into the same
- "island";
-- G-API implements a two-level execution model:
- - Islands are executed at the top level by a G-API's *Executor*;
- - Island internals are run at the bottom level by its *Backend*;
-- G-API fully delegates the low-level execution and memory management to backends.
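-
-A minimal sketch of the above, reusing the ~sobel~ computation from the
-earlier example (assuming OpenCV/CPU and Fluid packages are built for
-these kernels; ~use_only~ pins resolution to the given set):
-
-#+LaTeX: {\footnotesize
-#+BEGIN_SRC C++
-// Adjacent kernels from the same backend fuse into one "island":
-auto pkg = cv::gapi::combine(cv::gapi::core::cpu::kernels(),
-                             cv::gapi::imgproc::fluid::kernels());
-sobel.apply(in_mat, out_mat,
-            cv::compile_args(cv::gapi::use_only{pkg}));
-#+END_SRC
-#+LaTeX: }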
-
-* Inference and Streaming
-
-** Inference with G-API
-
-*** In-graph inference example
-
-- Starting with OpenCV 4.2 (2019), G-API allows integrating ~infer~
-  operations into the graph:
- #+LaTeX: {\scriptsize
- #+BEGIN_SRC C++
- G_API_NET(ObjDetect, <cv::GMat(cv::GMat)>, "pdf.example.od");
-
- cv::GMat in;
- cv::GMat blob = cv::gapi::infer<ObjDetect>(in);
- cv::GOpaque<cv::Size> size = cv::gapi::streaming::size(in);
- cv::GArray<cv::Rect> objs = cv::gapi::streaming::parseSSD(blob, size);
- cv::GComputation pipeline(cv::GIn(in), cv::GOut(objs));
- #+END_SRC
- #+LaTeX: }
-- Starting with OpenCV 4.5 (2020), G-API will provide more streaming-
- and NN-oriented operations out of the box.
-
-** Inference with G-API
-
-*** What is the difference?
-
-- ~ObjDetect~ is not an operation, ~cv::gapi::infer~ is;
-- ~cv::gapi::infer<T>~ is a *generic* operation, where ~T=ObjDetect~ describes
- the calling convention:
- - How many inputs the network consumes,
- - How many outputs the network produces.
-- Inference data types are ~GMat~ only:
- - Representing an image, then preprocessed automatically;
- - Representing a blob (n-dimensional ~Mat~), then passed as-is.
-- Inference *backends* only need to implement a single generic operation ~infer~.
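-
-For example, a two-output topology can be declared like this (a sketch;
-the tag string and output layout here are illustrative):
-
-#+LaTeX: {\footnotesize
-#+BEGIN_SRC C++
-// The tuple return type captures the calling convention:
-// one GMat input, two GMat outputs (e.g. age and gender blobs).
-G_API_NET(AgeGender, <std::tuple<cv::GMat, cv::GMat>(cv::GMat)>,
-          "example.age-gender");
-
-cv::GMat in;
-cv::GMat age, gender;
-std::tie(age, gender) = cv::gapi::infer<AgeGender>(in);
-#+END_SRC
-#+LaTeX: }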
-
-** Inference with G-API
-
-*** But how does it run?
-
-- Since ~infer~ is an *Operation*, backends may provide *Kernels* implementing it;
-- The only publicly available inference backend now is *OpenVINO™*:
- - Brings its ~infer~ kernel atop the Inference Engine;
-- NN model data is passed through G-API compile arguments (like kernels);
-- Every NN backend provides its own structure to configure the network (like
- a kernel API).
-
-** Inference with G-API
-
-*** Passing OpenVINO™ parameters to G-API
-
-- ~ObjDetect~ example:
- #+LaTeX: {\footnotesize
- #+BEGIN_SRC C++
- auto face_net = cv::gapi::ie::Params<ObjDetect> {
- face_xml_path, // path to the topology IR
- face_bin_path, // path to the topology weights
- face_device_string, // OpenVINO plugin (device) string
- };
- auto networks = cv::gapi::networks(face_net);
- pipeline.compile(.., cv::compile_args(..., networks));
- #+END_SRC
- #+LaTeX: }
-- ~AgeGender~ requires binding Op's outputs to NN layers:
- #+LaTeX: {\footnotesize
- #+BEGIN_SRC C++
- auto age_net = cv::gapi::ie::Params<AgeGender> {
- ...
- }.cfgOutputLayers({"age_conv3", "prob"}); // array !
- #+END_SRC
- #+LaTeX: }
-
-** Streaming with G-API
-
-#+BEGIN_SRC dot :file 020-fd-demo.eps :cmdline "-Kdot -Teps"
-digraph {
- rankdir=LR;
- node [shape=box];
-
- cap [label=Capture];
- dec [label=Decode];
- res [label=Resize];
- cnn [label=Infer];
- vis [label=Visualize];
-
- cap -> dec;
- dec -> res;
- res -> cnn;
- cnn -> vis;
-}
-#+END_SRC
-Anatomy of a regular video analytics application
-
-** Streaming with G-API
-
-#+BEGIN_SRC dot :file 021-fd-serial.eps :cmdline "-Kdot -Teps"
-digraph {
- node [shape=box margin=0 width=0.3 height=0.4]
- nodesep=0.2;
- rankdir=LR;
-
- subgraph cluster0 {
- colorscheme=blues9
- pp [label="..." shape=plaintext];
- v0 [label=V];
- label="Frame N-1";
- color=7;
- }
-
- subgraph cluster1 {
- colorscheme=blues9
- c1 [label=C];
- d1 [label=D];
- r1 [label=R];
- i1 [label=I];
- v1 [label=V];
- label="Frame N";
- color=6;
- }
-
- subgraph cluster2 {
- colorscheme=blues9
- c2 [label=C];
- nn [label="..." shape=plaintext];
- label="Frame N+1";
- color=5;
- }
-
- c1 -> d1 -> r1 -> i1 -> v1;
-
- pp-> v0;
- v0 -> c1 [style=invis];
- v1 -> c2 [style=invis];
- c2 -> nn;
-}
-#+END_SRC
-Serial execution of the sample video analytics application
-
-** Streaming with G-API
- :PROPERTIES:
- :BEAMER_opt: shrink
- :END:
-
-#+BEGIN_SRC dot :file 022-fd-pipelined.eps :cmdline "-Kdot -Teps"
-digraph {
- nodesep=0.2;
- ranksep=0.2;
- node [margin=0 width=0.4 height=0.2];
- node [shape=plaintext]
- Camera [label="Camera:"];
- GPU [label="GPU:"];
- FPGA [label="FPGA:"];
- CPU [label="CPU:"];
- Time [label="Time:"];
- t6 [label="T6"];
- t7 [label="T7"];
- t8 [label="T8"];
- t9 [label="T9"];
- t10 [label="T10"];
- tnn [label="..."];
-
- node [shape=box margin=0 width=0.4 height=0.4 colorscheme=blues9]
- node [color=9] V3;
- node [color=8] F4; V4;
- node [color=7] DR5; F5; V5;
- node [color=6] C6; DR6; F6; V6;
- node [color=5] C7; DR7; F7; V7;
- node [color=4] C8; DR8; F8;
- node [color=3] C9; DR9;
- node [color=2] C10;
-
- {rank=same; rankdir=LR; Camera C6 C7 C8 C9 C10}
- Camera -> C6 -> C7 -> C8 -> C9 -> C10 [style=invis];
-
- {rank=same; rankdir=LR; GPU DR5 DR6 DR7 DR8 DR9}
- GPU -> DR5 -> DR6 -> DR7 -> DR8 -> DR9 [style=invis];
-
- C6 -> DR5 [style=invis];
- C6 -> DR6 [constraint=false];
- C7 -> DR7 [constraint=false];
- C8 -> DR8 [constraint=false];
- C9 -> DR9 [constraint=false];
-
- {rank=same; rankdir=LR; FPGA F4 F5 F6 F7 F8}
- FPGA -> F4 -> F5 -> F6 -> F7 -> F8 [style=invis];
-
- DR5 -> F4 [style=invis];
- DR5 -> F5 [constraint=false];
- DR6 -> F6 [constraint=false];
- DR7 -> F7 [constraint=false];
- DR8 -> F8 [constraint=false];
-
- {rank=same; rankdir=LR; CPU V3 V4 V5 V6 V7}
- CPU -> V3 -> V4 -> V5 -> V6 -> V7 [style=invis];
-
- F4 -> V3 [style=invis];
- F4 -> V4 [constraint=false];
- F5 -> V5 [constraint=false];
- F6 -> V6 [constraint=false];
- F7 -> V7 [constraint=false];
-
- {rank=same; rankdir=LR; Time t6 t7 t8 t9 t10 tnn}
- Time -> t6 -> t7 -> t8 -> t9 -> t10 -> tnn [style=invis];
-
- CPU -> Time [style=invis];
- V3 -> t6 [style=invis];
- V4 -> t7 [style=invis];
- V5 -> t8 [style=invis];
- V6 -> t9 [style=invis];
- V7 -> t10 [style=invis];
-}
-#+END_SRC
-Pipelined execution for the video analytics application
-
-** Streaming with G-API: Example
-
-**** Serial mode (4.0) :B_block:BMCOL:
- :PROPERTIES:
- :BEAMER_env: block
- :BEAMER_col: 0.45
- :END:
-#+LaTeX: {\tiny
-#+BEGIN_SRC C++
-pipeline = cv::GComputation(...);
-
-cv::VideoCapture cap(input);
-cv::Mat in_frame;
-std::vector<cv::Rect> out_faces;
-
-while (cap.read(in_frame)) {
- pipeline.apply(cv::gin(in_frame),
- cv::gout(out_faces),
- cv::compile_args(kernels,
- networks));
- // Process results
- ...
-}
-#+END_SRC
-#+LaTeX: }
-
-**** Streaming mode (since 4.2) :B_block:BMCOL:
- :PROPERTIES:
- :BEAMER_env: block
- :BEAMER_col: 0.45
- :END:
-#+LaTeX: {\tiny
-#+BEGIN_SRC C++
-pipeline = cv::GComputation(...);
-
-auto in_src = cv::gapi::wip::make_src
-    <cv::gapi::wip::GCaptureSource>(input);
-auto cc = pipeline.compileStreaming
-    (cv::compile_args(kernels, networks));
-cc.setSource(cv::gin(in_src));
-cc.start();
-
-std::vector<cv::Rect> out_faces;
-while (cc.pull(cv::gout(out_faces))) {
- // Process results
- ...
-}
-#+END_SRC
-#+LaTeX: }
-
-**** More information
-
-#+LaTeX: {\footnotesize
-https://opencv.org/hybrid-cv-dl-pipelines-with-opencv-4-4-g-api/
-#+LaTeX: }
-
-* Latest features
-** Latest features
-*** Python API
-
-- An initial Python3 binding is now available in ~master~ (future 4.5);
-- Only basic CV functionality is supported (~core~ & ~imgproc~ namespaces,
- selecting backends);
-- Adding more programmability, inference, and streaming is next.
-
-** Latest features
-*** Python API
-
-#+LaTeX: {\footnotesize
-#+BEGIN_SRC Python
-import numpy as np
-import cv2 as cv
-
-sz = (1280, 720)
-in1 = np.random.randint(0, 100, sz).astype(np.uint8)
-in2 = np.random.randint(0, 100, sz).astype(np.uint8)
-
-g_in1 = cv.GMat()
-g_in2 = cv.GMat()
-g_out = cv.gapi.add(g_in1, g_in2)
-gr = cv.GComputation(g_in1, g_in2, g_out)
-
-pkg = cv.gapi.core.fluid.kernels()
-out = gr.apply(in1, in2, args=cv.compile_args(pkg))
-#+END_SRC
-#+LaTeX: }
-
-* Understanding the "G-Effect"
-
-** Understanding the "G-Effect"
-
-*** What is "G-Effect"?
-
-- G-API is not only an API, but also an /implementation/;
- - i.e. it does some work already!
-- We call "G-Effect" any measurable improvement which G-API demonstrates
- against traditional methods;
-- So far the list is:
- - Memory consumption;
- - Performance;
- - Programmer efforts.
-
-Note: in the following slides, all measurements are taken on
-Intel\textregistered{} Core\texttrademark-i5 6600 CPU.
-
-** Understanding the "G-Effect"
-# FIXME
-
-*** Memory consumption: Sobel Edge Detector
-
-- G-API/Fluid backend is designed to minimize footprint:
-#+LaTeX: {\footnotesize
-| Input | OpenCV | G-API/Fluid | Factor |
-| | MiB | MiB | Times |
-|-------------+--------+-------------+--------|
-| 512 x 512 | 17.33 | 0.59 | 28.9x |
-| 640 x 480 | 20.29 | 0.62 | 32.8x |
-| 1280 x 720 | 60.73 | 0.72 | 83.9x |
-| 1920 x 1080 | 136.53 | 0.83 | 164.7x |
-| 3840 x 2160 | 545.88 | 1.22 | 447.4x |
-#+LaTeX: }
-- The detector itself can be written manually in two ~for~
- loops, but G-API covers cases more complex than that;
-- OpenCV code requires changes to shrink footprint.
-
-** Understanding the "G-Effect"
-
-*** Performance: Sobel Edge Detector
-
-- G-API/Fluid backend also optimizes cache reuse:
-
-#+LaTeX: {\footnotesize
-| Input | OpenCV | G-API/Fluid | Factor |
-| | ms | ms | Times |
-|-------------+--------+-------------+--------|
-| 320 x 240 | 1.16 | 0.53 | 2.17x |
-| 640 x 480 | 5.66 | 1.89 | 2.99x |
-| 1280 x 720 | 17.24 | 5.26 | 3.28x |
-| 1920 x 1080 | 39.04 | 12.29 | 3.18x |
-| 3840 x 2160 | 219.57 | 51.22 | 4.29x |
-#+LaTeX: }
-
-- The more data is processed, the bigger the "G-Effect" is.
-
-** Understanding the "G-Effect"
-
-*** Relative speed-up based on cache efficiency
-
-#+BEGIN_LATEX
-\begin{figure}
- \begin{tikzpicture}
- \begin{axis}[
- xlabel={Image size},
- ylabel={Relative speed-up},
- nodes near coords,
- width=0.8\textwidth,
- xtick=data,
- xticklabels={QVGA, VGA, HD, FHD, UHD},
- height=4.5cm,
- ]
-
- \addplot plot coordinates {(1, 1.0) (2, 1.38) (3, 1.51) (4, 1.46) (5, 1.97)};
-
- \end{axis}
- \end{tikzpicture}
-\end{figure}
-#+END_LATEX
-
-The higher the resolution, the higher the relative speed-up (with the
-speed-up on QVGA taken as 1.0).
-
-* Resources on G-API
-
-** Resources on G-API
- :PROPERTIES:
- :BEAMER_opt: shrink
- :END:
-*** Repository
-
-- https://github.com/opencv/opencv (see ~modules/gapi~)
-
-*** Article
-
-- https://opencv.org/hybrid-cv-dl-pipelines-with-opencv-4-4-g-api/
-
-*** Documentation
-
-- https://docs.opencv.org/4.4.0/d0/d1e/gapi.html
-
-*** Tutorials
-- https://docs.opencv.org/4.4.0/df/d7e/tutorial_table_of_content_gapi.html
-
-* Thank you!
diff --git a/modules/gapi/doc/slides/get_sty.sh b/modules/gapi/doc/slides/get_sty.sh
deleted file mode 100755
index 0b97cf3ece..0000000000
--- a/modules/gapi/doc/slides/get_sty.sh
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-
-set -e
-
-MTHEME_VER=2fa6084b9d34fec9d2d5470eb9a17d0bf712b6c8
-MTHEME_DIR=mtheme.sty
-
-function make_sty {
- if [ -d "$MTHEME_DIR" ]; then rm -rf "$MTHEME_DIR"; fi
- mkdir "$MTHEME_DIR"
-
- # Download the template from GitHub
- tmp_dir=$(mktemp -d)
- wget -P "$tmp_dir" -c https://github.com/matze/mtheme/archive/${MTHEME_VER}.tar.gz
- pushd "$tmp_dir"
- tar -xzvf "$MTHEME_VER.tar.gz"
- popd
- make -C "$tmp_dir"/mtheme-"$MTHEME_VER"
- cp -v "$tmp_dir"/mtheme-"$MTHEME_VER"/*.sty "$MTHEME_DIR"
- rm -r "$tmp_dir"
- # Put our own .gitignore to ignore this directory completely
- echo "*" > "$MTHEME_DIR/.gitignore"
-}
-
-make_sty
diff --git a/modules/gapi/doc/slides/ocv_logo.eps b/modules/gapi/doc/slides/ocv_logo.eps
deleted file mode 100644
index 67005bd3d0..0000000000
--- a/modules/gapi/doc/slides/ocv_logo.eps
+++ /dev/null
@@ -1,181 +0,0 @@
-%!PS-Adobe-3.0 EPSF-3.0
-%%Creator: cairo 1.14.6 (http://cairographics.org)
-%%CreationDate: Wed Dec 12 17:03:17 2018
-%%Pages: 1
-%%DocumentData: Clean7Bit
-%%LanguageLevel: 2
-%%BoundingBox: 0 -1 598 739
-%%EndComments
-%%BeginProlog
-save
-50 dict begin
-/q { gsave } bind def
-/Q { grestore } bind def
-/cm { 6 array astore concat } bind def
-/w { setlinewidth } bind def
-/J { setlinecap } bind def
-/j { setlinejoin } bind def
-/M { setmiterlimit } bind def
-/d { setdash } bind def
-/m { moveto } bind def
-/l { lineto } bind def
-/c { curveto } bind def
-/h { closepath } bind def
-/re { exch dup neg 3 1 roll 5 3 roll moveto 0 rlineto
- 0 exch rlineto 0 rlineto closepath } bind def
-/S { stroke } bind def
-/f { fill } bind def
-/f* { eofill } bind def
-/n { newpath } bind def
-/W { clip } bind def
-/W* { eoclip } bind def
-/BT { } bind def
-/ET { } bind def
-/pdfmark where { pop globaldict /?pdfmark /exec load put }
- { globaldict begin /?pdfmark /pop load def /pdfmark
- /cleartomark load def end } ifelse
-/BDC { mark 3 1 roll /BDC pdfmark } bind def
-/EMC { mark /EMC pdfmark } bind def
-/cairo_store_point { /cairo_point_y exch def /cairo_point_x exch def } def
-/Tj { show currentpoint cairo_store_point } bind def
-/TJ {
- {
- dup
- type /stringtype eq
- { show } { -0.001 mul 0 cairo_font_matrix dtransform rmoveto } ifelse
- } forall
- currentpoint cairo_store_point
-} bind def
-/cairo_selectfont { cairo_font_matrix aload pop pop pop 0 0 6 array astore
- cairo_font exch selectfont cairo_point_x cairo_point_y moveto } bind def
-/Tf { pop /cairo_font exch def /cairo_font_matrix where
- { pop cairo_selectfont } if } bind def
-/Td { matrix translate cairo_font_matrix matrix concatmatrix dup
- /cairo_font_matrix exch def dup 4 get exch 5 get cairo_store_point
- /cairo_font where { pop cairo_selectfont } if } bind def
-/Tm { 2 copy 8 2 roll 6 array astore /cairo_font_matrix exch def
- cairo_store_point /cairo_font where { pop cairo_selectfont } if } bind def
-/g { setgray } bind def
-/rg { setrgbcolor } bind def
-/d1 { setcachedevice } bind def
-%%EndProlog
-%%BeginSetup
-%%EndSetup
-%%Page: 1 1
-%%BeginPageSetup
-%%PageBoundingBox: 0 -1 598 739
-%%EndPageSetup
-q 0 -1 598 740 rectclip q
-1 0.00392157 0.00392157 rg
-225.648 478.363 m 171.051 509.887 144.43 574.156 160.746 635.051 c 177.066
- 695.945 232.254 738.277 295.301 738.277 c 358.348 738.277 413.535 695.945
- 429.855 635.051 c 446.172 574.156 419.551 509.887 364.949 478.363 c 323.008
- 551.008 l 344.73 563.547 355.324 589.117 348.832 613.34 c 342.34 637.566
- 320.383 654.41 295.301 654.41 c 270.219 654.41 248.262 637.566 241.77 613.34
- c 235.277 589.117 245.871 563.547 267.59 551.008 c h
-225.648 478.363 m f
-0.00392157 0.00392157 1 rg
-523.949 444.637 m 578.551 413.113 605.172 348.844 588.855 287.949 c 572.535
- 227.055 517.348 184.723 454.301 184.723 c 391.254 184.723 336.066 227.055
- 319.746 287.949 c 303.43 348.844 330.051 413.113 384.648 444.637 c 426.59
- 371.992 l 404.871 359.453 394.277 333.883 400.77 309.66 c 407.262 285.434
- 429.219 268.59 454.301 268.59 c 479.383 268.59 501.34 285.434 507.832 309.66
- c 514.324 333.883 503.73 359.453 482.008 371.992 c h
-523.949 444.637 m f
-0.00392157 1 0.00392157 rg
-278.602 324 m 278.602 260.953 236.254 205.762 175.359 189.449 c 114.461
- 173.133 50.207 199.762 18.684 254.363 c -12.84 308.961 -3.773 377.922 40.805
- 422.504 c 85.383 467.082 154.352 476.164 208.949 444.637 c 167.008 371.992
- l 145.289 384.535 117.852 380.922 100.117 363.188 c 82.383 345.453 78.773
- 318.016 91.316 296.297 c 103.855 274.574 129.418 263.98 153.645 270.473
- c 177.871 276.961 194.719 298.918 194.719 324 c h
-278.602 324 m f
-0.0196078 g
-39.781 151.301 m 51.57 152.359 63.492 152.352 75.223 150.672 c 82.449 149.391
- 90.121 147.52 95.551 142.25 c 101.242 135.898 102.641 127.078 103.891 118.949
- c 105.941 102.078 105.699 84.969 103.891 68.09 c 102.68 59.852 101.492
-50.949 96.09 44.25 c 90.199 38.27 81.5 36.57 73.52 35.309 c 61.742 33.84
- 49.789 33.5 37.961 34.68 c 29.949 35.5 21.59 36.91 14.77 41.48 c 10.359
- 44.281 7.992 49.219 6.379 54.012 c 3.152 63.988 2.742 74.59 2.301 84.988
- c 2.25 98.73 2.512 112.609 5.191 126.129 c 6.641 132.441 8.402 139.379
-13.73 143.59 c 21.242 149.039 30.789 150.359 39.781 151.301 c h
-41.73 132.469 m 51.723 133.27 61.922 133.512 71.801 131.57 c 75.629 130.801
- 80.152 128.941 80.871 124.578 c 83.871 112.309 83.172 99.531 83.289 86.988
- c 82.922 78.07 83.129 68.852 80.141 60.309 c 77.531 54.699 70.422 54.238
- 65.062 53.422 c 54.312 52.809 43.152 52.27 32.723 55.461 c 27.91 56.73
-26.391 61.891 25.652 66.219 c 23.652 79.051 24.301 92.102 24.551 105.031
- c 25.082 112.281 24.992 119.801 27.602 126.691 c 30.59 131.309 36.77 131.719
- 41.73 132.469 c h
-41.73 132.469 m f*
-147.07 112.219 m 154.23 116.77 163.121 117.512 171.379 116.762 c 179.09
- 116.102 187.652 113.48 191.781 106.379 c 196.711 97.469 196.992 86.941
-197.332 77 c 197.109 66.781 196.922 56.109 192.699 46.609 c 190.289 40.84
- 184.75 37.059 178.82 35.57 c 169.742 33.34 159.762 33.102 151.012 36.719
- c 146.281 38.57 143.012 42.59 140.301 46.711 c 140.301 0 l 120.301 0 l
-120.312 38.66 120.281 77.328 120.312 115.988 c 126.781 116.02 133.25 116.02
- 139.711 115.988 c 139.492 112.012 139.27 108.039 139.16 104.051 c 141.562
- 106.98 143.789 110.199 147.07 112.219 c h
-153.582 101.781 m 159.18 102.211 165.102 102.328 170.34 100.02 c 173.66
- 98.59 175.41 95.078 176 91.68 c 177.742 82.91 177.52 73.852 176.902 64.969
- c 176.281 59.609 175.422 52.672 169.52 50.59 c 162.699 48.359 154.922 48.219
- 148.18 50.828 c 141.91 53.469 141.18 61.059 140.562 66.949 c 140.191 75.988
- 139.742 85.289 142.289 94.07 c 143.641 99.051 148.82 101.41 153.582 101.781
- c h
-153.582 101.781 m f*
-221.262 112.07 m 231.09 117.121 242.602 117.301 253.391 116.789 c 262.371
- 116.039 273.27 114.539 278.223 105.949 c 283.801 95.578 282.891 83.379
-283.672 72 c 228.961 72 l 229.602 66.129 228.84 59.801 231.801 54.422 c
-234.332 50.172 239.699 49.301 244.242 49.051 c 249.852 49.012 255.891 48.551
- 261.062 51.16 c 264.02 53.48 264.039 57.602 264.422 61 c 270.82 61.012
-277.223 61.012 283.621 61 c 283.379 54.32 282.52 46.84 277.16 42.141 c 269.109
- 34.922 257.59 34.172 247.289 33.969 c 238.199 34.238 228.602 34.699 220.461
- 39.18 c 213.871 43.07 211.77 51.059 210.609 58.102 c 209.141 68.559 208.77
- 79.219 210.02 89.719 c 211.039 98.012 213.27 107.762 221.262 112.07 c h
-232.949 99.34 m 238.41 102.66 245.172 101.988 251.301 101.898 c 255.102
- 101.488 259.73 101.27 262.199 97.91 c 264.723 93.762 264.27 88.68 264.289
- 84.02 c 252.52 84 240.762 83.969 229 84.031 c 229.18 89.211 228.77 95.531
- 232.949 99.34 c h
-232.949 99.34 m f*
-326.262 112.121 m 333.18 116.922 342.121 117.59 350.262 116.648 c 357.191
- 115.922 364.531 113.281 368.621 107.301 c 372.25 102.34 373.262 96.02 373.312
- 90.012 c 373.281 71.672 373.32 53.34 373.301 35 c 366.961 34.988 360.629
- 34.988 354.312 35 c 354.281 52.352 354.332 69.691 354.281 87.031 c 354.09
- 90.82 354.242 95.199 351.391 98.121 c 348.352 101.41 343.582 102.051 339.332
- 102.02 c 334.191 102.051 328.629 101.172 324.672 97.621 c 320.801 94.32
- 319.332 89 319.312 84.078 c 319.281 67.719 319.32 51.359 319.289 35.012
- c 312.961 34.988 306.629 34.988 300.312 35 c 300.301 62 300.301 89 300.312
- 116 c 306.531 116.02 312.762 116.012 318.98 116 c 318.949 111.262 318.48
- 106.551 318.34 101.809 c 320.379 105.641 322.52 109.68 326.262 112.121
-c h
-326.262 112.121 m f*
-407.691 147.602 m 418.172 151.121 429.34 151.621 440.301 152.012 c 450.922
- 151.961 462.02 151.859 471.941 147.578 c 476.98 145.48 480.473 140.879
-482.172 135.801 c 484.941 128.211 485.02 119.988 485.082 112 c 477.77 112
- 470.461 111.98 463.16 112.012 c 463.039 117.629 463.473 123.93 459.992
-128.711 c 456.473 132.309 450.973 132.301 446.301 132.852 c 436.801 133.031
- 426.91 133.641 417.812 130.359 c 414.531 129.32 412.832 126.039 412.172
- 122.879 c 410.301 114.398 410.289 105.648 410.301 97 c 410.41 85.441 410.23
- 73.711 412.699 62.34 c 413.352 58.18 417.18 55.621 421.02 54.699 c 429.902
- 52.488 439.172 52.809 448.242 53.352 c 452.973 53.969 458.73 54.281 461.699
- 58.621 c 464.871 63.801 464.34 70.172 464.172 75.988 c 471.551 76.02 478.922
- 76.012 486.301 75.988 c 486.211 66.801 486.051 57.309 482.711 48.609 c
-480.992 44.059 477.441 40.199 472.84 38.461 c 463.812 34.84 453.91 34.609
- 444.332 34.031 c 433.223 33.84 421.973 34.109 411.109 36.699 c 404.742
-38.359 397.781 41.281 394.832 47.609 c 391.062 55.98 390.371 65.289 389.402
- 74.301 c 388.59 86.199 388.07 98.121 388.359 110.039 c 388.93 119.691 389.812
- 129.859 395.02 138.27 c 397.789 142.949 402.652 145.879 407.691 147.602
- c h
-407.691 147.602 m f*
-489.902 150.969 m 497.52 150.961 505.141 151.18 512.75 150.859 c 520.16
- 127.352 528.301 104.078 535.781 80.602 c 538.691 71.578 540.75 62.301 543.762
- 53.309 c 547.129 63.012 549.289 73.09 552.59 82.809 c 559.902 105.52 567.41
- 128.16 574.711 150.871 c 582.23 151.191 589.77 150.91 597.301 151.012 c
- 597.301 148.52 l 584.922 110.789 572.832 72.961 560.699 35.141 c 549.379
- 34.91 538.039 34.879 526.723 35.16 c 514.66 73.828 502.02 112.32 489.902
- 150.969 c h
-489.902 150.969 m f*
-Q Q
-showpage
-%%Trailer
-end restore
-%%EOF
diff --git a/modules/gapi/include/opencv2/gapi.hpp b/modules/gapi/include/opencv2/gapi.hpp
deleted file mode 100644
index 2087641023..0000000000
--- a/modules/gapi/include/opencv2/gapi.hpp
+++ /dev/null
@@ -1,42 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-//
-// Copyright (C) 2018-2021 Intel Corporation
-
-
-#ifndef OPENCV_GAPI_HPP
-#define OPENCV_GAPI_HPP
-
-#include <memory>
-
-/** \defgroup gapi_ref G-API framework
-@{
- @defgroup gapi_main_classes G-API Main Classes
- @defgroup gapi_data_objects G-API Data Types
- @{
- @defgroup gapi_meta_args G-API Metadata Descriptors
- @}
- @defgroup gapi_std_backends G-API Standard Backends
- @defgroup gapi_compile_args G-API Graph Compilation Arguments
- @defgroup gapi_serialization G-API Serialization functionality
-@}
- */
-
-#include <opencv2/gapi/gmat.hpp>
-#include <opencv2/gapi/gscalar.hpp>
-#include <opencv2/gapi/garray.hpp>
-#include <opencv2/gapi/gopaque.hpp>
-#include <opencv2/gapi/gcomputation.hpp>
-#include <opencv2/gapi/gcompiled.hpp>
-#include <opencv2/gapi/gtyped.hpp>
-#include <opencv2/gapi/gkernel.hpp>
-#include <opencv2/gapi/operators.hpp>
-#include <opencv2/gapi/media.hpp>
-
-// Include these files here to avoid cyclic dependency between
-// Desync & GKernel & GComputation & GStreamingCompiled.
-#include <opencv2/gapi/streaming/desync.hpp>
-#include <opencv2/gapi/gstreaming.hpp>
-
-#endif // OPENCV_GAPI_HPP
diff --git a/modules/gapi/include/opencv2/gapi/core.hpp b/modules/gapi/include/opencv2/gapi/core.hpp
deleted file mode 100644
index 60bb2c5074..0000000000
--- a/modules/gapi/include/opencv2/gapi/core.hpp
+++ /dev/null
@@ -1,1911 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-//
-// Copyright (C) 2018-2020 Intel Corporation
-
-
-#ifndef OPENCV_GAPI_CORE_HPP
-#define OPENCV_GAPI_CORE_HPP
-
-#include <math.h>
-#include <utility> // std::tuple
-
-#include <opencv2/imgproc.hpp>
-#include <opencv2/gapi/imgproc.hpp>
-
-#include <opencv2/gapi/gmat.hpp>
-#include <opencv2/gapi/gscalar.hpp>
-#include <opencv2/gapi/garray.hpp>
-#include <opencv2/gapi/gopaque.hpp>
-
-/** \defgroup gapi_core G-API Core functionality
-@{
- @defgroup gapi_math Graph API: Math operations
- @defgroup gapi_pixelwise Graph API: Pixelwise operations
- @defgroup gapi_matrixop Graph API: Operations on matrices
- @defgroup gapi_transform Graph API: Image and channel composition functions
-@}
- */
-
-namespace cv { namespace gapi {
-/**
- * @brief This namespace contains G-API Operation Types for OpenCV
- * Core module functionality.
- */
-namespace core {
- using GResize = cv::gapi::imgproc::GResize;
- using GResizeP = cv::gapi::imgproc::GResizeP;
-
- using GMat2 = std::tuple<GMat, GMat>;
- using GMat3 = std::tuple<GMat, GMat, GMat>; // FIXME: how to avoid this?
- using GMat4 = std::tuple<GMat, GMat, GMat, GMat>;
- using GMatScalar = std::tuple<GMat, GScalar>;
-
- G_TYPED_KERNEL(GAdd, <GMat(GMat, GMat, int)>, "org.opencv.core.math.add") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc b, int ddepth) {
- if (ddepth == -1)
- {
- // OpenCV: When the input arrays in add/subtract/multiply/divide
- // functions have different depths, the output array depth must be
- // explicitly specified!
- // See artim_op() @ arithm.cpp
- GAPI_Assert(a.chan == b.chan);
- GAPI_Assert(a.depth == b.depth);
- return a;
- }
- return a.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GAddC, <GMat(GMat, GScalar, int)>, "org.opencv.core.math.addC") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc, int ddepth) {
- GAPI_Assert(a.chan <= 4);
- return a.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GSub, <GMat(GMat, GMat, int)>, "org.opencv.core.math.sub") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc b, int ddepth) {
- if (ddepth == -1)
- {
- // This macro should select a larger data depth from a and b
- // considering the number of channels in the same
- // FIXME!!! Clarify if it is valid for sub()
- GAPI_Assert(a.chan == b.chan);
- ddepth = std::max(a.depth, b.depth);
- }
- return a.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GSubC, <GMat(GMat, GScalar, int)>, "org.opencv.core.math.subC") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc, int ddepth) {
- return a.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GSubRC, <GMat(GScalar, GMat, int)>, "org.opencv.core.math.subRC") {
- static GMatDesc outMeta(GScalarDesc, GMatDesc b, int ddepth) {
- return b.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GMul, <GMat(GMat, GMat, double, int)>, "org.opencv.core.math.mul") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc, double, int ddepth) {
- return a.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GMulCOld, <GMat(GMat, double, int)>, "org.opencv.core.math.mulCOld") {
- static GMatDesc outMeta(GMatDesc a, double, int ddepth) {
- return a.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GMulC, <GMat(GMat, GScalar, int)>, "org.opencv.core.math.mulC") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc, int ddepth) {
- return a.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GMulS, <GMat(GMat, GScalar)>, "org.opencv.core.math.muls") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
- return a;
- }
- }; // FIXME: Merge with MulC
-
- G_TYPED_KERNEL(GDiv, <GMat(GMat, GMat, double, int)>, "org.opencv.core.math.div") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc b, double, int ddepth) {
- if (ddepth == -1)
- {
- GAPI_Assert(a.depth == b.depth);
- return b;
- }
- return a.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GDivC, <GMat(GMat, GScalar, double, int)>, "org.opencv.core.math.divC") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc, double, int ddepth) {
- return a.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GDivRC, <GMat(GScalar, GMat, double, int)>, "org.opencv.core.math.divRC") {
- static GMatDesc outMeta(GScalarDesc, GMatDesc b, double, int ddepth) {
- return b.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GMean, <GScalar(GMat)>, "org.opencv.core.math.mean") {
- static GScalarDesc outMeta(GMatDesc) {
- return empty_scalar_desc();
- }
- };
-
- G_TYPED_KERNEL_M(GPolarToCart, <GMat2(GMat, GMat, bool)>, "org.opencv.core.math.polarToCart") {
- static std::tuple<GMatDesc, GMatDesc> outMeta(GMatDesc, GMatDesc a, bool) {
- return std::make_tuple(a, a);
- }
- };
-
- G_TYPED_KERNEL_M(GCartToPolar, <GMat2(GMat, GMat, bool)>, "org.opencv.core.math.cartToPolar") {
- static std::tuple<GMatDesc, GMatDesc> outMeta(GMatDesc x, GMatDesc, bool) {
- return std::make_tuple(x, x);
- }
- };
-
- G_TYPED_KERNEL(GPhase, <GMat(GMat, GMat, bool)>, "org.opencv.core.math.phase") {
- static GMatDesc outMeta(const GMatDesc &inx, const GMatDesc &, bool) {
- return inx;
- }
- };
-
- G_TYPED_KERNEL(GMask, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.mask") {
- static GMatDesc outMeta(GMatDesc in, GMatDesc) {
- return in;
- }
- };
-
- G_TYPED_KERNEL(GCmpGT, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpGT") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GCmpGE, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpGE") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GCmpLE, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpLE") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GCmpLT, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpLT") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GCmpEQ, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpEQ") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GCmpNE, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.compare.cmpNE") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GCmpGTScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpGTScalar") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GCmpGEScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpGEScalar") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GCmpLEScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpLEScalar") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GCmpLTScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpLTScalar") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GCmpEQScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpEQScalar") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GCmpNEScalar, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.compare.cmpNEScalar") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
- return a.withDepth(CV_8U);
- }
- };
-
- G_TYPED_KERNEL(GAnd, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.bitwise_and") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GAndS, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.bitwise_andS") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GOr, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.bitwise_or") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GOrS, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.bitwise_orS") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GXor, <GMat(GMat, GMat)>, "org.opencv.core.pixelwise.bitwise_xor") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GXorS, <GMat(GMat, GScalar)>, "org.opencv.core.pixelwise.bitwise_xorS") {
- static GMatDesc outMeta(GMatDesc a, GScalarDesc) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GNot, <GMat(GMat)>, "org.opencv.core.pixelwise.bitwise_not") {
- static GMatDesc outMeta(GMatDesc a) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GSelect, <GMat(GMat, GMat, GMat)>, "org.opencv.core.pixelwise.select") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc, GMatDesc) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GMin, <GMat(GMat, GMat)>, "org.opencv.core.matrixop.min") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GMax, <GMat(GMat, GMat)>, "org.opencv.core.matrixop.max") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GAbsDiff, <GMat(GMat, GMat)>, "org.opencv.core.matrixop.absdiff") {
- static GMatDesc outMeta(GMatDesc a, GMatDesc) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GAbsDiffC, <GMat(GMat, GScalar)>, "org.opencv.core.matrixop.absdiffC") {
- static GMatDesc outMeta(const GMatDesc& a, const GScalarDesc&) {
- return a;
- }
- };
-
- G_TYPED_KERNEL(GSum, <GScalar(GMat)>, "org.opencv.core.matrixop.sum") {
- static GScalarDesc outMeta(GMatDesc) {
- return empty_scalar_desc();
- }
- };
-
- G_TYPED_KERNEL(GCountNonZero, <GOpaque<int>(GMat)>, "org.opencv.core.matrixop.countNonZero") {
- static GOpaqueDesc outMeta(GMatDesc in) {
- GAPI_Assert(in.chan == 1);
- return empty_gopaque_desc();
- }
- };
-
- G_TYPED_KERNEL(GAddW, <GMat(GMat, double, GMat, double, double, int)>, "org.opencv.core.matrixop.addweighted") {
- static GMatDesc outMeta(GMatDesc a, double, GMatDesc b, double, double, int ddepth) {
- if (ddepth == -1)
- {
- // OpenCV: When the input arrays in add/subtract/multiply/divide
- // functions have different depths, the output array depth must be
- // explicitly specified!
- // See artim_op() @ arithm.cpp
- GAPI_Assert(a.chan == b.chan);
- GAPI_Assert(a.depth == b.depth);
- return a;
- }
- return a.withDepth(ddepth);
- }
- };
-
- G_TYPED_KERNEL(GNormL1, <GScalar(GMat)>, "org.opencv.core.matrixop.norml1") {
- static GScalarDesc outMeta(GMatDesc) {
- return empty_scalar_desc();
- }
- };
-
- G_TYPED_KERNEL(GNormL2, <GScalar(GMat)>, "org.opencv.core.matrixop.norml2") {
- static GScalarDesc outMeta(GMatDesc) {
- return empty_scalar_desc();
- }
- };
-
- G_TYPED_KERNEL(GNormInf, <GScalar(GMat)>, "org.opencv.core.matrixop.norminf") {
- static GScalarDesc outMeta(GMatDesc) {
- return empty_scalar_desc();
- }
- };
-
- G_TYPED_KERNEL_M(GIntegral, <GMat2(GMat, int, int)>, "org.opencv.core.matrixop.integral") {
- static std::tuple<GMatDesc, GMatDesc> outMeta(GMatDesc in, int sd, int sqd) {
- return std::make_tuple(in.withSizeDelta(1,1).withDepth(sd),
- in.withSizeDelta(1,1).withDepth(sqd));
- }
- };
-
- G_TYPED_KERNEL(GThreshold, <GMat(GMat, GScalar, GScalar, int)>, "org.opencv.core.matrixop.threshold") {
- static GMatDesc outMeta(GMatDesc in, GScalarDesc, GScalarDesc, int) {
- return in;
- }
- };
-
-
- G_TYPED_KERNEL_M(GThresholdOT, <GMatScalar(GMat, GScalar, int)>, "org.opencv.core.matrixop.thresholdOT") {
- static std::tuple<GMatDesc, GScalarDesc> outMeta(GMatDesc in, GScalarDesc, int) {
- return std::make_tuple(in, empty_scalar_desc());
- }
- };
-
- G_TYPED_KERNEL(GInRange, <GMat(GMat, GScalar, GScalar)>, "org.opencv.core.matrixop.inrange") {
- static GMatDesc outMeta(GMatDesc in, GScalarDesc, GScalarDesc) {
- return in.withType(CV_8U, 1);
- }
- };
-
- G_TYPED_KERNEL_M(GSplit3, <GMat3(GMat)>, "org.opencv.core.transform.split3") {
- static std::tuple<GMatDesc, GMatDesc, GMatDesc> outMeta(GMatDesc in) {
- const auto out_depth = in.depth;
- const auto out_desc = in.withType(out_depth, 1);
- return std::make_tuple(out_desc, out_desc, out_desc);
- }
- };
-
- G_TYPED_KERNEL_M(GSplit4, <GMat4(GMat)>,"org.opencv.core.transform.split4") {
- static std::tuple<GMatDesc, GMatDesc, GMatDesc, GMatDesc> outMeta(GMatDesc in) {
- const auto out_depth = in.depth;
- const auto out_desc = in.withType(out_depth, 1);
- return std::make_tuple(out_desc, out_desc, out_desc, out_desc);
- }
- };
-
- G_TYPED_KERNEL(GMerge3, <GMat(GMat, GMat, GMat)>, "org.opencv.core.transform.merge3") {
- static GMatDesc outMeta(GMatDesc in, GMatDesc, GMatDesc) {
- // Preserve depth and add channel component
- return in.withType(in.depth, 3);
- }
- };
-
- G_TYPED_KERNEL(GMerge4, <GMat(GMat, GMat, GMat, GMat)>, "org.opencv.core.transform.merge4") {
- static GMatDesc outMeta(GMatDesc in, GMatDesc, GMatDesc, GMatDesc) {
- // Preserve depth and add channel component
- return in.withType(in.depth, 4);
- }
- };
-
- G_TYPED_KERNEL(GRemap, <GMat(GMat, Mat, Mat, int, int, Scalar)>, "org.opencv.core.transform.remap") {
- static GMatDesc outMeta(GMatDesc in, Mat m1, Mat, int, int, Scalar) {
- return in.withSize(m1.size());
- }
- };
-
- G_TYPED_KERNEL(GFlip, <GMat(GMat, int)>, "org.opencv.core.transform.flip") {
- static GMatDesc outMeta(GMatDesc in, int) {
- return in;
- }
- };
-
- // TODO: eliminate the need in this kernel (streaming)
- G_TYPED_KERNEL(GCrop, <GMat(GMat, Rect)>, "org.opencv.core.transform.crop") {
- static GMatDesc outMeta(GMatDesc in, Rect rc) {
- return in.withSize(Size(rc.width, rc.height));
- }
- };
-
- G_TYPED_KERNEL(GConcatHor, <GMat(GMat, GMat)>, "org.opencv.imgproc.transform.concatHor") {
- static GMatDesc outMeta(GMatDesc l, GMatDesc r) {
- return l.withSizeDelta(+r.size.width, 0);
- }
- };
-
- G_TYPED_KERNEL(GConcatVert, <GMat(GMat, GMat)>, "org.opencv.imgproc.transform.concatVert") {
- static GMatDesc outMeta(GMatDesc t, GMatDesc b) {
- return t.withSizeDelta(0, +b.size.height);
- }
- };
-
- G_TYPED_KERNEL(GLUT, <GMat(GMat, Mat)>, "org.opencv.core.transform.LUT") {
- static GMatDesc outMeta(GMatDesc in, Mat) {
- return in;
- }
- };
-
- G_TYPED_KERNEL(GConvertTo, <GMat(GMat, int, double, double)>, "org.opencv.core.transform.convertTo") {
- static GMatDesc outMeta(GMatDesc in, int rdepth, double, double) {
- return rdepth < 0 ? in : in.withDepth(rdepth);
- }
- };
-
- G_TYPED_KERNEL(GSqrt, <GMat(GMat)>, "org.opencv.core.math.sqrt") {
- static GMatDesc outMeta(GMatDesc in) {
- return in;
- }
- };
-
- G_TYPED_KERNEL(GNormalize, <GMat(GMat, double, double, int, int)>, "org.opencv.core.normalize") {
- static GMatDesc outMeta(GMatDesc in, double, double, int, int ddepth) {
- // unlike opencv doesn't have a mask as a parameter
- return (ddepth < 0 ? in : in.withDepth(ddepth));
- }
- };
-
- G_TYPED_KERNEL(GWarpPerspective, <GMat(GMat, Mat, Size, int, int, Scalar)>, "org.opencv.core.warpPerspective") {
- static GMatDesc outMeta(GMatDesc in, const Mat&, Size dsize, int, int borderMode, const cv::Scalar&) {
- GAPI_Assert((borderMode == cv::BORDER_CONSTANT || borderMode == cv::BORDER_REPLICATE) &&
- "cv::gapi::warpPerspective supports only cv::BORDER_CONSTANT and cv::BORDER_REPLICATE border modes");
- return in.withType(in.depth, in.chan).withSize(dsize);
- }
- };
-
- G_TYPED_KERNEL(GWarpAffine, <GMat(GMat, Mat, Size, int, int, Scalar)>, "org.opencv.core.warpAffine") {
- static GMatDesc outMeta(GMatDesc in, const Mat&, Size dsize, int, int border_mode, const cv::Scalar&) {
- GAPI_Assert(border_mode != cv::BORDER_TRANSPARENT &&
- "cv::BORDER_TRANSPARENT mode is not supported in cv::gapi::warpAffine");
- return in.withType(in.depth, in.chan).withSize(dsize);
- }
- };
-
- G_TYPED_KERNEL(
- GKMeansND,
- <std::tuple<GOpaque<double>,GMat,GMat>(GMat,int,GMat,TermCriteria,int,KmeansFlags)>,
- "org.opencv.core.kmeansND") {
-
- static std::tuple<GOpaqueDesc,GMatDesc,GMatDesc>
- outMeta(const GMatDesc& in, int K, const GMatDesc& bestLabels, const TermCriteria&, int,
- KmeansFlags flags) {
- GAPI_Assert(in.depth == CV_32F);
- std::vector<int> amount_n_dim = detail::checkVector(in);
- int amount = amount_n_dim[0], dim = amount_n_dim[1];
- if (amount == -1) // Mat with height != 1, width != 1, channels != 1 given
- { // which means that kmeans will consider the following:
- amount = in.size.height;
- dim = in.size.width * in.chan;
- }
- // kmeans sets these labels' sizes when no bestLabels given:
- GMatDesc out_labels(CV_32S, 1, Size{1, amount});
- // kmeans always sets these centers' sizes:
- GMatDesc centers (CV_32F, 1, Size{dim, K});
- if (flags & KMEANS_USE_INITIAL_LABELS)
- {
- GAPI_Assert(bestLabels.depth == CV_32S);
- int labels_amount = detail::checkVector(bestLabels, 1u);
- GAPI_Assert(labels_amount == amount);
- out_labels = bestLabels; // kmeans preserves bestLabels' sizes if given
- }
- return std::make_tuple(empty_gopaque_desc(), out_labels, centers);
- }
- };
-
- G_TYPED_KERNEL(
- GKMeansNDNoInit,
- <std::tuple<GOpaque<double>,GMat,GMat>(GMat,int,TermCriteria,int,KmeansFlags)>,
- "org.opencv.core.kmeansNDNoInit") {
-
- static std::tuple<GOpaqueDesc,GMatDesc,GMatDesc>
- outMeta(const GMatDesc& in, int K, const TermCriteria&, int, KmeansFlags flags) {
- GAPI_Assert( !(flags & KMEANS_USE_INITIAL_LABELS) );
- GAPI_Assert(in.depth == CV_32F);
- std::vector<int> amount_n_dim = detail::checkVector(in);
- int amount = amount_n_dim[0], dim = amount_n_dim[1];
- if (amount == -1) // Mat with height != 1, width != 1, channels != 1 given
- { // which means that kmeans will consider the following:
- amount = in.size.height;
- dim = in.size.width * in.chan;
- }
- GMatDesc out_labels(CV_32S, 1, Size{1, amount});
- GMatDesc centers (CV_32F, 1, Size{dim, K});
- return std::make_tuple(empty_gopaque_desc(), out_labels, centers);
- }
- };
-
- G_TYPED_KERNEL(GKMeans2D, <std::tuple<GOpaque<double>,GArray<int>,GArray<Point2f>>
- (GArray<Point2f>,int,GArray<int>,TermCriteria,int,KmeansFlags)>,
- "org.opencv.core.kmeans2D") {
- static std::tuple<GOpaqueDesc,GArrayDesc,GArrayDesc>
- outMeta(const GArrayDesc&,int,const GArrayDesc&,const TermCriteria&,int,KmeansFlags) {
- return std::make_tuple(empty_gopaque_desc(), empty_array_desc(), empty_array_desc());
- }
- };
-
- G_TYPED_KERNEL(GKMeans3D, <std::tuple<GOpaque<double>,GArray<int>,GArray<Point3f>>
- (GArray<Point3f>,int,GArray<int>,TermCriteria,int,KmeansFlags)>,
- "org.opencv.core.kmeans3D") {
- static std::tuple<GOpaqueDesc,GArrayDesc,GArrayDesc>
- outMeta(const GArrayDesc&,int,const GArrayDesc&,const TermCriteria&,int,KmeansFlags) {
- return std::make_tuple(empty_gopaque_desc(), empty_array_desc(), empty_array_desc());
- }
- };
-
- G_TYPED_KERNEL(GTranspose, <GMat(GMat)>, "org.opencv.core.transpose") {
- static GMatDesc outMeta(GMatDesc in) {
- return in.withSize({in.size.height, in.size.width});
- }
- };
-} // namespace core
-
-namespace streaming {
-
-// Operations for Streaming (declared in this header for convenience)
-G_TYPED_KERNEL(GSize, <GOpaque<Size>(GMat)>, "org.opencv.streaming.size") {
- static GOpaqueDesc outMeta(const GMatDesc&) {
- return empty_gopaque_desc();
- }
-};
-
-G_TYPED_KERNEL(GSizeR, <GOpaque<Size>(GOpaque<Rect>)>, "org.opencv.streaming.sizeR") {
- static GOpaqueDesc outMeta(const GOpaqueDesc&) {
- return empty_gopaque_desc();
- }
-};
-
-G_TYPED_KERNEL(GSizeMF, <GOpaque<Size>(GFrame)>, "org.opencv.streaming.sizeMF") {
- static GOpaqueDesc outMeta(const GFrameDesc&) {
- return empty_gopaque_desc();
- }
-};
-} // namespace streaming
-
-//! @addtogroup gapi_math
-//! @{
-
-/** @brief Calculates the per-element sum of two matrices.
-
-The function add calculates the sum of two matrices of the same size and the same number of channels:
-\f[\texttt{dst}(I) = \texttt{saturate} ( \texttt{src1}(I) + \texttt{src2}(I)) \quad \texttt{if mask}(I) \ne0\f]
-
-The function can be replaced with matrix expressions:
- \f[\texttt{dst} = \texttt{src1} + \texttt{src2}\f]
-
-The input matrices and the output matrix can all have the same or different depths. For example, you
-can add a 16-bit unsigned matrix to an 8-bit signed matrix and store the sum as a 32-bit
-floating-point matrix. Depth of the output matrix is determined by the ddepth parameter.
-If src1.depth() == src2.depth(), ddepth can be set to the default -1. In this case, the output matrix will have
-the same depth as the input matrices.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.math.add"
-@param src1 first input matrix.
-@param src2 second input matrix.
-@param ddepth optional depth of the output matrix.
-@sa sub, addWeighted
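-
-A minimal usage sketch:
-@code{.cpp}
-cv::Mat mat_a = cv::Mat::ones(8, 8, CV_8UC1),
-        mat_b = mat_a.clone(), mat_sum;
-cv::GMat a, b;
-cv::GMat sum = cv::gapi::add(a, b);    // ddepth = -1: input depths must match
-cv::GComputation(cv::GIn(a, b), cv::GOut(sum))
-    .apply(cv::gin(mat_a, mat_b), cv::gout(mat_sum));
-@endcode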
-*/
-GAPI_EXPORTS_W GMat add(const GMat& src1, const GMat& src2, int ddepth = -1);
-
-/** @brief Calculates the per-element sum of matrix and given scalar.
-
-The function addC adds a given scalar value to each element of given matrix.
-The function can be replaced with matrix expressions:
-
- \f[\texttt{dst} = \texttt{src1} + \texttt{c}\f]
-
-Depth of the output matrix is determined by the ddepth parameter.
-If ddepth is set to default -1, the depth of output matrix will be the same as the depth of input matrix.
-The matrices can be single or multi channel. Output matrix must have the same size and number of channels as the input matrix.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.math.addC"
-@param src1 first input matrix.
-@param c scalar value to be added.
-@param ddepth optional depth of the output matrix.
-@sa sub, addWeighted
-*/
-GAPI_EXPORTS_W GMat addC(const GMat& src1, const GScalar& c, int ddepth = -1);
-//! @overload
-GAPI_EXPORTS_W GMat addC(const GScalar& c, const GMat& src1, int ddepth = -1);
-
-/** @brief Calculates the per-element difference between two matrices.
-
-The function sub calculates the difference between two matrices, when both matrices have the same size and the same number of
-channels:
- \f[\texttt{dst}(I) = \texttt{src1}(I) - \texttt{src2}(I)\f]
-
-The function can be replaced with matrix expressions:
-\f[\texttt{dst} = \texttt{src1} - \texttt{src2}\f]
-
-The input matrices and the output matrix can all have the same or different depths. For example, you
-can subtract two 8-bit unsigned matrices and store the result as a 16-bit signed matrix.
-Depth of the output matrix is determined by the ddepth parameter.
-If src1.depth() == src2.depth(), ddepth can be set to the default -1. In this case, the output matrix will have
-the same depth as the input matrices. The matrices can be single or multi channel.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.math.sub"
-@param src1 first input matrix.
-@param src2 second input matrix.
-@param ddepth optional depth of the output matrix.
-@sa add, addC
- */
-GAPI_EXPORTS_W GMat sub(const GMat& src1, const GMat& src2, int ddepth = -1);
-
-/** @brief Calculates the per-element difference between matrix and given scalar.
-
-The function can be replaced with matrix expressions:
- \f[\texttt{dst} = \texttt{src} - \texttt{c}\f]
-
-Depth of the output matrix is determined by the ddepth parameter.
-If ddepth is set to default -1, the depth of output matrix will be the same as the depth of input matrix.
-The matrices can be single or multi channel. Output matrix must have the same size as src.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.math.subC"
-@param src first input matrix.
-@param c scalar value to be subtracted.
-@param ddepth optional depth of the output matrix.
-@sa add, addC, subRC
- */
-GAPI_EXPORTS_W GMat subC(const GMat& src, const GScalar& c, int ddepth = -1);
-
-/** @brief Calculates the per-element difference between given scalar and the matrix.
-
-The function can be replaced with matrix expressions:
- \f[\texttt{dst} = \texttt{c} - \texttt{src}\f]
-
-Depth of the output matrix is determined by the ddepth parameter.
-If ddepth is set to default -1, the depth of output matrix will be the same as the depth of input matrix.
-The matrices can be single or multi channel. Output matrix must have the same size as src.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.math.subRC"
-@param c scalar value to subtract from.
-@param src input matrix to be subtracted.
-@param ddepth optional depth of the output matrix.
-@sa add, addC, subC
- */
-GAPI_EXPORTS_W GMat subRC(const GScalar& c, const GMat& src, int ddepth = -1);
-
-/** @brief Calculates the per-element scaled product of two matrices.
-
-The function mul calculates the per-element product of two matrices:
-
-\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{scale} \cdot \texttt{src1} (I) \cdot \texttt{src2} (I))\f]
-
-If src1.depth() == src2.depth(), ddepth can be set to the default -1. In this case, the output matrix will have
-the same depth as the input matrices. The matrices can be single or multi channel.
-Output matrix must have the same size as input matrices.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.math.mul"
-@param src1 first input matrix.
-@param src2 second input matrix of the same size and the same depth as src1.
-@param scale optional scale factor.
-@param ddepth optional depth of the output matrix.
-@sa add, sub, div, addWeighted
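-
-A minimal usage sketch:
-@code{.cpp}
-cv::GMat a, b;
-cv::GMat prod = cv::gapi::mul(a, b, 0.5); // halve the per-element product
-@endcode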
-*/
-GAPI_EXPORTS_W GMat mul(const GMat& src1, const GMat& src2, double scale = 1.0, int ddepth = -1);
-
-/** @brief Multiplies matrix by scalar.
-
-The function mulC multiplies each element of matrix src by the given scalar value:
-
-\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{src1} (I) \cdot \texttt{multiplier} )\f]
-
-The matrices can be single or multi channel. Output matrix must have the same size as src.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.math.mulC"
-@param src input matrix.
-@param multiplier factor to be multiplied.
-@param ddepth optional depth of the output matrix. If -1, the depth of output matrix will be the same as input matrix depth.
-@sa add, sub, div, addWeighted
-*/
-GAPI_EXPORTS_W GMat mulC(const GMat& src, double multiplier, int ddepth = -1);
-//! @overload
-GAPI_EXPORTS_W GMat mulC(const GMat& src, const GScalar& multiplier, int ddepth = -1); // FIXME: merge with mulc
-//! @overload
-GAPI_EXPORTS_W GMat mulC(const GScalar& multiplier, const GMat& src, int ddepth = -1); // FIXME: merge with mulc
-
-/** @brief Performs per-element division of two matrices.
-
-The function divides one matrix by another:
-\f[\texttt{dst(I) = saturate(src1(I)*scale/src2(I))}\f]
-
-For integer types, when src2(I) is zero, dst(I) will also be zero.
-The floating-point case returns Inf/NaN (according to IEEE).
-
-Different channels of
-multi-channel matrices are processed independently.
-The matrices can be single or multi channel. Output matrix must have the same size and depth as src.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.math.div"
-@param src1 first input matrix.
-@param src2 second input matrix of the same size and depth as src1.
-@param scale scalar factor.
-@param ddepth optional depth of the output matrix; you can only pass -1 when src1.depth() == src2.depth().
-@sa mul, add, sub
-*/
-GAPI_EXPORTS_W GMat div(const GMat& src1, const GMat& src2, double scale, int ddepth = -1);
-
-/** @brief Divides matrix by scalar.
-
-The function divC divides each element of matrix src by the given scalar value:
-
-\f[\texttt{dst(I) = saturate(src(I)*scale/divisor)}\f]
-
-When divisor is zero, dst(I) will also be zero. Different channels of
-multi-channel matrices are processed independently.
-The matrices can be single or multi channel. Output matrix must have the same size and depth as src.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.math.divC"
-@param src input matrix.
-@param divisor number to be divided by.
-@param ddepth optional depth of the output matrix. If -1, the depth of output matrix will be the same as input matrix depth.
-@param scale scale factor.
-@sa add, sub, div, addWeighted
-*/
-GAPI_EXPORTS_W GMat divC(const GMat& src, const GScalar& divisor, double scale, int ddepth = -1);
-
-/** @brief Divides scalar by matrix.
-
-The function divRC divides a given scalar by each element of matrix src and keeps the division result in a new matrix of the same size and type as src:
-
-\f[\texttt{dst(I) = saturate(divident*scale/src(I))}\f]
-
-When src(I) is zero, dst(I) will also be zero. Different channels of
-multi-channel matrices are processed independently.
-The matrices can be single or multi channel. Output matrix must have the same size and depth as src.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.math.divRC"
-@param divident number to be divided.
-@param src input matrix.
-@param scale scale factor.
-@param ddepth optional depth of the output matrix. If -1, the depth of output matrix will be the same as input matrix depth.
-@sa add, sub, div, addWeighted
-*/
-GAPI_EXPORTS_W GMat divRC(const GScalar& divident, const GMat& src, double scale, int ddepth = -1);
-
-/** @brief Applies a mask to a matrix.
-
-The function mask sets the output value from the given matrix if the corresponding pixel value in the mask matrix is set to true,
-and sets the output value to 0 otherwise.
-
-Supported src matrix data types are @ref CV_8UC1, @ref CV_16SC1, @ref CV_16UC1. Supported mask data type is @ref CV_8UC1.
-
-@note Function textual ID is "org.opencv.core.math.mask"
-@param src input matrix.
-@param mask input mask matrix.
-*/
-GAPI_EXPORTS_W GMat mask(const GMat& src, const GMat& mask);
-
-/** @brief Calculates an average (mean) of matrix elements.
-
-The function mean calculates the mean value M of matrix elements,
-independently for each channel, and returns it.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.math.mean"
-@param src input matrix.
-@sa countNonZero, min, max
-*/
-GAPI_EXPORTS_W GScalar mean(const GMat& src);
-
-/** @brief Calculates x and y coordinates of 2D vectors from their magnitude and angle.
-
-The function polarToCart calculates the Cartesian coordinates of each 2D
-vector represented by the corresponding elements of magnitude and angle:
-\f[\begin{array}{l} \texttt{x} (I) = \texttt{magnitude} (I) \cos ( \texttt{angle} (I)) \\ \texttt{y} (I) = \texttt{magnitude} (I) \sin ( \texttt{angle} (I)) \\ \end{array}\f]
-
-The relative accuracy of the estimated coordinates is about 1e-6.
-
-First output is a matrix of x-coordinates of 2D vectors.
-Second output is a matrix of y-coordinates of 2D vectors.
-Both outputs must have the same size and depth as the input matrices.
-
-@note Function textual ID is "org.opencv.core.math.polarToCart"
-
-@param magnitude input floating-point @ref CV_32FC1 matrix (1xN) of magnitudes of 2D vectors;
-@param angle input floating-point @ref CV_32FC1 matrix (1xN) of angles of 2D vectors.
-@param angleInDegrees when true, the input angles are measured in
-degrees, otherwise, they are measured in radians.
-@sa cartToPolar, exp, log, pow, sqrt
-*/
-GAPI_EXPORTS_W std::tuple<GMat, GMat> polarToCart(const GMat& magnitude, const GMat& angle,
-                                                  bool angleInDegrees = false);
-
-/** @brief Calculates the magnitude and angle of 2D vectors.
-
-The function cartToPolar calculates either the magnitude, angle, or both
-for every 2D vector (x(I),y(I)):
-\f[\begin{array}{l} \texttt{magnitude} (I)= \sqrt{\texttt{x}(I)^2+\texttt{y}(I)^2} , \\ \texttt{angle} (I)= \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))[ \cdot180 / \pi ] \end{array}\f]
-
-The angles are calculated with accuracy about 0.3 degrees. For the point
-(0,0), the angle is set to 0.
-
-First output is a matrix of magnitudes of the same size and depth as input x.
-Second output is a matrix of angles that has the same size and depth as
-x; the angles are measured in radians (from 0 to 2\*Pi) or in degrees (0 to 360 degrees).
-
-@note Function textual ID is "org.opencv.core.math.cartToPolar"
-
-@param x matrix of @ref CV_32FC1 x-coordinates.
-@param y matrix of @ref CV_32FC1 y-coordinates.
-@param angleInDegrees a flag, indicating whether the angles are measured
-in radians (which is by default), or in degrees.
-@sa polarToCart
-*/
-GAPI_EXPORTS_W std::tuple<GMat, GMat> cartToPolar(const GMat& x, const GMat& y,
-                                                  bool angleInDegrees = false);
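-
-/* A usage sketch feeding cartToPolar with gradient images; the Sobel setup
-   assumes the imgproc kernels from <opencv2/gapi/imgproc.hpp>:
-@code{.cpp}
-cv::GMat in;
-cv::GMat gx = cv::gapi::Sobel(in, CV_32F, 1, 0);
-cv::GMat gy = cv::gapi::Sobel(in, CV_32F, 0, 1);
-cv::GMat mag, ang;
-std::tie(mag, ang) = cv::gapi::cartToPolar(gx, gy, true); // angles in degrees
-@endcode
-*/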
-
-/** @brief Calculates the rotation angle of 2D vectors.
-
-The function cv::phase calculates the rotation angle of each 2D vector that
-is formed from the corresponding elements of x and y :
-\f[\texttt{angle} (I) = \texttt{atan2} ( \texttt{y} (I), \texttt{x} (I))\f]
-
-The angle estimation accuracy is about 0.3 degrees. When x(I)=y(I)=0 ,
-the corresponding angle(I) is set to 0.
-@param x input floating-point array of x-coordinates of 2D vectors.
-@param y input array of y-coordinates of 2D vectors; it must have the
-same size and the same type as x.
-@param angleInDegrees when true, the function calculates the angle in
-degrees, otherwise, they are measured in radians.
-@return array of vector angles; it has the same size and same type as x.
-*/
-GAPI_EXPORTS_W GMat phase(const GMat& x, const GMat &y, bool angleInDegrees = false);
-
-/** @brief Calculates a square root of array elements.
-
-The function cv::gapi::sqrt calculates a square root of each input array element.
-In case of multi-channel arrays, each channel is processed
-independently. The accuracy is approximately the same as of the built-in
-std::sqrt .
-@param src input floating-point array.
-@return output array of the same size and type as src.
-*/
-GAPI_EXPORTS_W GMat sqrt(const GMat &src);
-
-//! @} gapi_math
-//!
-//! @addtogroup gapi_pixelwise
-//! @{
-
-/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are greater than elements in the second.
-
-The function compares elements of two matrices src1 and src2 of the same size:
- \f[\texttt{dst} (I) = \texttt{src1} (I) > \texttt{src2} (I)\f]
-
-When the comparison result is true, the corresponding element of output
-array is set to 255. The comparison operations can be replaced with the
-equivalent matrix expressions:
-\f[\texttt{dst} = \texttt{src1} > \texttt{src2}\f]
-
-Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
- the input matrices/matrix.
-
-Supported input matrix data types are @ref CV_8UC1, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGT"
-@param src1 first input matrix.
-@param src2 second input matrix/scalar of the same depth as first input matrix.
-@sa min, max, threshold, cmpLE, cmpGE, cmpLT
-*/
-GAPI_EXPORTS_W GMat cmpGT(const GMat& src1, const GMat& src2);
-/** @overload
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGTScalar"
-*/
-GAPI_EXPORTS_W GMat cmpGT(const GMat& src1, const GScalar& src2);
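-
-/* A sketch of building a binary mask with the scalar overload; the 127
-   threshold is an arbitrary assumption:
-@code{.cpp}
-cv::GMat in;                              // CV_8UC1
-cv::GMat mask = cv::gapi::cmpGT(in, 127); // 255 where in(I) > 127, 0 elsewhere
-@endcode
-*/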
-
-/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are less than elements in the second.
-
-The function compares elements of two matrices src1 and src2 of the same size:
- \f[\texttt{dst} (I) = \texttt{src1} (I) < \texttt{src2} (I)\f]
-
-When the comparison result is true, the corresponding element of output
-array is set to 255. The comparison operations can be replaced with the
-equivalent matrix expressions:
- \f[\texttt{dst} = \texttt{src1} < \texttt{src2}\f]
-
-Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
- the input matrices/matrix.
-
-Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpLT"
-@param src1 first input matrix.
-@param src2 second input matrix/scalar of the same depth as first input matrix.
-@sa min, max, threshold, cmpLE, cmpGE, cmpGT
-*/
-GAPI_EXPORTS_W GMat cmpLT(const GMat& src1, const GMat& src2);
-/** @overload
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpLTScalar"
-*/
-GAPI_EXPORTS_W GMat cmpLT(const GMat& src1, const GScalar& src2);
-
-/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are greater than or equal to elements in the second.
-
-The function compares elements of two matrices src1 and src2 of the same size:
- \f[\texttt{dst} (I) = \texttt{src1} (I) >= \texttt{src2} (I)\f]
-
-When the comparison result is true, the corresponding element of output
-array is set to 255. The comparison operations can be replaced with the
-equivalent matrix expressions:
- \f[\texttt{dst} = \texttt{src1} >= \texttt{src2}\f]
-
-Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
- the input matrices.
-
-Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGE"
-@param src1 first input matrix.
-@param src2 second input matrix/scalar of the same depth as first input matrix.
-@sa min, max, threshold, cmpLE, cmpGT, cmpLT
-*/
-GAPI_EXPORTS_W GMat cmpGE(const GMat& src1, const GMat& src2);
-/** @overload
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpGEScalar"
-*/
-GAPI_EXPORTS_W GMat cmpGE(const GMat& src1, const GScalar& src2);
-
-/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are less than or equal to elements in the second.
-
-The function compares elements of two matrices src1 and src2 of the same size:
- \f[\texttt{dst} (I) = \texttt{src1} (I) <= \texttt{src2} (I)\f]
-
-When the comparison result is true, the corresponding element of output
-array is set to 255. The comparison operations can be replaced with the
-equivalent matrix expressions:
- \f[\texttt{dst} = \texttt{src1} <= \texttt{src2}\f]
-
-Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
- the input matrices.
-
-Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpLE"
-@param src1 first input matrix.
-@param src2 second input matrix/scalar of the same depth as first input matrix.
-@sa min, max, threshold, cmpGT, cmpGE, cmpLT
-*/
-GAPI_EXPORTS_W GMat cmpLE(const GMat& src1, const GMat& src2);
-/** @overload
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpLEScalar"
-*/
-GAPI_EXPORTS_W GMat cmpLE(const GMat& src1, const GScalar& src2);
-
-/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are equal to elements in the second.
-
-The function compares elements of two matrices src1 and src2 of the same size:
- \f[\texttt{dst} (I) = \texttt{src1} (I) == \texttt{src2} (I)\f]
-
-When the comparison result is true, the corresponding element of output
-array is set to 255. The comparison operations can be replaced with the
-equivalent matrix expressions:
- \f[\texttt{dst} = \texttt{src1} == \texttt{src2}\f]
-
-Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
- the input matrices.
-
-Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpEQ"
-@param src1 first input matrix.
-@param src2 second input matrix/scalar of the same depth as first input matrix.
-@sa min, max, threshold, cmpNE
-*/
-GAPI_EXPORTS_W GMat cmpEQ(const GMat& src1, const GMat& src2);
-/** @overload
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpEQScalar"
-*/
-GAPI_EXPORTS_W GMat cmpEQ(const GMat& src1, const GScalar& src2);
-
-/** @brief Performs the per-element comparison of two matrices checking if elements from the first matrix are not equal to elements in the second.
-
-The function compares elements of two matrices src1 and src2 of the same size:
- \f[\texttt{dst} (I) = \texttt{src1} (I) != \texttt{src2} (I)\f]
-
-When the comparison result is true, the corresponding element of output
-array is set to 255. The comparison operations can be replaced with the
-equivalent matrix expressions:
- \f[\texttt{dst} = \texttt{src1} != \texttt{src2}\f]
-
-Output matrix of depth @ref CV_8U must have the same size and the same number of channels as
- the input matrices.
-
-Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpNE"
-@param src1 first input matrix.
-@param src2 second input matrix/scalar of the same depth as first input matrix.
-@sa min, max, threshold, cmpEQ
-*/
-GAPI_EXPORTS_W GMat cmpNE(const GMat& src1, const GMat& src2);
-/** @overload
-@note Function textual ID is "org.opencv.core.pixelwise.compare.cmpNEScalar"
-*/
-GAPI_EXPORTS_W GMat cmpNE(const GMat& src1, const GScalar& src2);
-
-/** @brief computes bitwise conjunction of the two matrices (src1 & src2)
-Calculates the per-element bit-wise logical conjunction of two matrices of the same size.
-
-In case of floating-point matrices, their machine-specific bit
-representations (usually IEEE754-compliant) are used for the operation.
-In case of multi-channel matrices, each channel is processed
-independently. Output matrix must have the same size and depth as the input
-matrices.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.pixelwise.bitwise_and"
-
-@param src1 first input matrix.
-@param src2 second input matrix.
-*/
-GAPI_EXPORTS_W GMat bitwise_and(const GMat& src1, const GMat& src2);
-/** @overload
-@note Function textual ID is "org.opencv.core.pixelwise.bitwise_andS"
-@param src1 first input matrix.
-@param src2 scalar, which will be per-element conjuncted with elements of src1.
-*/
-GAPI_EXPORTS_W GMat bitwise_and(const GMat& src1, const GScalar& src2);
-
-/** @brief computes bitwise disjunction of the two matrices (src1 | src2)
-Calculates the per-element bit-wise logical disjunction of two matrices of the same size.
-
-In case of floating-point matrices, their machine-specific bit
-representations (usually IEEE754-compliant) are used for the operation.
-In case of multi-channel matrices, each channel is processed
-independently. Output matrix must have the same size and depth as the input
-matrices.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.pixelwise.bitwise_or"
-
-@param src1 first input matrix.
-@param src2 second input matrix.
-*/
-GAPI_EXPORTS_W GMat bitwise_or(const GMat& src1, const GMat& src2);
-/** @overload
-@note Function textual ID is "org.opencv.core.pixelwise.bitwise_orS"
-@param src1 first input matrix.
-@param src2 scalar, which will be per-element disjuncted with elements of src1.
-*/
-GAPI_EXPORTS_W GMat bitwise_or(const GMat& src1, const GScalar& src2);
-
-
-/** @brief computes bitwise logical "exclusive or" of the two matrices (src1 ^ src2)
-Calculates the per-element bit-wise logical "exclusive or" of two matrices of the same size.
-
-In case of floating-point matrices, their machine-specific bit
-representations (usually IEEE754-compliant) are used for the operation.
-In case of multi-channel matrices, each channel is processed
-independently. Output matrix must have the same size and depth as the input
-matrices.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.pixelwise.bitwise_xor"
-
-@param src1 first input matrix.
-@param src2 second input matrix.
-*/
-GAPI_EXPORTS_W GMat bitwise_xor(const GMat& src1, const GMat& src2);
-/** @overload
-@note Function textual ID is "org.opencv.core.pixelwise.bitwise_xorS"
-@param src1 first input matrix.
-@param src2 scalar, with which the per-element "exclusive or" operation on elements of src1 will be performed.
-*/
-GAPI_EXPORTS_W GMat bitwise_xor(const GMat& src1, const GScalar& src2);
-
-
-/** @brief Inverts every bit of an array.
-
-The function bitwise_not calculates per-element bit-wise inversion of the input
-matrix:
-\f[\texttt{dst} (I) = \neg \texttt{src} (I)\f]
-
-In case of floating-point matrices, their machine-specific bit
-representations (usually IEEE754-compliant) are used for the operation.
-In case of multi-channel matrices, each channel is processed
-independently. Output matrix must have the same size and depth as the input
-matrix.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.pixelwise.bitwise_not"
-
-@param src input matrix.
-*/
-GAPI_EXPORTS_W GMat bitwise_not(const GMat& src);
-
-/** @brief Selects values from either the first or the second input matrix by the given mask.
-The function sets the output matrix value to the value from the first input matrix if the corresponding value of the mask matrix is 255,
- or to the value from the second input matrix (if the value of the mask matrix is set to 0).
-
-Input mask matrix must be of @ref CV_8UC1 type, the two other input matrices and the output matrix should be of the same type. The size should
-be the same for all input and output matrices.
-Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.pixelwise.select"
-
-@param src1 first input matrix.
-@param src2 second input matrix.
-@param mask mask input matrix.
-*/
-GAPI_EXPORTS_W GMat select(const GMat& src1, const GMat& src2, const GMat& mask);
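-
-/* A sketch wiring select with a comparison mask; the blur kernel size and the
-   threshold are assumed, and blur needs <opencv2/gapi/imgproc.hpp>:
-@code{.cpp}
-cv::GMat in;                                   // CV_8UC1
-cv::GMat blurred = cv::gapi::blur(in, cv::Size(5, 5));
-cv::GMat mask    = cv::gapi::cmpGT(in, 127);
-cv::GMat out     = cv::gapi::select(in, blurred, mask); // keep bright pixels intact
-@endcode
-*/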
-
-//! @} gapi_pixelwise
-
-
-//! @addtogroup gapi_matrixop
-//! @{
-/** @brief Calculates per-element minimum of two matrices.
-
-The function min calculates the per-element minimum of two matrices of the same size, number of channels and depth:
-\f[\texttt{dst} (I)= \min ( \texttt{src1} (I), \texttt{src2} (I))\f]
- where I is a multi-dimensional index of matrix elements. In case of
- multi-channel matrices, each channel is processed independently.
-Output matrix must be of the same size and depth as src1.
-
-Supported input matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.matrixop.min"
-@param src1 first input matrix.
-@param src2 second input matrix of the same size and depth as src1.
-@sa max, cmpEQ, cmpLT, cmpLE
-*/
-GAPI_EXPORTS_W GMat min(const GMat& src1, const GMat& src2);
-
-/** @brief Calculates per-element maximum of two matrices.
-
-The function max calculates the per-element maximum of two matrices of the same size, number of channels and depth:
-\f[\texttt{dst} (I)= \max ( \texttt{src1} (I), \texttt{src2} (I))\f]
- where I is a multi-dimensional index of matrix elements. In case of
- multi-channel matrices, each channel is processed independently.
-Output matrix must be of the same size and depth as src1.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.matrixop.max"
-@param src1 first input matrix.
-@param src2 second input matrix of the same size and depth as src1.
-@sa min, compare, cmpEQ, cmpGT, cmpGE
-*/
-GAPI_EXPORTS_W GMat max(const GMat& src1, const GMat& src2);
-
-/** @brief Calculates the per-element absolute difference between two matrices.
-
-The function absDiff calculates absolute difference between two matrices of the same size and depth:
- \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1}(I) - \texttt{src2}(I)|)\f]
- where I is a multi-dimensional index of matrix elements. In case of
- multi-channel matrices, each channel is processed independently.
-Output matrix must have the same size and depth as input matrices.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.matrixop.absdiff"
-@param src1 first input matrix.
-@param src2 second input matrix.
-@sa abs
-*/
-GAPI_EXPORTS_W GMat absDiff(const GMat& src1, const GMat& src2);
-
-/** @brief Calculates the absolute difference between matrix elements and a given scalar.
-
-The function absDiffC calculates the absolute difference between matrix elements and a given scalar value:
- \f[\texttt{dst}(I) = \texttt{saturate} (| \texttt{src1}(I) - \texttt{matC}(I)|)\f]
- where matC is constructed from the given scalar c and has the same size and depth as the input matrix src.
-
-Output matrix must be of the same size and depth as src.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.matrixop.absdiffC"
-@param src input matrix.
-@param c scalar to be subtracted.
-@sa min, max
-*/
-GAPI_EXPORTS_W GMat absDiffC(const GMat& src, const GScalar& c);
-
-/** @brief Calculates sum of all matrix elements.
-
-The function sum calculates sum of all matrix elements, independently for each channel.
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.matrixop.sum"
-@param src input matrix.
-@sa countNonZero, mean, min, max
-*/
-GAPI_EXPORTS_W GScalar sum(const GMat& src);
-
-/** @brief Counts non-zero array elements.
-
-The function returns the number of non-zero elements in src :
-\f[\sum _{I: \; \texttt{src} (I) \ne0 } 1\f]
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.matrixop.countNonZero"
-@param src input single-channel matrix.
-@sa mean, min, max
-*/
-GAPI_EXPORTS_W GOpaque<int> countNonZero(const GMat& src);
-
-/** @brief Calculates the weighted sum of two matrices.
-
-The function addWeighted calculates the weighted sum of two matrices as follows:
-\f[\texttt{dst} (I)= \texttt{saturate} ( \texttt{src1} (I)* \texttt{alpha} + \texttt{src2} (I)* \texttt{beta} + \texttt{gamma} )\f]
-where I is a multi-dimensional index of array elements. In case of multi-channel matrices, each
-channel is processed independently.
-
-The function can be replaced with a matrix expression:
- \f[\texttt{dst}(I) = \texttt{alpha} * \texttt{src1}(I) + \texttt{beta} * \texttt{src2}(I) + \texttt{gamma} \f]
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.matrixop.addweighted"
-@param src1 first input matrix.
-@param alpha weight of the first matrix elements.
-@param src2 second input matrix of the same size and channel number as src1.
-@param beta weight of the second matrix elements.
-@param gamma scalar added to each sum.
-@param ddepth optional depth of the output matrix.
-@sa add, sub
-*/
-GAPI_EXPORTS_W GMat addWeighted(const GMat& src1, double alpha, const GMat& src2, double beta, double gamma, int ddepth = -1);
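-
-/* A cross-fade sketch; the 0.7/0.3 weights are arbitrary assumptions:
-@code{.cpp}
-cv::GMat a, b;
-cv::GMat blended = cv::gapi::addWeighted(a, 0.7, b, 0.3, 0.0);
-cv::GComputation blend(cv::GIn(a, b), cv::GOut(blended));
-@endcode
-*/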
-
-/** @brief Calculates the absolute L1 norm of a matrix.
-
-This version of normL1 calculates the absolute L1 norm of src.
-
-As an example, for one array, consider the function \f$r(x)= \begin{pmatrix} x \\ 1-x \end{pmatrix}, x \in [-1;1]\f$.
-The \f$ L_{1} \f$ norm for the sample value \f$r(-1) = \begin{pmatrix} -1 \\ 2 \end{pmatrix}\f$
-is calculated as follows
-\f{align*}
- \| r(-1) \|_{L_1} &= |-1| + |2| = 3 \\
-\f}
-and for \f$r(0.5) = \begin{pmatrix} 0.5 \\ 0.5 \end{pmatrix}\f$ the calculation is
-\f{align*}
- \| r(0.5) \|_{L_1} &= |0.5| + |0.5| = 1 \\
-\f}
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.matrixop.norml1"
-@param src input matrix.
-@sa normL2, normInf
-*/
-GAPI_EXPORTS_W GScalar normL1(const GMat& src);
-
-/** @brief Calculates the absolute L2 norm of a matrix.
-
-This version of normL2 calculates the absolute L2 norm of src.
-
-As an example, for one array, consider the function \f$r(x)= \begin{pmatrix} x \\ 1-x \end{pmatrix}, x \in [-1;1]\f$.
-The \f$ L_{2} \f$ norm for the sample value \f$r(-1) = \begin{pmatrix} -1 \\ 2 \end{pmatrix}\f$
-is calculated as follows
-\f{align*}
- \| r(-1) \|_{L_2} &= \sqrt{(-1)^{2} + (2)^{2}} = \sqrt{5} \\
-\f}
-and for \f$r(0.5) = \begin{pmatrix} 0.5 \\ 0.5 \end{pmatrix}\f$ the calculation is
-\f{align*}
- \| r(0.5) \|_{L_2} &= \sqrt{(0.5)^{2} + (0.5)^{2}} = \sqrt{0.5} \\
-\f}
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-@note Function textual ID is "org.opencv.core.matrixop.norml2"
-@param src input matrix.
-@sa normL1, normInf
-*/
-GAPI_EXPORTS_W GScalar normL2(const GMat& src);
-
-/** @brief Calculates the absolute infinite norm of a matrix.
-
-This version of normInf calculates the absolute infinite norm of src.
-
-As an example, for one array, consider the function \f$r(x)= \begin{pmatrix} x \\ 1-x \end{pmatrix}, x \in [-1;1]\f$.
-The \f$ L_{\infty} \f$ norm for the sample value \f$r(-1) = \begin{pmatrix} -1 \\ 2 \end{pmatrix}\f$
-is calculated as follows
-\f{align*}
- \| r(-1) \|_{L_\infty} &= \max(|-1|,|2|) = 2
-\f}
-and for \f$r(0.5) = \begin{pmatrix} 0.5 \\ 0.5 \end{pmatrix}\f$ the calculation is
-\f{align*}
- \| r(0.5) \|_{L_\infty} &= \max(|0.5|,|0.5|) = 0.5.
-\f}
-
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.core.matrixop.norminf"
-@param src input matrix.
-@sa normL1, normL2
-*/
-GAPI_EXPORTS_W GScalar normInf(const GMat& src);
-
-/** @brief Calculates the integral of an image.
-
-The function calculates one or more integral images for the source image as follows:
-
-\f[\texttt{sum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)\f]
-
-\f[\texttt{sqsum} (X,Y) = \sum _{x<X,y<Y} \texttt{image} (x,y)^2\f]
-
-The function returns the integral image as a \f$(W+1)\times (H+1)\f$, 32-bit integer or floating-point (32f or 64f) matrix,
-and the integral image for squared pixel values as a \f$(W+1)\times (H+1)\f$, double-precision floating-point (64f) matrix.
-
-@note Function textual ID is "org.opencv.core.matrixop.integral"
-
-@param src input image.
-@param sdepth desired depth of the integral image, CV_32S, CV_32F, or CV_64F.
-@param sqdepth desired depth of the integral image of squared pixel values, CV_32F or CV_64F.
- */
-GAPI_EXPORTS_W std::tuple<GMat, GMat> integral(const GMat& src, int sdepth = -1, int sqdepth = -1);
-
-/** @brief Applies a fixed-level threshold to each matrix element.
-
-The function applies fixed-level thresholding to a single- or multiple-channel matrix.
-The function is typically used to get a bi-level (binary) image out of a grayscale image ( cmp functions could also be used for
-this purpose) or for removing noise, that is, filtering out pixels with too small or too large
-values. There are several types of thresholding supported by the function. They are determined by
-type parameter.
-
-Also, the special values cv::THRESH_OTSU or cv::THRESH_TRIANGLE may be combined with one of the
-above values. In these cases, the function determines the optimal threshold value using the Otsu's
-or Triangle algorithm and uses it instead of the specified thresh. The function returns the
-computed threshold value in addition to the thresholded matrix.
-The Otsu's and Triangle methods are implemented only for 8-bit matrices.
-
-The input image should be single-channel when the cv::THRESH_OTSU or cv::THRESH_TRIANGLE flag is used.
-Output matrix must be of the same size and depth as src.
-
-@note Function textual ID is "org.opencv.core.matrixop.threshold"
-
-@param src input matrix (@ref CV_8UC1, @ref CV_8UC3, or @ref CV_32FC1).
-@param thresh threshold value.
-@param maxval maximum value to use with the cv::THRESH_BINARY and cv::THRESH_BINARY_INV thresholding
-types.
-@param type thresholding type (see the cv::ThresholdTypes).
-
-@sa min, max, cmpGT, cmpLE, cmpGE, cmpLT
- */
-GAPI_EXPORTS_W GMat threshold(const GMat& src, const GScalar& thresh, const GScalar& maxval, int type);
-/** @overload
-This overload is applicable only for the cv::THRESH_OTSU and cv::THRESH_TRIANGLE threshold types:
-the threshold value is computed automatically and returned along with the thresholded matrix.
-@note Function textual ID is "org.opencv.core.matrixop.thresholdOT"
-*/
-GAPI_EXPORTS_W std::tuple<GMat, GScalar> threshold(const GMat& src, const GScalar& maxval, int type);
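-
-/* A sketch of the Otsu overload; the single-channel 8-bit input is required
-   by cv::THRESH_OTSU itself:
-@code{.cpp}
-cv::GMat gray;                                 // CV_8UC1
-cv::GMat bin; cv::GScalar otsuThresh;
-std::tie(bin, otsuThresh) =
-    cv::gapi::threshold(gray, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
-@endcode
-*/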
-
-/** @brief Applies a range-level threshold to each matrix element.
-
-The function applies range-level thresholding to a single- or multiple-channel matrix.
-It sets the output pixel value to 0xFF if the corresponding pixel value of the input matrix is in the specified range, or to 0 otherwise.
-
-Input and output matrices must be CV_8UC1.
-
-@note Function textual ID is "org.opencv.core.matrixop.inRange"
-
-@param src input matrix (CV_8UC1).
-@param threshLow lower boundary value.
-@param threshUp upper boundary value.
-
-@sa threshold
- */
-GAPI_EXPORTS_W GMat inRange(const GMat& src, const GScalar& threshLow, const GScalar& threshUp);
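-
-/* A band-pass masking sketch; the [64;192] band is an assumed range:
-@code{.cpp}
-cv::GMat gray;                                    // CV_8UC1
-cv::GMat band = cv::gapi::inRange(gray, 64, 192); // 0xFF inside [64;192]
-@endcode
-*/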
-
-//! @} gapi_matrixop
-
-//! @addtogroup gapi_transform
-//! @{
-/** @brief Creates one 4-channel matrix out of 4 single-channel ones.
-
-The function merges several matrices to make a single multi-channel matrix. That is, each
-element of the output matrix will be a concatenation of the elements of the input matrices, where
-elements of i-th input matrix are treated as mv[i].channels()-element vectors.
-Output matrix must be of @ref CV_8UC4 type.
-
-The function split4 does the reverse operation.
-
-@note
- - Function textual ID is "org.opencv.core.transform.merge4"
-
-@param src1 first input @ref CV_8UC1 matrix to be merged.
-@param src2 second input @ref CV_8UC1 matrix to be merged.
-@param src3 third input @ref CV_8UC1 matrix to be merged.
-@param src4 fourth input @ref CV_8UC1 matrix to be merged.
-@sa merge3, split4, split3
-*/
-GAPI_EXPORTS_W GMat merge4(const GMat& src1, const GMat& src2, const GMat& src3, const GMat& src4);
-
-/** @brief Creates one 3-channel matrix out of 3 single-channel ones.
-
-The function merges several matrices to make a single multi-channel matrix. That is, each
-element of the output matrix will be a concatenation of the elements of the input matrices, where
-elements of i-th input matrix are treated as mv[i].channels()-element vectors.
-Output matrix must be of @ref CV_8UC3 type.
-
-The function split3 does the reverse operation.
-
-@note
- - Function textual ID is "org.opencv.core.transform.merge3"
-
-@param src1 first input @ref CV_8UC1 matrix to be merged.
-@param src2 second input @ref CV_8UC1 matrix to be merged.
-@param src3 third input @ref CV_8UC1 matrix to be merged.
-@sa merge4, split4, split3
-*/
-GAPI_EXPORTS_W GMat merge3(const GMat& src1, const GMat& src2, const GMat& src3);
-
-/** @brief Divides a 4-channel matrix into 4 single-channel matrices.
-
-The function splits a 4-channel matrix into 4 single-channel matrices:
-\f[\texttt{mv} [c](I) = \texttt{src} (I)_c\f]
-
-All output matrices must be of @ref CV_8UC1 type.
-
-The function merge4 does the reverse operation.
-
-@note
- - Function textual ID is "org.opencv.core.transform.split4"
-
-@param src input @ref CV_8UC4 matrix.
-@sa split3, merge3, merge4
-*/
-GAPI_EXPORTS_W std::tuple<GMat, GMat, GMat, GMat> split4(const GMat& src);
-
-/** @brief Divides a 3-channel matrix into 3 single-channel matrices.
-
-The function splits a 3-channel matrix into 3 single-channel matrices:
-\f[\texttt{mv} [c](I) = \texttt{src} (I)_c\f]
-
-All output matrices must be of @ref CV_8UC1 type.
-
-The function merge3 does the reverse operation.
-
-@note
- - Function textual ID is "org.opencv.core.transform.split3"
-
-@param src input @ref CV_8UC3 matrix.
-@sa split4, merge3, merge4
-*/
-GAPI_EXPORTS_W std::tuple<GMat, GMat, GMat> split3(const GMat& src);
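-
-/* A channel-swap sketch built from split3 and merge3; the BGR layout of the
-   input is an assumption for illustration:
-@code{.cpp}
-cv::GMat bgr;                                  // CV_8UC3
-cv::GMat b, g, r;
-std::tie(b, g, r) = cv::gapi::split3(bgr);
-cv::GMat rgb = cv::gapi::merge3(r, g, b);
-@endcode
-*/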
-
-/** @brief Applies a generic geometrical transformation to an image.
-
-The function remap transforms the source image using the specified map:
-
-\f[\texttt{dst} (x,y) = \texttt{src} (map_x(x,y),map_y(x,y))\f]
-
-where values of pixels with non-integer coordinates are computed using one of available
-interpolation methods. \f$map_x\f$ and \f$map_y\f$ can be encoded as separate floating-point maps
-in \f$map_1\f$ and \f$map_2\f$ respectively, or interleaved floating-point maps of \f$(x,y)\f$ in
-\f$map_1\f$, or fixed-point maps created by using convertMaps. The reason you might want to
-convert from floating to fixed-point representations of a map is that they can yield much faster
-(\~2x) remapping operations. In the converted case, \f$map_1\f$ contains pairs (cvFloor(x),
-cvFloor(y)) and \f$map_2\f$ contains indices in a table of interpolation coefficients.
-Output image must be of the same size and depth as input one.
-
-@note
- - Function textual ID is "org.opencv.core.transform.remap"
- - Due to current implementation limitations the size of an input and output images should be less than 32767x32767.
-
-@param src Source image.
-@param map1 The first map of either (x,y) points or just x values having the type CV_16SC2,
-CV_32FC1, or CV_32FC2.
-@param map2 The second map of y values having the type CV_16UC1, CV_32FC1, or none (empty map
-if map1 is (x,y) points), respectively.
-@param interpolation Interpolation method (see cv::InterpolationFlags). The methods #INTER_AREA
-and #INTER_LINEAR_EXACT are not supported by this function.
-@param borderMode Pixel extrapolation method (see cv::BorderTypes). When
-borderMode=BORDER_TRANSPARENT, it means that the pixels in the destination image that
-corresponds to the "outliers" in the source image are not modified by the function.
-@param borderValue Value used in case of a constant border. By default, it is 0.
- */
-GAPI_EXPORTS_W GMat remap(const GMat& src, const Mat& map1, const Mat& map2,
- int interpolation, int borderMode = BORDER_CONSTANT,
- const Scalar& borderValue = Scalar());
-
-/** @brief Flips a 2D matrix around vertical, horizontal, or both axes.
-
-The function flips the matrix in one of three different ways (row
-and column indices are 0-based):
-\f[\texttt{dst} _{ij} =
-\left\{
-\begin{array}{l l}
-\texttt{src} _{\texttt{src.rows}-i-1,j} & if\; \texttt{flipCode} = 0 \\
-\texttt{src} _{i, \texttt{src.cols} -j-1} & if\; \texttt{flipCode} > 0 \\
-\texttt{src} _{ \texttt{src.rows} -i-1, \texttt{src.cols} -j-1} & if\; \texttt{flipCode} < 0 \\
-\end{array}
-\right.\f]
-The example scenarios of using the function are the following:
-* Vertical flipping of the image (flipCode == 0) to switch between
- top-left and bottom-left image origin. This is a typical operation
- in video processing on Microsoft Windows\* OS.
-* Horizontal flipping of the image with the subsequent horizontal
- shift and absolute difference calculation to check for a
- vertical-axis symmetry (flipCode \> 0).
-* Simultaneous horizontal and vertical flipping of the image with
- the subsequent shift and absolute difference calculation to check
- for a central symmetry (flipCode \< 0).
-* Reversing the order of point arrays (flipCode \> 0 or
- flipCode == 0).
-Output image must be of the same depth as input one, size should be correct for given flipCode.
-
-@note Function textual ID is "org.opencv.core.transform.flip"
-
-@param src input matrix.
-@param flipCode a flag to specify how to flip the array; 0 means
-flipping around the x-axis and positive value (for example, 1) means
-flipping around y-axis. Negative value (for example, -1) means flipping
-around both axes.
-@sa remap
-*/
-GAPI_EXPORTS_W GMat flip(const GMat& src, int flipCode);
-
-/** @brief Crops a 2D matrix.
-
-The function crops the matrix by given cv::Rect.
-
-Output matrix must be of the same depth as input one, size is specified by given rect size.
-
-@note Function textual ID is "org.opencv.core.transform.crop"
-
-@param src input matrix.
-@param rect a rect to crop a matrix to
-@sa resize
-*/
-GAPI_EXPORTS_W GMat crop(const GMat& src, const Rect& rect);
-
-/** @brief Applies horizontal concatenation to given matrices.
-
-The function horizontally concatenates two GMat matrices (with the same number of rows).
-@code{.cpp}
- GMat A = { 1, 4,
- 2, 5,
- 3, 6 };
- GMat B = { 7, 10,
- 8, 11,
- 9, 12 };
-
- GMat C = gapi::concatHor(A, B);
- //C:
- //[1, 4, 7, 10;
- // 2, 5, 8, 11;
- // 3, 6, 9, 12]
-@endcode
-Output matrix must have the same number of rows and depth as src1 and src2, and its number of cols must be the sum of the cols of src1 and src2.
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.imgproc.transform.concatHor"
-
-@param src1 first input matrix to be considered for horizontal concatenation.
-@param src2 second input matrix to be considered for horizontal concatenation.
-@sa concatVert
-*/
-GAPI_EXPORTS_W GMat concatHor(const GMat& src1, const GMat& src2);
-
-/** @overload
-The function horizontally concatenates a given number of GMat matrices (with the same number of rows).
-Output matrix must have the same number of rows and depth as the input matrices, and its number of cols must be the sum of the cols of the input matrices.
-
-@param v vector of input matrices to be concatenated horizontally.
-*/
-GAPI_EXPORTS_W GMat concatHor(const std::vector<GMat> &v);
-
-/** @brief Applies vertical concatenation to given matrices.
-
-The function vertically concatenates two GMat matrices (with the same number of cols).
- @code{.cpp}
- GMat A = { 1, 7,
- 2, 8,
- 3, 9 };
- GMat B = { 4, 10,
- 5, 11,
- 6, 12 };
-
- GMat C = gapi::concatVert(A, B);
- //C:
- //[1, 7;
- // 2, 8;
- // 3, 9;
- // 4, 10;
- // 5, 11;
- // 6, 12]
- @endcode
-
-Output matrix must have the same number of cols and depth as src1 and src2, and its number of rows must be the sum of the rows of src1 and src2.
-Supported matrix data types are @ref CV_8UC1, @ref CV_8UC3, @ref CV_16UC1, @ref CV_16SC1, @ref CV_32FC1.
-
-@note Function textual ID is "org.opencv.imgproc.transform.concatVert"
-
-@param src1 first input matrix to be considered for vertical concatenation.
-@param src2 second input matrix to be considered for vertical concatenation.
-@sa concatHor
-*/
-GAPI_EXPORTS_W GMat concatVert(const GMat& src1, const GMat& src2);
-
-/** @overload
-The function vertically concatenates a given number of GMat matrices (with the same number of columns).
-Output matrix must have the same number of columns and depth as the input matrices, and its number of rows must be the sum of the rows of the input matrices.
-
-@param v vector of input matrices to be concatenated vertically.
-*/
-GAPI_EXPORTS_W GMat concatVert(const std::vector<GMat> &v);
-
-
-/** @brief Performs a look-up table transform of a matrix.
-
-The function LUT fills the output matrix with values from the look-up table. Indices of the entries
-are taken from the input matrix. That is, the function processes each element of src as follows:
-\f[\texttt{dst} (I) \leftarrow \texttt{lut(src(I))}\f]
-
-Supported matrix data type is @ref CV_8UC1.
-Output is a matrix of the same size and number of channels as src, and the same depth as lut.
-
-@note Function textual ID is "org.opencv.core.transform.LUT"
-
-@param src input matrix of 8-bit elements.
-@param lut look-up table of 256 elements; in case of multi-channel input array, the table should
-either have a single channel (in this case the same table is used for all channels) or the same
-number of channels as in the input matrix.
-*/
-GAPI_EXPORTS_W GMat LUT(const GMat& src, const Mat& lut);
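-
-/* An intensity-inversion sketch; the 256-entry table contents are assumed
-   purely for illustration:
-@code{.cpp}
-cv::Mat lut(1, 256, CV_8UC1);
-for (int i = 0; i < 256; ++i) lut.at<uchar>(0, i) = static_cast<uchar>(255 - i);
-
-cv::GMat in;                                   // CV_8UC1
-cv::GMat inverted = cv::gapi::LUT(in, lut);
-@endcode
-*/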
-
-/** @brief Converts a matrix to another data depth with optional scaling.
-
-The function converts source pixel values to the target data depth. saturate_cast\<\> is applied at
-the end to avoid possible overflows:
-
-\f[m(x,y) = saturate\_cast<rType>( \alpha \texttt{src} (x,y) + \beta )\f]
-Output matrix must be of the same size as input one.
-
-@note Function textual ID is "org.opencv.core.transform.convertTo"
-@param src input matrix to be converted from.
-@param rdepth desired output matrix depth or, rather, the depth since the number of channels is the
-same as in the input; if rdepth is negative, the output matrix will have the same depth as the input.
-@param alpha optional scale factor.
-@param beta optional delta added to the scaled values.
- */
-GAPI_EXPORTS_W GMat convertTo(const GMat& src, int rdepth, double alpha=1, double beta=0);
-
-/** @brief Normalizes the norm or value range of an array.
-
-The function normalize scales and shifts the input array elements so that
-\f[\| \texttt{dst} \| _{L_p}= \texttt{alpha}\f]
-(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that
-\f[\min _I \texttt{dst} (I)= \texttt{alpha} , \, \, \max _I \texttt{dst} (I)= \texttt{beta}\f]
-when normType=NORM_MINMAX (for dense arrays only).
-
-@note Function textual ID is "org.opencv.core.normalize"
-
-@param src input array.
-@param alpha norm value to normalize to or the lower range boundary in case of the range
-normalization.
-@param beta upper range boundary in case of the range normalization; it is not used for the norm
-normalization.
-@param norm_type normalization type (see cv::NormTypes).
-@param ddepth when negative, the output array has the same type as src; otherwise, it has the same
-number of channels as src and the depth =ddepth.
-@sa norm, Mat::convertTo
-*/
-GAPI_EXPORTS_W GMat normalize(const GMat& src, double alpha, double beta,
- int norm_type, int ddepth = -1);
-
-/** @brief Applies a perspective transformation to an image.
-
-The function warpPerspective transforms the source image using the specified matrix:
-
-\f[\texttt{dst} (x,y) = \texttt{src} \left ( \frac{M_{11} x + M_{12} y + M_{13}}{M_{31} x + M_{32} y + M_{33}} ,
- \frac{M_{21} x + M_{22} y + M_{23}}{M_{31} x + M_{32} y + M_{33}} \right )\f]
-
-when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted with invert
-and then put in the formula above instead of M. The function cannot operate in-place.
-
-@param src input image.
-@param M \f$3\times 3\f$ transformation matrix.
-@param dsize size of the output image.
-@param flags combination of interpolation methods (#INTER_LINEAR or #INTER_NEAREST) and the
-optional flag #WARP_INVERSE_MAP, that sets M as the inverse transformation (
-\f$\texttt{dst}\rightarrow\texttt{src}\f$ ).
-@param borderMode pixel extrapolation method (#BORDER_CONSTANT or #BORDER_REPLICATE).
-@param borderValue value used in case of a constant border; by default, it equals 0.
-
-@sa warpAffine, resize, remap, getRectSubPix, perspectiveTransform
- */
-GAPI_EXPORTS_W GMat warpPerspective(const GMat& src, const Mat& M, const Size& dsize, int flags = cv::INTER_LINEAR,
- int borderMode = cv::BORDER_CONSTANT, const Scalar& borderValue = Scalar());
-
-/** @brief Applies an affine transformation to an image.
-
-The function warpAffine transforms the source image using the specified matrix:
-
-\f[\texttt{dst} (x,y) = \texttt{src} ( \texttt{M} _{11} x + \texttt{M} _{12} y + \texttt{M} _{13}, \texttt{M} _{21} x + \texttt{M} _{22} y + \texttt{M} _{23})\f]
-
-when the flag #WARP_INVERSE_MAP is set. Otherwise, the transformation is first inverted
-with #invertAffineTransform and then put in the formula above instead of M. The function cannot
-operate in-place.
-
-@param src input image.
-@param M \f$2\times 3\f$ transformation matrix.
-@param dsize size of the output image.
-@param flags combination of interpolation methods (see #InterpolationFlags) and the optional
-flag #WARP_INVERSE_MAP that means that M is the inverse transformation (
-\f$\texttt{dst}\rightarrow\texttt{src}\f$ ).
-@param borderMode pixel extrapolation method (see #BorderTypes);
-borderMode=#BORDER_TRANSPARENT isn't supported
-@param borderValue value used in case of a constant border; by default, it is 0.
-
-@sa warpPerspective, resize, remap, getRectSubPix, transform
- */
-GAPI_EXPORTS_W GMat warpAffine(const GMat& src, const Mat& M, const Size& dsize, int flags = cv::INTER_LINEAR,
- int borderMode = cv::BORDER_CONSTANT, const Scalar& borderValue = Scalar());
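-
-/* A rotation sketch; the 640x480 canvas, the image center and the 30-degree
-   angle are assumed values:
-@code{.cpp}
-cv::Mat M = cv::getRotationMatrix2D(cv::Point2f(320.f, 240.f), 30.0, 1.0);
-cv::GMat in;
-cv::GMat rotated = cv::gapi::warpAffine(in, M, cv::Size(640, 480));
-@endcode
-*/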
-//! @} gapi_transform
-
-/** @brief Finds centers of clusters and groups input samples around the clusters.
-
-The function kmeans implements a k-means algorithm that finds the centers of K clusters
-and groups the input samples around the clusters. As an output, \f$\texttt{bestLabels}_i\f$
-contains a 0-based cluster index for the \f$i^{th}\f$ sample.
-
-@note
- - Function textual ID is "org.opencv.core.kmeansND"
- - In case of an N-dimensional points' set given, input GMat can have the following traits:
-2 dimensions, a single row or column if there are N channels,
-or N columns if there is a single channel. Mat should have @ref CV_32F depth.
- - However, if a GMat with height != 1, width != 1, channels != 1 is given as data, n-dimensional
-samples are considered to be given in the amount of A, where A = height, n = width * channels.
- - In case of GMat given as data:
- - the output labels are returned as 1-channel GMat with sizes
-width = 1, height = A, where A is samples amount, or width = bestLabels.width,
-height = bestLabels.height if bestLabels given;
- - the cluster centers are returned as 1-channel GMat with sizes
-width = n, height = K, where n is samples' dimensionality and K is clusters' amount.
- - As one of possible usages, if you want to control the initial labels for each attempt
-by yourself, you can utilize just the core of the function. To do that, set the number
-of attempts to 1, initialize labels each time using a custom algorithm, pass them with the
-( flags = #KMEANS_USE_INITIAL_LABELS ) flag, and then choose the best (most-compact) clustering.
-
-@param data Data for clustering. An array of N-Dimensional points with float coordinates is needed.
-Function can take GArray<Point2f>, GArray<Point3f> for 2D and 3D cases or GMat for any
-dimensionality and channels.
-@param K Number of clusters to split the set by.
-@param bestLabels Optional input integer array that can store the supposed initial cluster indices
-for every sample. Used when ( flags = #KMEANS_USE_INITIAL_LABELS ) flag is set.
-@param criteria The algorithm termination criteria, that is, the maximum number of iterations
-and/or the desired accuracy. The accuracy is specified as criteria.epsilon. As soon as each of
-the cluster centers moves by less than criteria.epsilon on some iteration, the algorithm stops.
-@param attempts Flag to specify the number of times the algorithm is executed using different
-initial labellings. The algorithm returns the labels that yield the best compactness (see the first
-function return value).
-@param flags Flag that can take values of cv::KmeansFlags .
-
-@return
- - Compactness measure that is computed as
-\f[\sum _i \| \texttt{samples} _i - \texttt{centers} _{ \texttt{labels} _i} \| ^2\f]
-after every attempt. The best (minimum) value is chosen and the corresponding labels and the
-compactness value are returned by the function.
- - Integer array that stores the cluster indices for every sample.
- - Array of the cluster centers.
-*/
-GAPI_EXPORTS_W std::tuple<GOpaque<double>,GMat,GMat>
-kmeans(const GMat& data, const int K, const GMat& bestLabels,
-       const TermCriteria& criteria, const int attempts, const KmeansFlags flags);
-
-/** @overload
-@note
- - Function textual ID is "org.opencv.core.kmeansNDNoInit"
- - #KMEANS_USE_INITIAL_LABELS flag must not be set while using this overload.
- */
-GAPI_EXPORTS_W std::tuple<GOpaque<double>,GMat,GMat>
-kmeans(const GMat& data, const int K, const TermCriteria& criteria, const int attempts,
-       const KmeansFlags flags);
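-
-/* A sketch of the no-initial-labels overload; K, the termination criteria
-   and the attempts count are assumed values:
-@code{.cpp}
-cv::GMat data;                                 // NxD samples, CV_32F
-cv::GOpaque<double> compactness;
-cv::GMat labels, centers;
-std::tie(compactness, labels, centers) =
-    cv::gapi::kmeans(data, 4,
-                     cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 10, 1.0),
-                     3, cv::KMEANS_RANDOM_CENTERS);
-@endcode
-*/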
-
-/** @overload
-@note Function textual ID is "org.opencv.core.kmeans2D"
- */
-GAPI_EXPORTS_W std::tuple<GOpaque<double>,GArray<int>,GArray<Point2f>>
-kmeans(const GArray<Point2f>& data, const int K, const GArray<int>& bestLabels,
-       const TermCriteria& criteria, const int attempts, const KmeansFlags flags);
-
-/** @overload
-@note Function textual ID is "org.opencv.core.kmeans3D"
- */
-GAPI_EXPORTS_W std::tuple<GOpaque<double>,GArray<int>,GArray<Point3f>>
-kmeans(const GArray<Point3f>& data, const int K, const GArray<int>& bestLabels,
-       const TermCriteria& criteria, const int attempts, const KmeansFlags flags);
-
-
-/** @brief Transposes a matrix.
-
-The function transposes the matrix:
-\f[\texttt{dst} (i,j) = \texttt{src} (j,i)\f]
-
-@note
- - Function textual ID is "org.opencv.core.transpose"
- - No complex conjugation is done in case of a complex matrix. It should be done separately if needed.
-
-@param src input array.
-*/
-GAPI_EXPORTS_W GMat transpose(const GMat& src);
-
-
-namespace streaming {
-/** @brief Gets dimensions from Mat.
-
-@note Function textual ID is "org.opencv.streaming.size"
-
-@param src Input tensor
-@return Size (tensor dimensions).
-*/
-GAPI_EXPORTS_W GOpaque<Size> size(const GMat& src);
-
-/** @overload
-Gets dimensions from rectangle.
-
-@note Function textual ID is "org.opencv.streaming.sizeR"
-
-@param r Input rectangle.
-@return Size (rectangle dimensions).
-*/
-GAPI_EXPORTS_W GOpaque<Size> size(const GOpaque<Rect>& r);
-
-/** @brief Gets dimensions from MediaFrame.
-
-@note Function textual ID is "org.opencv.streaming.sizeMF"
-
-@param src Input frame
-@return Size (frame dimensions).
-*/
-GAPI_EXPORTS_W GOpaque<Size> size(const GFrame& src);
-} //namespace streaming
-} //namespace gapi
-} //namespace cv
-
-#endif //OPENCV_GAPI_CORE_HPP
diff --git a/modules/gapi/include/opencv2/gapi/cpu/core.hpp b/modules/gapi/include/opencv2/gapi/cpu/core.hpp
deleted file mode 100644
index ee86fb72c2..0000000000
--- a/modules/gapi/include/opencv2/gapi/cpu/core.hpp
+++ /dev/null
@@ -1,27 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-//
-// Copyright (C) 2018 Intel Corporation
-
-
-#ifndef OPENCV_GAPI_CPU_CORE_API_HPP
-#define OPENCV_GAPI_CPU_CORE_API_HPP
-
-#include <opencv2/gapi/gkernel.hpp> // GKernelPackage
-#include <opencv2/gapi/own/exports.hpp> // GAPI_EXPORTS
-
-namespace cv {
-namespace gapi {
-namespace core {
-namespace cpu {
-
-GAPI_EXPORTS_W cv::GKernelPackage kernels();
-
-} // namespace cpu
-} // namespace core
-} // namespace gapi
-} // namespace cv
-
-
-#endif // OPENCV_GAPI_CPU_CORE_API_HPP
diff --git a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp b/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
deleted file mode 100644
index eb5f784747..0000000000
--- a/modules/gapi/include/opencv2/gapi/cpu/gcpukernel.hpp
+++ /dev/null
@@ -1,542 +0,0 @@
-// This file is part of OpenCV project.
-// It is subject to the license terms in the LICENSE file found in the top-level directory
-// of this distribution and at http://opencv.org/license.html.
-//
-// Copyright (C) 2018-2022 Intel Corporation
-
-
-#ifndef OPENCV_GAPI_GCPUKERNEL_HPP
-#define OPENCV_GAPI_GCPUKERNEL_HPP
-
-#if defined _MSC_VER
-#pragma warning(push)
-#pragma warning(disable: 4702) // "Unreachable code" on postprocess(...) call inside OCVCallHelper
-#endif
-
-#include <functional>
-#include <unordered_map>
-#include <utility>
-#include <vector>
-
-#include <opencv2/core/mat.hpp>
-#include <opencv2/gapi/gcommon.hpp>
-#include <opencv2/gapi/gkernel.hpp>
-#include <opencv2/gapi/garg.hpp>
-#include <opencv2/gapi/gmetaarg.hpp>
-#include <opencv2/gapi/util/compiler_hints.hpp> //suppress_unused_warning
-#include <opencv2/gapi/util/util.hpp>
-
-// FIXME: namespace scheme for backends?
-namespace cv {
-
-namespace gimpl
-{
- // Forward-declare an internal class
- class GCPUExecutable;
-} // namespace gimpl
-
-namespace gapi
-{
-/**
- * @brief This namespace contains G-API CPU backend functions,
- * structures, and symbols.
- */
-namespace cpu
-{
- /**
- * \addtogroup gapi_std_backends
- * @{
- *
- * @brief G-API backends available in this OpenCV version
- *
- * G-API backends play a cornerstone role in the G-API execution
- * stack. Every backend is hardware-oriented and thus can run its
- * kernels efficiently on the target platform.
- *
- * Backends are usually "black boxes" for G-API users -- on the API
- * side, all backends are represented as different objects of the
- * same class cv::gapi::GBackend.
- * Users can manipulate backends by specifying which kernels to use.
- *
- * @sa @ref gapi_hld
- */
-
- /**
- * @brief Get a reference to CPU (OpenCV) backend.
- *
- * This is the default backend in G-API at the moment, providing
- * broader functional coverage but losing some graph model
- * advantages. Provided mostly for reference and prototyping
- * purposes.
- *
- * @sa gapi_std_backends
- */
- GAPI_EXPORTS cv::gapi::GBackend backend();
- /** @} */
-
- class GOCVFunctor;
-
- //! @cond IGNORED
- template<typename K, typename Callable>
- GOCVFunctor ocv_kernel(const Callable& c);
-
- template<typename K, typename Callable>
- GOCVFunctor ocv_kernel(Callable& c);
- //! @endcond
-
-} // namespace cpu
-} // namespace gapi
-
-// Represents arguments which are passed to a wrapped CPU function
-// FIXME: put into detail?
-class GAPI_EXPORTS GCPUContext
-{
-public:
- // Generic accessor API
- template<typename T>
- const T& inArg(int input) { return m_args.at(input).get<T>(); }
-
- // Syntax sugar
- const cv::Mat& inMat(int input);
- cv::Mat& outMatR(int output); // FIXME: Avoid cv::Mat m = ctx.outMatR()
-
- const cv::Scalar& inVal(int input);
- cv::Scalar& outValR(int output); // FIXME: Avoid cv::Scalar s = ctx.outValR()
- cv::MediaFrame& outFrame(int output);
- template<typename T> std::vector<T>& outVecR(int output) // FIXME: the same issue
- {
- return outVecRef(output).wref<T>();
- }
- template<typename T> T& outOpaqueR(int output) // FIXME: the same issue
- {
- return outOpaqueRef(output).wref<T>();
- }
-
- GArg state()
- {
- return m_state;
- }
-
-protected:
- detail::VectorRef& outVecRef(int output);
- detail::OpaqueRef& outOpaqueRef(int output);
-
- std::vector<GArg> m_args;
- GArg m_state;
-
- //FIXME: avoid conversion of arguments from internal representation to OpenCV one on each call
- //to OCV kernel. (This can be achieved by two one-time conversions in GCPUExecutable::run,
- //once on enter for input and output arguments, and once before return for output arguments only
- std::unordered_map<std::size_t, GRunArgP> m_results;
-
- friend class gimpl::GCPUExecutable;
-};
-
-class GAPI_EXPORTS GCPUKernel
-{
-public:
- // This function is a kernel's execution entry point (does the processing work)
- using RunF = std::function<void(GCPUContext &)>;
- // This function is a stateful kernel's setup routine (configures state)
- using SetupF = std::function<void(const GMetaArgs &, const GArgs &)>;
-
- GCPUKernel();
- GCPUKernel(const RunF& runF, const SetupF& setupF = nullptr);
-
- RunF m_runF = nullptr;
- SetupF m_setupF = nullptr;
-
- bool m_isStateful = false;
-};
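-
-// A sketch of how a custom operation is typically bound to this backend via
-// the GAPI_OCV_KERNEL helper; the kernel names and the textual ID below are
-// made up for illustration:
-//
-//     G_TYPED_KERNEL(GCustomAdd, <cv::GMat(cv::GMat, cv::GMat)>, "sample.custom.add")
-//     {
-//         static cv::GMatDesc outMeta(cv::GMatDesc a, cv::GMatDesc) { return a; }
-//     };
-//
-//     GAPI_OCV_KERNEL(GOCVCustomAdd, GCustomAdd)
-//     {
-//         static void run(const cv::Mat& a, const cv::Mat& b, cv::Mat& out)
-//         {
-//             cv::add(a, b, out);
-//         }
-//     };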
-
-// FIXME: This is an ugly ad-hoc implementation. TODO: refactor
-
-namespace detail
-{
-template<typename T> struct get_in;
-template<> struct get_in<cv::GMat>
-{
- static cv::Mat get(GCPUContext &ctx, int idx) { return ctx.inMat(idx); }
-};
-template<> struct get_in<cv::GMatP>
-{
- static cv::Mat get(GCPUContext &ctx, int idx) { return get_in<cv::GMat>::get(ctx, idx); }
-};
-template<> struct get_in<cv::GFrame>
-{
- static cv::MediaFrame get(GCPUContext &ctx, int idx) { return ctx.inArg<cv::MediaFrame>(idx); }
-};
-template<> struct get_in<cv::GScalar>
-{
- static cv::Scalar get(GCPUContext &ctx, int idx) { return ctx.inVal(idx); }
-};
-template<typename U> struct get_in<cv::GArray<U> >
-{
- static const std::vector<U>& get(GCPUContext &ctx, int idx) { return ctx.inArg<VectorRef>(idx).rref<U>(); }
-};
-template<typename U> struct get_in<cv::GOpaque<U> >
-{
- static const U& get(GCPUContext &ctx, int idx) { return ctx.inArg<OpaqueRef>(idx).rref<U>(); }
-};
-
-//FIXME(dm): GArray<Mat>/GArray<GMat> conversion should be done more gracefully in the system
-template<> struct get_in<cv::GArray<cv::GMat> >: public get_in<cv::GArray<cv::Mat> >
-{
-};
-
-//FIXME(dm): GArray<Scalar>/GArray<GScalar> conversion should be done more gracefully in the system
-template<> struct get_in<cv::GArray<cv::GScalar> >: public get_in<cv::GArray<cv::Scalar> >
-{
-};
-
-// FIXME(dm): GArray<vector<U>>/GArray<GArray<U>> conversion should be done more gracefully in the system
-template<typename U> struct get_in<cv::GArray<cv::GArray<U>> >: public get_in<cv::GArray<std::vector<U>> >
-{
-};
-
-//FIXME(dm): GOpaque<Mat>/GOpaque<GMat> conversion should be done more gracefully in the system
-template<> struct get_in<cv::GOpaque<cv::GMat> >: public get_in<cv::GOpaque<cv::Mat> >
-{
-};
-
-//FIXME(dm): GOpaque<Scalar>/GOpaque<GScalar> conversion should be done more gracefully in the system
-template<> struct get_in<cv::GOpaque<cv::GScalar> >: public get_in<cv::GOpaque<cv::Scalar> >
-{
-};
-
-template<class T> struct get_in
-{
- static T get(GCPUContext &ctx, int idx) { return ctx.inArg<T>(idx); }
-};
-
-struct tracked_cv_mat{
- tracked_cv_mat(cv::Mat& m) : r{m}, original_data{m.data} {}
- cv::Mat r;
- uchar* original_data;
-
- operator cv::Mat& (){ return r;}
- void validate() const{
- if (r.data != original_data)
- {
- util::throw_error
- (std::logic_error
- ("OpenCV kernel output parameter was reallocated. \n"
- "Incorrect meta data was provided ?"));
- }
- }
-};
-
-template<typename... Outputs>
-void postprocess(Outputs&... outs)
-{
- struct
- {
- void operator()(tracked_cv_mat* bm) { bm->validate(); }
- void operator()(...) { }
-
- } validate;
- //dummy array to unfold parameter pack
- int dummy[] = { 0, (validate(&outs), 0)... };
- cv::util::suppress_unused_warning(dummy);
-}
-
-template<typename T> struct get_out;
-template<> struct get_out<cv::GMat>
-{
- static tracked_cv_mat get(GCPUContext &ctx, int idx)
- {
- auto& r = ctx.outMatR(idx);
- return {r};
- }
-};
-template<> struct get_out<cv::GMatP>