Merge pull request #18510 from OrestChura:oc/boundingRect

[G-API]: findContours() and boundingRect() Standard Kernels Implementation

* Add findContours() standard kernel
 - API and documentation provided:
   - as OpenCV provides two overloads (with and without hierarchy calculation) which, from G-API's point of view, differ only by their outputs, two different G-API functions and kernels are implemented
   - G-API Imgproc documentation divided into more parts according to imgproc module parts
   - some typos connected with division into parts corrected
 - `GArray<GArray<U>>` overload for `get_out` function provided to convert correctly into `vector<vector<U>>`
 - OCV backend supported
 - accuracy tests provided

* Add boundingRect() standard kernel
     - API and documentation provided:
       - GOpaque<Rect> used as an output
       - as OpenCV provides two possibilities whether to take a gray-scale image or a set of 2D points (`Point2i` or `Point2f` supported), three different overloads of a single G-API function and three kernels implemented
          - for a gray-scale image the overload via `GMat`
          - for a set of `Point2i` - the one via GArray<`Point2i`>
          - set of `Point2f` -> GArray<`Point2f`>
     - OCV backend supported
     - accuracy tests provided
       - comparison function for Rects provided
     - some typos in `gapi_tests_common` corrected

* Fix precommit windows warnings

* - Addressing comments:
   - split tests
 - Fix Windows warnings

* Static_cast for warnings

* - Remove randomness
 - Fix unnecessary precision losses

* - Forgot reference for RNG

* addressing comments

* equalizeHist -> no group

* `const` added in new functions

* Address suggestions:
 - Hierarchical -> H
 - added cv::GMatDesc::isVectorPoins()
 - added support of giving a set of points to boundingRect()

* Addressing comments
 - IoU comparison function added for Rects
 - isPointsVector moved from a GMatDesc method to a separate function in imgproc.hpp
 - enums instead of int
 - typos corrected

* Addressing comments
 - findContours: Point offset -> GOpaque<Point>
 - removed "straight" comparison for Rects, IoU available only
 - changed vectors initialization -> fix Debug test run
 - Some typos

* added comment for later upgrades

* Fix not to corrupt docs by FIXME

* Addressing comments
 - overload without offset added (as a temporary workaround)
 - checkMetaForFindingContours -> validateFindingContoursMeta
 - added ostream overload for enums used in tests
This commit is contained in:
Orest Chura 2020-11-11 15:13:10 +03:00 committed by GitHub
parent ef32d7fd16
commit 3fc1c73064
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 821 additions and 14 deletions

View File

@ -271,6 +271,11 @@ template<> struct get_out<cv::GArray<cv::GMat> >: public get_out<cv::GArray<cv::
{
};
// FIXME(dm): GArray<vector<U>>/GArray<GArray<U>> conversion should be done more gracefully in the system
// Out-conversion for nested arrays: delegate GArray<GArray<U>> handling to the
// GArray<std::vector<U>> specialization, so the nested G-API array is returned
// through the same std::vector<std::vector<U>> host type.
template<typename U> struct get_out<cv::GArray<cv::GArray<U>> >: public get_out<cv::GArray<std::vector<U>> >
{
};
template<typename U> struct get_out<cv::GOpaque<U>>
{
static U& get(GCPUContext &ctx, int idx)

View File

@ -21,14 +21,45 @@
@{
@defgroup gapi_filters Graph API: Image filters
@defgroup gapi_colorconvert Graph API: Converting image from one color space to another
@defgroup gapi_feature Graph API: Image Feature Detection
@defgroup gapi_shape Graph API: Image Structural Analysis and Shape Descriptors
@}
*/
namespace {
// Validates input meta for the findContours kernels: the image must be
// single-channel, and its depth must be compatible with the requested
// contour retrieval mode (see cv::findContours constraints).
void validateFindingContoursMeta(const int depth, const int chan, const int mode)
{
    GAPI_Assert(chan == 1);
    // Allowed input depth depends on the retrieval mode.
    if (mode == cv::RETR_CCOMP)
    {
        GAPI_Assert(depth == CV_8U || depth == CV_32S);
    }
    else if (mode == cv::RETR_FLOODFILL)
    {
        GAPI_Assert(depth == CV_32S);
    }
    else
    {
        GAPI_Assert(depth == CV_8U);
    }
}
// Checks if the passed mat meta describes a set of n-dimensional points of the
// given depth: either an n-channel single-row/single-column Mat, or a
// single-channel Mat with exactly n columns. A negative ddepth matches any depth.
bool isPointsVector(const int chan, const cv::Size &size, const int depth,
                    const int n, const int ddepth)
{
    const bool depthMatches     = (ddepth == depth) || (ddepth < 0);
    const bool nChannelVector   = (chan == n) && (size.height == 1 || size.width == 1);
    const bool nColumnsSingleCh = (chan == 1) && (size.width == n);
    return depthMatches && (nChannelVector || nColumnsSingleCh);
}
} // anonymous namespace
namespace cv { namespace gapi {
namespace imgproc {
using GMat2 = std::tuple<GMat,GMat>;
using GMat3 = std::tuple<GMat,GMat,GMat>; // FIXME: how to avoid this?
using GFindContoursOutput = std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>;
G_TYPED_KERNEL(GFilter2D, <GMat(GMat,int,Mat,Point,Scalar,int,Scalar)>,"org.opencv.imgproc.filters.filter2D") {
static GMatDesc outMeta(GMatDesc in, int ddepth, Mat, Point, Scalar, int, Scalar) {
@ -118,7 +149,7 @@ namespace imgproc {
}
};
G_TYPED_KERNEL(GCanny, <GMat(GMat,double,double,int,bool)>, "org.opencv.imgproc.canny"){
G_TYPED_KERNEL(GCanny, <GMat(GMat,double,double,int,bool)>, "org.opencv.imgproc.feature.canny"){
static GMatDesc outMeta(GMatDesc in, double, double, int, bool) {
return in.withType(CV_8U, 1);
}
@ -126,12 +157,83 @@ namespace imgproc {
G_TYPED_KERNEL(GGoodFeatures,
<cv::GArray<cv::Point2f>(GMat,int,double,double,Mat,int,bool,double)>,
"org.opencv.imgproc.goodFeaturesToTrack") {
"org.opencv.imgproc.feature.goodFeaturesToTrack") {
static GArrayDesc outMeta(GMatDesc, int, double, double, const Mat&, int, bool, double) {
return empty_array_desc();
}
};
// Local aliases to keep the kernel signatures below readable.
using RetrMode = RetrievalModes;
using ContMethod = ContourApproximationModes;
// findContours() kernel: contours only (no hierarchy), with an explicit
// GOpaque<Point> offset input. Input meta is validated against the mode.
G_TYPED_KERNEL(GFindContours, <GArray<GArray<Point>>(GMat,RetrMode,ContMethod,GOpaque<Point>)>,
"org.opencv.imgproc.shape.findContours")
{
static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc)
{
validateFindingContoursMeta(in.depth, in.chan, mode);
return empty_array_desc();
}
};
// Same kernel without the offset input (temporary workaround until a default
// offset value can be expressed).
// FIXME oc: make default value offset = Point()
G_TYPED_KERNEL(GFindContoursNoOffset, <GArray<GArray<Point>>(GMat,RetrMode,ContMethod)>,
"org.opencv.imgproc.shape.findContoursNoOffset")
{
static GArrayDesc outMeta(GMatDesc in, RetrMode mode, ContMethod)
{
validateFindingContoursMeta(in.depth, in.chan, mode);
return empty_array_desc();
}
};
// findContours() kernel producing both contours and their hierarchy
// (GFindContoursOutput), with an explicit offset input.
G_TYPED_KERNEL(GFindContoursH,<GFindContoursOutput(GMat,RetrMode,ContMethod,GOpaque<Point>)>,
"org.opencv.imgproc.shape.findContoursH")
{
static std::tuple<GArrayDesc,GArrayDesc>
outMeta(GMatDesc in, RetrMode mode, ContMethod, GOpaqueDesc)
{
validateFindingContoursMeta(in.depth, in.chan, mode);
return std::make_tuple(empty_array_desc(), empty_array_desc());
}
};
// Hierarchy-producing kernel without the offset input (same workaround).
// FIXME oc: make default value offset = Point()
G_TYPED_KERNEL(GFindContoursHNoOffset,<GFindContoursOutput(GMat,RetrMode,ContMethod)>,
"org.opencv.imgproc.shape.findContoursHNoOffset")
{
static std::tuple<GArrayDesc,GArrayDesc>
outMeta(GMatDesc in, RetrMode mode, ContMethod)
{
validateFindingContoursMeta(in.depth, in.chan, mode);
return std::make_tuple(empty_array_desc(), empty_array_desc());
}
};
// boundingRect() kernel over a GMat: accepts either a CV_8UC1 image or a Mat
// holding 2D points of CV_32S/CV_32F depth (checked via isPointsVector).
G_TYPED_KERNEL(GBoundingRectMat, <GOpaque<Rect>(GMat)>,
"org.opencv.imgproc.shape.boundingRectMat") {
static GOpaqueDesc outMeta(GMatDesc in) {
GAPI_Assert((in.depth == CV_8U && in.chan == 1) ||
(isPointsVector(in.chan, in.size, in.depth, 2, CV_32S) ||
isPointsVector(in.chan, in.size, in.depth, 2, CV_32F)));
return empty_gopaque_desc();
}
};
// boundingRect() kernel over a vector of integer 2D points.
G_TYPED_KERNEL(GBoundingRectVector32S, <GOpaque<Rect>(GArray<Point2i>)>,
"org.opencv.imgproc.shape.boundingRectVector32S") {
static GOpaqueDesc outMeta(GArrayDesc) {
return empty_gopaque_desc();
}
};
// boundingRect() kernel over a vector of floating-point 2D points.
G_TYPED_KERNEL(GBoundingRectVector32F, <GOpaque<Rect>(GArray<Point2f>)>,
"org.opencv.imgproc.shape.boundingRectVector32F") {
static GOpaqueDesc outMeta(GArrayDesc) {
return empty_gopaque_desc();
}
};
G_TYPED_KERNEL(GBGR2RGB, <GMat(GMat)>, "org.opencv.imgproc.colorconvert.bgr2rgb") {
static GMatDesc outMeta(GMatDesc in) {
return in; // type still remains CV_8UC3;
@ -280,7 +382,7 @@ namespace imgproc {
}
};
G_TYPED_KERNEL(GNV12toRGBp, <GMatP(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12torgbp") {
G_TYPED_KERNEL(GNV12toRGBp, <GMatP(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12torgbp") {
static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
GAPI_Assert(inY.depth == CV_8U);
GAPI_Assert(inUV.depth == CV_8U);
@ -294,7 +396,7 @@ namespace imgproc {
}
};
G_TYPED_KERNEL(GNV12toGray, <GMat(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12togray") {
G_TYPED_KERNEL(GNV12toGray, <GMat(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12togray") {
static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
GAPI_Assert(inY.depth == CV_8U);
GAPI_Assert(inUV.depth == CV_8U);
@ -309,7 +411,7 @@ namespace imgproc {
}
};
G_TYPED_KERNEL(GNV12toBGRp, <GMatP(GMat,GMat)>, "org.opencv.colorconvert.imgproc.nv12tobgrp") {
G_TYPED_KERNEL(GNV12toBGRp, <GMatP(GMat,GMat)>, "org.opencv.imgproc.colorconvert.nv12tobgrp") {
static GMatDesc outMeta(GMatDesc inY, GMatDesc inUV) {
GAPI_Assert(inY.depth == CV_8U);
GAPI_Assert(inUV.depth == CV_8U);
@ -800,6 +902,10 @@ proportional to sigmaSpace.
GAPI_EXPORTS GMat bilateralFilter(const GMat& src, int d, double sigmaColor, double sigmaSpace,
int borderType = BORDER_DEFAULT);
//! @} gapi_filters
//! @addtogroup gapi_feature
//! @{
/** @brief Finds edges in an image using the Canny algorithm.
The function finds edges in the input image and marks them in the output map edges using the
@ -807,7 +913,7 @@ Canny algorithm. The smallest value between threshold1 and threshold2 is used fo
largest value is used to find initial segments of strong edges. See
<http://en.wikipedia.org/wiki/Canny_edge_detector>
@note Function textual ID is "org.opencv.imgproc.filters.canny"
@note Function textual ID is "org.opencv.imgproc.feature.canny"
@param image 8-bit input image.
@param threshold1 first threshold for the hysteresis procedure.
@ -842,7 +948,7 @@ The function can be used to initialize a point-based tracker of an object.
A \> B, the vector of returned corners with qualityLevel=A will be the prefix of the output vector
with qualityLevel=B .
@note Function textual ID is "org.opencv.imgproc.goodFeaturesToTrack"
@note Function textual ID is "org.opencv.imgproc.feature.goodFeaturesToTrack"
@param image Input 8-bit or floating-point 32-bit, single-channel image.
@param maxCorners Maximum number of corners to return. If there are more corners than are found,
@ -876,6 +982,8 @@ GAPI_EXPORTS GArray<Point2f> goodFeaturesToTrack(const GMat &image,
/** @brief Equalizes the histogram of a grayscale image.
//! @} gapi_feature
The function equalizes the histogram of the input image using the following algorithm:
- Calculate the histogram \f$H\f$ for src .
@ -893,6 +1001,120 @@ The algorithm normalizes the brightness and increases the contrast of the image.
*/
GAPI_EXPORTS GMat equalizeHist(const GMat& src);
//! @addtogroup gapi_shape
//! @{
/** @brief Finds contours in a binary image.
The function retrieves contours from the binary image using the algorithm @cite Suzuki85 .
The contours are a useful tool for shape analysis and object detection and recognition.
See squares.cpp in the OpenCV sample directory.
@note Function textual ID is "org.opencv.imgproc.shape.findContours"
@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero
pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer
image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL is used, only @ref CV_32SC1 is supported.
@param mode Contour retrieval mode, see #RetrievalModes
@param method Contour approximation method, see #ContourApproximationModes
@param offset Optional offset by which every contour point is shifted. This is useful if the
contours are extracted from the image ROI and then they should be analyzed in the whole image
context.
@return GArray of detected contours. Each contour is stored as a GArray of points.
*/
GAPI_EXPORTS GArray<GArray<Point>>
findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
const GOpaque<Point> &offset);
// FIXME oc: make default value offset = Point()
/** @overload
@note Function textual ID is "org.opencv.imgproc.shape.findContoursNoOffset"
*/
GAPI_EXPORTS GArray<GArray<Point>>
findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
/** @brief Finds contours and their hierarchy in a binary image.
The function retrieves contours from the binary image using the algorithm @cite Suzuki85
and calculates their hierarchy.
The contours are a useful tool for shape analysis and object detection and recognition.
See squares.cpp in the OpenCV sample directory.
@note Function textual ID is "org.opencv.imgproc.shape.findContoursH"
@param src Input gray-scale image @ref CV_8UC1. Non-zero pixels are treated as 1's. Zero
pixels remain 0's, so the image is treated as binary . You can use #compare, #inRange, #threshold ,
#adaptiveThreshold, #Canny, and others to create a binary image out of a grayscale or color one.
If mode equals to #RETR_CCOMP, the input can also be a 32-bit integer
image of labels ( @ref CV_32SC1 ). If #RETR_FLOODFILL is used, only @ref CV_32SC1 is supported.
@param mode Contour retrieval mode, see #RetrievalModes
@param method Contour approximation method, see #ContourApproximationModes
@param offset Optional offset by which every contour point is shifted. This is useful if the
contours are extracted from the image ROI and then they should be analyzed in the whole image
context.
@return GArray of detected contours. Each contour is stored as a GArray of points.
@return Optional output GArray of cv::Vec4i, containing information about the image topology.
It has as many elements as the number of contours. For each i-th contour contours[i], the elements
hierarchy[i][0] , hierarchy[i][1] , hierarchy[i][2] , and hierarchy[i][3] are set to 0-based
indices in contours of the next and previous contours at the same hierarchical level, the first
child contour and the parent contour, respectively. If for the contour i there are no next,
previous, parent, or nested contours, the corresponding elements of hierarchy[i] will be negative.
*/
GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
const GOpaque<Point> &offset);
// FIXME oc: make default value offset = Point()
/** @overload
@note Function textual ID is "org.opencv.imgproc.shape.findContoursHNoOffset"
*/
GAPI_EXPORTS std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method);
/** @brief Calculates the up-right bounding rectangle of a point set or non-zero pixels
of gray-scale image.
The function calculates and returns the minimal up-right bounding rectangle for the specified
point set or non-zero pixels of gray-scale image.
@note Function textual ID is "org.opencv.imgproc.shape.boundingRectMat"
@param src Input gray-scale image @ref CV_8UC1; or input set of @ref CV_32S or @ref CV_32F
2D points stored in Mat.
@note In case of a 2D points' set given, Mat should be 2-dimensional, have a single row or column
if there are 2 channels, or have 2 columns if there is a single channel. Mat should have either
@ref CV_32S or @ref CV_32F depth
*/
GAPI_EXPORTS GOpaque<Rect> boundingRect(const GMat& src);
/** @overload
Calculates the up-right bounding rectangle of a point set.
@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32S"
@param src Input 2D point set, stored in std::vector<cv::Point2i>.
*/
GAPI_EXPORTS GOpaque<Rect> boundingRect(const GArray<Point2i>& src);
/** @overload
Calculates the up-right bounding rectangle of a point set.
@note Function textual ID is "org.opencv.imgproc.shape.boundingRectVector32F"
@param src Input 2D point set, stored in std::vector<cv::Point2f>.
*/
GAPI_EXPORTS GOpaque<Rect> boundingRect(const GArray<Point2f>& src);
//! @} gapi_shape
//! @addtogroup gapi_colorconvert
//! @{
/** @brief Converts an image from BGR color space to RGB color space.
The function converts an input image from BGR color space to RGB.
@ -907,10 +1129,6 @@ Output image is 8-bit unsigned 3-channel image @ref CV_8UC3.
*/
GAPI_EXPORTS GMat BGR2RGB(const GMat& src);
//! @} gapi_filters
//! @addtogroup gapi_colorconvert
//! @{
/** @brief Converts an image from RGB color space to gray-scaled.
The conventional ranges for R, G, and B channel values are 0 to 255.
Resulting gray color value computed as

View File

@ -122,6 +122,48 @@ cv::GArray<cv::Point2f> goodFeaturesToTrack(const GMat& image, int maxCorners, d
useHarrisDetector, k);
}
// Public findContours() wrapper: contours only, with explicit offset input.
GArray<GArray<Point>>
findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
const GOpaque<Point> &offset)
{
return imgproc::GFindContours::on(src, mode, method, offset);
}
// Overload without offset (workaround until a default GOpaque value exists).
GArray<GArray<Point>>
findContours(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method)
{
return imgproc::GFindContoursNoOffset::on(src, mode, method);
}
// Public findContoursH() wrapper: returns contours and their hierarchy.
std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method,
const GOpaque<Point> &offset)
{
return imgproc::GFindContoursH::on(src, mode, method, offset);
}
// Overload without offset (same workaround as for findContours()).
std::tuple<GArray<GArray<Point>>,GArray<Vec4i>>
findContoursH(const GMat &src, const RetrievalModes mode, const ContourApproximationModes method)
{
return imgproc::GFindContoursHNoOffset::on(src, mode, method);
}
// boundingRect() over a gray-scale image or a Mat-packed set of 2D points.
GOpaque<Rect> boundingRect(const GMat& src)
{
return imgproc::GBoundingRectMat::on(src);
}
// boundingRect() over a vector of integer 2D points.
GOpaque<Rect> boundingRect(const GArray<Point2i>& src)
{
return imgproc::GBoundingRectVector32S::on(src);
}
// boundingRect() over a vector of floating-point 2D points.
GOpaque<Rect> boundingRect(const GArray<Point2f>& src)
{
return imgproc::GBoundingRectVector32F::on(src);
}
GMat BGR2RGB(const GMat& src)
{
return imgproc::GBGR2RGB::on(src);

View File

@ -221,6 +221,70 @@ GAPI_OCV_KERNEL(GCPUGoodFeatures, cv::gapi::imgproc::GGoodFeatures)
}
};
// OCV backend: findContours with offset, contours only.
GAPI_OCV_KERNEL(GCPUFindContours, cv::gapi::imgproc::GFindContours)
{
static void run(const cv::Mat& image, const cv::RetrievalModes mode,
const cv::ContourApproximationModes method, const cv::Point& offset,
std::vector<std::vector<cv::Point>> &outConts)
{
cv::findContours(image, outConts, mode, method, offset);
}
};
// OCV backend: findContours without offset, contours only.
GAPI_OCV_KERNEL(GCPUFindContoursNoOffset, cv::gapi::imgproc::GFindContoursNoOffset)
{
static void run(const cv::Mat& image, const cv::RetrievalModes mode,
const cv::ContourApproximationModes method,
std::vector<std::vector<cv::Point>> &outConts)
{
cv::findContours(image, outConts, mode, method);
}
};
// OCV backend: findContours with offset, contours + hierarchy.
GAPI_OCV_KERNEL(GCPUFindContoursH, cv::gapi::imgproc::GFindContoursH)
{
static void run(const cv::Mat& image, const cv::RetrievalModes mode,
const cv::ContourApproximationModes method, const cv::Point& offset,
std::vector<std::vector<cv::Point>> &outConts, std::vector<cv::Vec4i> &outHier)
{
cv::findContours(image, outConts, outHier, mode, method, offset);
}
};
// OCV backend: findContours without offset, contours + hierarchy.
GAPI_OCV_KERNEL(GCPUFindContoursHNoOffset, cv::gapi::imgproc::GFindContoursHNoOffset)
{
static void run(const cv::Mat& image, const cv::RetrievalModes mode,
const cv::ContourApproximationModes method,
std::vector<std::vector<cv::Point>> &outConts, std::vector<cv::Vec4i> &outHier)
{
cv::findContours(image, outConts, outHier, mode, method);
}
};
// OCV backend: boundingRect over a Mat (image or packed point set).
GAPI_OCV_KERNEL(GCPUBoundingRectMat, cv::gapi::imgproc::GBoundingRectMat)
{
static void run(const cv::Mat& in, cv::Rect& out)
{
out = cv::boundingRect(in);
}
};
// OCV backend: boundingRect over a vector of integer 2D points.
GAPI_OCV_KERNEL(GCPUBoundingRectVector32S, cv::gapi::imgproc::GBoundingRectVector32S)
{
static void run(const std::vector<cv::Point2i>& in, cv::Rect& out)
{
out = cv::boundingRect(in);
}
};
// OCV backend: boundingRect over a vector of floating-point 2D points.
GAPI_OCV_KERNEL(GCPUBoundingRectVector32F, cv::gapi::imgproc::GBoundingRectVector32F)
{
static void run(const std::vector<cv::Point2f>& in, cv::Rect& out)
{
out = cv::boundingRect(in);
}
};
GAPI_OCV_KERNEL(GCPUBGR2RGB, cv::gapi::imgproc::GBGR2RGB)
{
static void run(const cv::Mat& in, cv::Mat &out)
@ -496,8 +560,15 @@ cv::gapi::GKernelPackage cv::gapi::imgproc::cpu::kernels()
, GCPUCanny
, GCPUGoodFeatures
, GCPUEqualizeHist
, GCPUFindContours
, GCPUFindContoursNoOffset
, GCPUFindContoursH
, GCPUFindContoursHNoOffset
, GCPUBGR2RGB
, GCPURGB2YUV
, GCPUBoundingRectMat
, GCPUBoundingRectVector32S
, GCPUBoundingRectVector32F
, GCPUYUV2RGB
, GCPUBGR2I420
, GCPURGB2I420

View File

@ -66,6 +66,21 @@ GAPI_TEST_FIXTURE_SPEC_PARAMS(GoodFeaturesTest,
double,int,bool),
8, cmpF, fileName, type, maxCorners, qualityLevel, minDistance,
blockSize, useHarrisDetector)
// findContours fixtures: parameterized by input size/type and the
// retrieval/approximation modes. The *OffsetTest variants take no
// parameters -- they use fixed values inside the test body.
GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursNoOffsetTest,
FIXTURE_API(cv::Size,MatType2,cv::RetrievalModes,
cv::ContourApproximationModes),
4, sz, type, mode, method)
GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursOffsetTest, <>, 0)
GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursHNoOffsetTest,
FIXTURE_API(cv::Size,MatType2,cv::RetrievalModes,
cv::ContourApproximationModes),
4, sz, type, mode, method)
GAPI_TEST_FIXTURE_SPEC_PARAMS(FindContoursHOffsetTest, <>, 0)
// boundingRect fixtures: each takes a Rect comparison functor (cmpF).
GAPI_TEST_FIXTURE(BoundingRectMatTest, initMatrixRandU, FIXTURE_API(CompareRects), 1, cmpF)
GAPI_TEST_FIXTURE(BoundingRectMatVector32STest, initNothing, FIXTURE_API(CompareRects), 1, cmpF)
GAPI_TEST_FIXTURE(BoundingRectMatVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF)
GAPI_TEST_FIXTURE(BoundingRectVector32STest, initNothing, FIXTURE_API(CompareRects), 1, cmpF)
GAPI_TEST_FIXTURE(BoundingRectVector32FTest, initNothing, FIXTURE_API(CompareRects), 1, cmpF)
GAPI_TEST_FIXTURE(BGR2RGBTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
GAPI_TEST_FIXTURE(RGB2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)
GAPI_TEST_FIXTURE(BGR2GrayTest, initMatrixRandN, FIXTURE_API(CompareMats), 1, cmpF)

View File

@ -50,6 +50,27 @@ namespace
rgb2yuyv(in_line_p, out_line_p, in.cols);
}
}
// Draw random ellipses on given mat of given size and type.
// Resets 'mat' to all-zeros first, then draws 1..9 ellipses with random
// centers, axes, angles and non-zero intensities, producing a test image
// with non-trivial contours. Uses the global test RNG (theRNG()), so the
// statement order of the rng.uniform() calls determines the output.
void initMatForFindingContours(cv::Mat& mat, const cv::Size& sz, const int type)
{
cv::RNG& rng = theRNG();
mat = cv::Mat(sz, type, cv::Scalar::all(0));
size_t numEllipses = rng.uniform(1, 10);
for( size_t i = 0; i < numEllipses; i++ )
{
cv::Point center;
cv::Size axes;
center.x = rng.uniform(0, sz.width);
center.y = rng.uniform(0, sz.height);
axes.width = rng.uniform(2, sz.width);
axes.height = rng.uniform(2, sz.height);
int color = rng.uniform(1, 256);
double angle = rng.uniform(0., 180.);
// NOTE(review): in cv::ellipse the 8th argument is thickness and the 9th
// is lineType, so FILLED (-1) is passed here as a line type, not as a
// fill request -- confirm this is intended.
cv::ellipse(mat, center, axes, angle, 0., 360., color, 1, FILLED);
}
}
}
TEST_P(Filter2DTest, AccuracyTest)
@ -470,6 +491,267 @@ TEST_P(GoodFeaturesTest, AccuracyTest)
}
}
// Accuracy test: findContours without offset. Contours are compared
// indirectly -- both results are rasterized with fillPoly and the resulting
// images compared, presumably because contour ordering may differ.
TEST_P(FindContoursNoOffsetTest, AccuracyTest)
{
std::vector<std::vector<cv::Point>> outCtsOCV, outCtsGAPI;
initMatForFindingContours(in_mat1, sz, type);
out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0));
out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0));
// OpenCV code /////////////////////////////////////////////////////////////
{
cv::findContours(in_mat1, outCtsOCV, mode, method);
}
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
cv::GArray<cv::GArray<cv::Point>> outCts;
outCts = cv::gapi::findContours(in, mode, method);
cv::GComputation c(GIn(in), GOut(outCts));
c.apply(gin(in_mat1), gout(outCtsGAPI), getCompileArgs());
// Comparison //////////////////////////////////////////////////////////////
EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size());
cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1));
cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1));
EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi));
}
// Accuracy test: findContours with an explicit GOpaque<Point> offset.
// Parameters are fixed here because the fixture takes none (<>, 0).
TEST_P(FindContoursOffsetTest, AccuracyTest)
{
const cv::Size sz(1280, 720);
const MatType2 type = CV_8UC1;
const cv::RetrievalModes mode = cv::RETR_EXTERNAL;
const cv::ContourApproximationModes method = cv::CHAIN_APPROX_NONE;
const cv::Point offset(15, 15);
std::vector<std::vector<cv::Point>> outCtsOCV, outCtsGAPI;
initMatForFindingContours(in_mat1, sz, type);
out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0));
out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0));
// OpenCV code /////////////////////////////////////////////////////////////
{
cv::findContours(in_mat1, outCtsOCV, mode, method, offset);
}
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
GOpaque<Point> gOffset;
cv::GArray<cv::GArray<cv::Point>> outCts;
outCts = cv::gapi::findContours(in, mode, method, gOffset);
cv::GComputation c(GIn(in, gOffset), GOut(outCts));
c.apply(gin(in_mat1, offset), gout(outCtsGAPI), getCompileArgs());
// Comparison //////////////////////////////////////////////////////////////
EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size());
cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1));
cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1));
EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi));
}
// Accuracy test: findContoursH (contours + hierarchy) without offset.
TEST_P(FindContoursHNoOffsetTest, AccuracyTest)
{
std::vector<std::vector<cv::Point>> outCtsOCV, outCtsGAPI;
std::vector<cv::Vec4i> outHierOCV, outHierGAPI;
initMatForFindingContours(in_mat1, sz, type);
out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0));
out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0));
// OpenCV code /////////////////////////////////////////////////////////////
{
cv::findContours(in_mat1, outCtsOCV, outHierOCV, mode, method);
}
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
cv::GArray<cv::GArray<cv::Point>> outCts;
cv::GArray<cv::Vec4i> outHier;
std::tie(outCts, outHier) = cv::gapi::findContoursH(in, mode, method);
cv::GComputation c(GIn(in), GOut(outCts, outHier));
c.apply(gin(in_mat1), gout(outCtsGAPI, outHierGAPI), getCompileArgs());
// Comparison //////////////////////////////////////////////////////////////
EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size());
cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1));
cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1));
EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi));
// NOTE(review): this duplicates the contour-count check above; it likely
// was meant to compare outHierGAPI.size() with outHierOCV.size() -- confirm.
EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size());
EXPECT_TRUE(AbsExactVector<cv::Vec4i>().to_compare_f()(outHierOCV, outHierGAPI));
}
// Accuracy test: findContoursH with an explicit offset; parameters fixed
// here because the fixture takes none (<>, 0).
TEST_P(FindContoursHOffsetTest, AccuracyTest)
{
const cv::Size sz(1280, 720);
const MatType2 type = CV_8UC1;
const cv::RetrievalModes mode = cv::RETR_EXTERNAL;
const cv::ContourApproximationModes method = cv::CHAIN_APPROX_NONE;
const cv::Point offset(15, 15);
std::vector<std::vector<cv::Point>> outCtsOCV, outCtsGAPI;
std::vector<cv::Vec4i> outHierOCV, outHierGAPI;
initMatForFindingContours(in_mat1, sz, type);
out_mat_gapi = cv::Mat(sz, type, cv::Scalar::all(0));
out_mat_ocv = cv::Mat(sz, type, cv::Scalar::all(0));
// OpenCV code /////////////////////////////////////////////////////////////
{
cv::findContours(in_mat1, outCtsOCV, outHierOCV, mode, method, offset);
}
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
GOpaque<Point> gOffset;
cv::GArray<cv::GArray<cv::Point>> outCts;
cv::GArray<cv::Vec4i> outHier;
std::tie(outCts, outHier) = cv::gapi::findContoursH(in, mode, method, gOffset);
cv::GComputation c(GIn(in, gOffset), GOut(outCts, outHier));
c.apply(gin(in_mat1, offset), gout(outCtsGAPI, outHierGAPI), getCompileArgs());
// Comparison //////////////////////////////////////////////////////////////
EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size());
cv::fillPoly(out_mat_ocv, outCtsOCV, cv::Scalar::all(1));
cv::fillPoly(out_mat_gapi, outCtsGAPI, cv::Scalar::all(1));
EXPECT_TRUE(AbsExact().to_compare_f()(out_mat_ocv, out_mat_gapi));
// NOTE(review): duplicated contour-count check; probably intended to compare
// hierarchy sizes -- confirm.
EXPECT_TRUE(outCtsGAPI.size() == outCtsOCV.size());
EXPECT_TRUE(AbsExactVector<cv::Vec4i>().to_compare_f()(outHierOCV, outHierGAPI));
}
// Accuracy test: boundingRect over a random gray-scale image (in_mat1 is
// filled by the fixture's initMatrixRandU). G-API and OpenCV results are
// compared with the fixture-provided Rect comparator cmpF.
TEST_P(BoundingRectMatTest, AccuracyTest)
{
cv::Rect out_rect_gapi, out_rect_ocv;
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
auto out = cv::gapi::boundingRect(in);
cv::GComputation c(cv::GIn(in), cv::GOut(out));
c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs());
// OpenCV code /////////////////////////////////////////////////////////////
{
out_rect_ocv = cv::boundingRect(in_mat1);
}
// Comparison //////////////////////////////////////////////////////////////
{
EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv));
}
}
// Accuracy test: boundingRect over a set of Point2i packed into a Mat
// (cv::Mat(vector) wraps the vector data as an Nx1 2-channel matrix).
TEST_P(BoundingRectMatVector32STest, AccuracyTest)
{
cv::Rect out_rect_gapi, out_rect_ocv;
std::vector<cv::Point2i> in_vectorS(sz.width);
cv::randu(in_vectorS, cv::Scalar::all(0), cv::Scalar::all(255));
in_mat1 = cv::Mat(in_vectorS);
// G-API code //////////////////////////////////////////////////////////////
cv::GMat in;
auto out = cv::gapi::boundingRect(in);
cv::GComputation c(cv::GIn(in), cv::GOut(out));
c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs());
// OpenCV code /////////////////////////////////////////////////////////////
{
out_rect_ocv = cv::boundingRect(in_mat1);
}
// Comparison //////////////////////////////////////////////////////////////
{
EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv));
}
}
// Accuracy test: boundingRect over a set of Point2f packed into a Mat.
// Random coordinates are quantized to multiples of 1/fscale so both backends
// see bit-identical, ULP-stable input.
TEST_P(BoundingRectMatVector32FTest, AccuracyTest)
{
    cv::RNG& rng = theRNG();
    cv::Rect out_rect_gapi, out_rect_ocv;

    // Build exactly sz.width points. The vector must start empty: the previous
    // code constructed it with sz.width default elements and then push_back'd,
    // producing 2*sz.width points where the first half were unintended (0,0)s.
    std::vector<cv::Point2f> in_vectorF;
    in_vectorF.reserve(sz.width);
    const int fscale = 256; // avoid bits near ULP, generate stable test input
    for (int i = 0; i < sz.width; i++)
    {
        cv::Point2f pt(rng.uniform(0, 255 * fscale) / static_cast<float>(fscale),
                       rng.uniform(0, 255 * fscale) / static_cast<float>(fscale));
        in_vectorF.push_back(pt);
    }
    in_mat1 = cv::Mat(in_vectorF);

    // G-API code //////////////////////////////////////////////////////////////
    cv::GMat in;
    auto out = cv::gapi::boundingRect(in);
    cv::GComputation c(cv::GIn(in), cv::GOut(out));
    c.apply(cv::gin(in_mat1), cv::gout(out_rect_gapi), getCompileArgs());
    // OpenCV code /////////////////////////////////////////////////////////////
    {
        out_rect_ocv = cv::boundingRect(in_mat1);
    }
    // Comparison //////////////////////////////////////////////////////////////
    {
        EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv));
    }
}
// Accuracy test: boundingRect overload taking GArray<Point2i> directly
// (no Mat packing -- the vector is fed to both backends as-is).
TEST_P(BoundingRectVector32STest, AccuracyTest)
{
cv::Rect out_rect_gapi, out_rect_ocv;
std::vector<cv::Point2i> in_vectorS(sz.width);
cv::randu(in_vectorS, cv::Scalar::all(0), cv::Scalar::all(255));
// G-API code //////////////////////////////////////////////////////////////
cv::GArray<cv::Point2i> in;
auto out = cv::gapi::boundingRect(in);
cv::GComputation c(cv::GIn(in), cv::GOut(out));
c.apply(cv::gin(in_vectorS), cv::gout(out_rect_gapi), getCompileArgs());
// OpenCV code /////////////////////////////////////////////////////////////
{
out_rect_ocv = cv::boundingRect(in_vectorS);
}
// Comparison //////////////////////////////////////////////////////////////
{
EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv));
}
}
// Accuracy test: boundingRect overload taking GArray<Point2f> directly.
// Random coordinates are quantized to multiples of 1/fscale so both backends
// see bit-identical, ULP-stable input.
TEST_P(BoundingRectVector32FTest, AccuracyTest)
{
    cv::RNG& rng = theRNG();
    cv::Rect out_rect_gapi, out_rect_ocv;

    // Build exactly sz.width points. The vector must start empty: the previous
    // code constructed it with sz.width default elements and then push_back'd,
    // producing 2*sz.width points where the first half were unintended (0,0)s.
    std::vector<cv::Point2f> in_vectorF;
    in_vectorF.reserve(sz.width);
    const int fscale = 256; // avoid bits near ULP, generate stable test input
    for (int i = 0; i < sz.width; i++)
    {
        cv::Point2f pt(rng.uniform(0, 255 * fscale) / static_cast<float>(fscale),
                       rng.uniform(0, 255 * fscale) / static_cast<float>(fscale));
        in_vectorF.push_back(pt);
    }

    // G-API code //////////////////////////////////////////////////////////////
    cv::GArray<cv::Point2f> in;
    auto out = cv::gapi::boundingRect(in);
    cv::GComputation c(cv::GIn(in), cv::GOut(out));
    c.apply(cv::gin(in_vectorF), cv::gout(out_rect_gapi), getCompileArgs());
    // OpenCV code /////////////////////////////////////////////////////////////
    {
        out_rect_ocv = cv::boundingRect(in_vectorF);
    }
    // Comparison //////////////////////////////////////////////////////////////
    {
        EXPECT_TRUE(cmpF(out_rect_gapi, out_rect_ocv));
    }
}
TEST_P(BGR2RGBTest, AccuracyTest)
{
// G-API code //////////////////////////////////////////////////////////////

View File

@ -463,6 +463,7 @@ struct TestWithParamsSpecific : public TestWithParamsBase<ParamsSpecific<Specifi
using compare_f = std::function<bool(const cv::Mat &a, const cv::Mat &b)>;
using compare_scalar_f = std::function<bool(const cv::Scalar &a, const cv::Scalar &b)>;
using compare_rect_f = std::function<bool(const cv::Rect &a, const cv::Rect &b)>;
template<typename Elem>
using compare_vector_f = std::function<bool(const std::vector<Elem> &a,
@ -489,6 +490,7 @@ private:
using CompareMats = CompareF<cv::Mat, cv::Mat>;
using CompareScalars = CompareF<cv::Scalar, cv::Scalar>;
using CompareRects = CompareF<cv::Rect, cv::Rect>;
template<typename Elem>
using CompareVectors = CompareF<std::vector<Elem>, std::vector<Elem>>;
@ -535,6 +537,27 @@ struct WrappableScalar
}
};
// CRTP mix-in that lets a rect-comparison functor (the derived type T) be
// wrapped either as a plain std::function or as a printable CompareRects
// object for use in parameterized tests.
template<typename T>
struct WrappableRect
{
    // Wrap a copy of the derived functor into a type-erased comparison function.
    compare_rect_f to_compare_f()
    {
        T derived = *static_cast<T*>(this);
        return [derived](const cv::Rect &lhs, const cv::Rect &rhs)
        {
            return derived(lhs, rhs);
        };
    }

    // Wrap the functor together with its printed description (via operator<<)
    // so test parameter listings stay human-readable.
    CompareRects to_compare_obj()
    {
        T derived = *static_cast<T*>(this);
        std::stringstream desc;
        desc << derived;
        return CompareRects(to_compare_f(), desc.str());
    }
};
template<typename T, typename Elem>
struct WrappableVector
{
@ -719,13 +742,15 @@ public:
double err_Inf = cv::norm(in1, in2, NORM_INF);
if (err_Inf > _inf_tol)
{
std::cout << "ToleranceColor error: err_Inf=" << err_Inf << " tolerance=" << _inf_tol << std::endl;;
std::cout << "ToleranceColor error: err_Inf=" << err_Inf
<< " tolerance=" << _inf_tol << std::endl;
return false;
}
double err = cv::norm(in1, in2, NORM_L1 | NORM_RELATIVE);
if (err > _tol)
{
std::cout << "ToleranceColor error: err=" << err << " tolerance=" << _tol << std::endl;;
std::cout << "ToleranceColor error: err=" << err
<< " tolerance=" << _tol << std::endl;
return false;
}
}
@ -749,7 +774,8 @@ public:
double abs_err = std::abs(in1[0] - in2[0]) / std::max(1.0, std::abs(in2[0]));
if (abs_err > _tol)
{
std::cout << "AbsToleranceScalar error: abs_err=" << abs_err << " tolerance=" << _tol << " in1[0]" << in1[0] << " in2[0]" << in2[0] << std::endl;;
std::cout << "AbsToleranceScalar error: abs_err=" << abs_err << " tolerance=" << _tol
<< " in1[0]" << in1[0] << " in2[0]" << in2[0] << std::endl;
return false;
}
else
@ -765,6 +791,46 @@ private:
double _tol;
};
// Compares two rectangles by Intersection-over-Union (IoU): the comparison
// succeeds when (1 - IoU) does not exceed the configured tolerance.
class IoUToleranceRect : public WrappableRect<IoUToleranceRect>
{
public:
    // tol: maximum allowed (1 - IoU); 0 requires identical rectangles.
    IoUToleranceRect(double tol) : _tol(tol) {}

    bool operator() (const cv::Rect& in1, const cv::Rect& in2) const
    {
        // determine the (x, y)-coordinates of the intersection rectangle
        int xA = std::max(in1.x, in2.x);
        int yA = std::max(in1.y, in2.y);
        int xB = std::min(in1.br().x, in2.br().x);
        int yB = std::min(in1.br().y, in2.br().y);
        // compute the area of intersection rectangle (0 if they don't overlap)
        int interArea = std::max(0, xB - xA) * std::max(0, yB - yA);
        // compute the area of union rectangle
        int unionArea = in1.area() + in2.area() - interArea;

        // BUGFIX: the division must be done in floating point — plain
        // interArea / unionArea is integer division, which truncates IoU
        // to 0 or 1 and makes any tolerance in (0, 1) act like exact match.
        // Two empty rectangles give unionArea == 0; they are identical,
        // so treat that case as IoU == 1 (and avoid 0/0).
        double iou = (unionArea == 0)
                   ? 1.0
                   : static_cast<double>(interArea) / unionArea;
        double err = 1 - iou;
        if (err > _tol)
        {
            std::cout << "IoUToleranceRect error: err=" << err << " tolerance=" << _tol
                      << " in1.x=" << in1.x << " in2.x=" << in2.x
                      << " in1.y=" << in1.y << " in2.y=" << in2.y
                      << " in1.width=" << in1.width << " in2.width=" << in2.width
                      << " in1.height=" << in1.height << " in2.height=" << in2.height << std::endl;
            return false;
        }
        else
        {
            return true;
        }
    }

    friend std::ostream& operator<<(std::ostream& os, const IoUToleranceRect& obj)
    {
        return os << "IoUToleranceRect(" << std::to_string(obj._tol) << ")";
    }

private:
    double _tol; // maximum allowed (1 - IoU)
};
template<typename Elem>
class AbsExactVector : public WrappableVector<AbsExactVector<Elem>, Elem>
{
@ -803,6 +869,11 @@ inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_sca
return os << "compare_scalar_f";
}
// Prints a placeholder name for a rect-comparison std::function; the functor
// itself is type-erased, so only its role can be shown in test param listings.
inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_rect_f&)
{
    os << "compare_rect_f";
    return os;
}
template<typename Elem>
inline std::ostream& operator<<(std::ostream& os, const opencv_test::compare_vector_f<Elem>&)
{
@ -849,6 +920,37 @@ inline std::ostream& operator<<(std::ostream& os, NormTypes op)
return os;
}
// Prints the symbolic name of a RetrievalModes value so parameterized
// findContours tests report readable parameter sets.
inline std::ostream& operator<<(std::ostream& os, RetrievalModes op)
{
    switch (op)
    {
        case RetrievalModes::RETR_EXTERNAL:  os << "RETR_EXTERNAL";  break;
        case RetrievalModes::RETR_LIST:      os << "RETR_LIST";      break;
        case RetrievalModes::RETR_CCOMP:     os << "RETR_CCOMP";     break;
        case RetrievalModes::RETR_TREE:      os << "RETR_TREE";      break;
        case RetrievalModes::RETR_FLOODFILL: os << "RETR_FLOODFILL"; break;
        default: GAPI_Assert(false && "unknown RetrievalModes value");
    }
    return os;
}
// Prints the symbolic name of a ContourApproximationModes value so
// parameterized findContours tests report readable parameter sets.
inline std::ostream& operator<<(std::ostream& os, ContourApproximationModes op)
{
    switch (op)
    {
        case ContourApproximationModes::CHAIN_APPROX_NONE:      os << "CHAIN_APPROX_NONE";      break;
        case ContourApproximationModes::CHAIN_APPROX_SIMPLE:    os << "CHAIN_APPROX_SIMPLE";    break;
        case ContourApproximationModes::CHAIN_APPROX_TC89_L1:   os << "CHAIN_APPROX_TC89_L1";   break;
        case ContourApproximationModes::CHAIN_APPROX_TC89_KCOS: os << "CHAIN_APPROX_TC89_KCOS"; break;
        default: GAPI_Assert(false && "unknown ContourApproximationModes value");
    }
    return os;
}
inline std::ostream& operator<<(std::ostream& os, MorphTypes op)
{
#define CASE(v) case MorphTypes::v: os << #v; break

View File

@ -265,6 +265,78 @@ INSTANTIATE_TEST_CASE_P(GoodFeaturesInternalTestCPU, GoodFeaturesTest,
Values(3),
Values(true)));
// --- findContours() kernel instantiations (CPU backend) ---
// Smoke case: single size/type and one fixed mode pair, no offset argument.
INSTANTIATE_TEST_CASE_P(FindContoursNoOffsetTestCPU, FindContoursNoOffsetTest,
Combine(Values(IMGPROC_CPU),
Values(cv::Size(1280, 720)),
Values(CV_8UC1),
Values(RETR_EXTERNAL),
Values(CHAIN_APPROX_NONE)));
// Offset variant is parameterized only by the backend; the remaining inputs
// are presumably fixed inside the fixture — confirm against its declaration.
INSTANTIATE_TEST_CASE_P(FindContoursOffsetTestCPU, FindContoursOffsetTest,
Values(IMGPROC_CPU));
// Hierarchical ("H") variant on 8-bit input: full cross-product of all
// retrieval modes valid for CV_8UC1 with all approximation modes.
INSTANTIATE_TEST_CASE_P(FindContoursHNoOffsetTestCPU, FindContoursHNoOffsetTest,
Combine(Values(IMGPROC_CPU),
Values(cv::Size(1280, 720),
cv::Size(640, 480)),
Values(CV_8UC1),
Values(RETR_EXTERNAL, RETR_LIST, RETR_CCOMP, RETR_TREE),
Values(CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE,
CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS)));
// Hierarchical variant on 32-bit signed input: only RETR_CCOMP/RETR_FLOODFILL
// are exercised for CV_32SC1.
INSTANTIATE_TEST_CASE_P(FindContoursHNoOffset32STestCPU, FindContoursHNoOffsetTest,
Combine(Values(IMGPROC_CPU),
Values(cv::Size(1280, 720),
cv::Size(640, 480)),
Values(CV_32SC1),
Values(RETR_CCOMP, RETR_FLOODFILL),
Values(CHAIN_APPROX_NONE, CHAIN_APPROX_SIMPLE,
CHAIN_APPROX_TC89_L1, CHAIN_APPROX_TC89_KCOS)));
INSTANTIATE_TEST_CASE_P(FindContoursHOffsetTestCPU, FindContoursHOffsetTest,
Values(IMGPROC_CPU));
// --- boundingRect() kernel instantiations (CPU backend) ---
// GMat overload: gray-scale images of several sizes, compared with exact IoU
// (tolerance 0). The -1 slots appear unused for these fixtures — TODO confirm
// against the fixture parameter declarations.
INSTANTIATE_TEST_CASE_P(BoundingRectMatTestCPU, BoundingRectMatTest,
Combine(Values( CV_8UC1 ),
Values(cv::Size(1280, 720),
cv::Size(640, 480),
cv::Size(128, 128)),
Values(-1),
Values(IMGPROC_CPU),
Values(IoUToleranceRect(0).to_compare_obj())));
// Mat-of-points overloads: sizes of form (N, 1), i.e. N points.
// Integer (32S) points use tolerance 0 (exact match expected).
INSTANTIATE_TEST_CASE_P(BoundingRectMatVector32STestCPU, BoundingRectMatVector32STest,
Combine(Values(-1),
Values(cv::Size(1280, 1),
cv::Size(128, 1)),
Values(-1),
Values(IMGPROC_CPU),
Values(IoUToleranceRect(0).to_compare_obj())));
// Float (32F) points use a small nonzero IoU tolerance (1e-5), presumably to
// absorb floating-point rounding differences — confirm intent with authors.
INSTANTIATE_TEST_CASE_P(BoundingRectMatVector32FTestCPU, BoundingRectMatVector32FTest,
Combine(Values(-1),
Values(cv::Size(1280, 1),
cv::Size(128, 1)),
Values(-1),
Values(IMGPROC_CPU),
Values(IoUToleranceRect(1e-5).to_compare_obj())));
// GArray<Point2i> overload: exact comparison (tolerance 0).
INSTANTIATE_TEST_CASE_P(BoundingRectVector32STestCPU, BoundingRectVector32STest,
Combine(Values(-1),
Values(cv::Size(1280, 1),
cv::Size(128, 1)),
Values(-1),
Values(IMGPROC_CPU),
Values(IoUToleranceRect(0).to_compare_obj())));
// GArray<Point2f> overload: same shapes as 32S, with the 1e-5 IoU tolerance.
INSTANTIATE_TEST_CASE_P(BoundingRectVector32FTestCPU, BoundingRectVector32FTest,
Combine(Values(-1),
Values(cv::Size(1280, 1),
cv::Size(128, 1)),
Values(-1),
Values(IMGPROC_CPU),
Values(IoUToleranceRect(1e-5).to_compare_obj())));
INSTANTIATE_TEST_CASE_P(BGR2RGBTestCPU, BGR2RGBTest,
Combine(Values(CV_8UC3),
Values(cv::Size(1280, 720),