Merge pull request #18674 from anna-khakimova:ak/backgroundSubtractor

GAPI: New BackgroundSubtractor stateful kernel

* New BackgroundSubtractorMOG2 kernel

* Add BS parameters
This commit is contained in:
Anna Khakimova 2020-11-30 21:09:42 +03:00 committed by GitHub
parent 986ad4ff06
commit 56568dae31
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 214 additions and 0 deletions

View File

@@ -62,6 +62,73 @@ G_TYPED_KERNEL(GCalcOptFlowLKForPyr,
return std::make_tuple(empty_array_desc(), empty_array_desc(), empty_array_desc());
}
};
//! Selects which OpenCV background-subtraction algorithm backs the kernel.
enum BackgroundSubtractorType
{
    TYPE_BS_MOG2,  //!< Gaussian Mixture-based algorithm (cv::BackgroundSubtractorMOG2)
    TYPE_BS_KNN    //!< K-nearest neighbours-based algorithm (cv::BackgroundSubtractorKNN)
};

/** @brief Structure for the Background Subtractor operation's initialization parameters.*/
struct BackgroundSubtractorParams
{
    //! Type of the Background Subtractor operation.
    BackgroundSubtractorType operation = TYPE_BS_MOG2;

    //! Length of the history.
    int history = 500;

    //! For MOG2: Threshold on the squared Mahalanobis distance between the pixel
    //! and the model to decide whether a pixel is well described by
    //! the background model.
    //! For KNN: Threshold on the squared distance between the pixel and the sample
    //! to decide whether a pixel is close to that sample.
    double threshold = 16;

    //! If true, the algorithm will detect shadows and mark them.
    bool detectShadows = true;

    //! The value between 0 and 1 that indicates how fast
    //! the background model is learnt.
    //! Negative parameter value makes the algorithm use some automatically
    //! chosen learning rate.
    double learningRate = -1;

    //! Default constructor: all fields keep the in-class initializer values above.
    BackgroundSubtractorParams() = default;

    /** Full constructor
    @param op MOG2/KNN Background Subtractor type.
    @param histLength Length of the history.
    @param thrshld For MOG2: Threshold on the squared Mahalanobis distance between
    the pixel and the model to decide whether a pixel is well described by the background model.
    For KNN: Threshold on the squared distance between the pixel and the sample to decide
    whether a pixel is close to that sample.
    @param detect If true, the algorithm will detect shadows and mark them. It decreases the
    speed a bit, so if you do not need this feature, set the parameter to false.
    @param lRate The value between 0 and 1 that indicates how fast the background model is learnt.
    Negative parameter value makes the algorithm to use some automatically chosen learning rate.
    */
    BackgroundSubtractorParams(BackgroundSubtractorType op, int histLength,
                               double thrshld, bool detect, double lRate)
        : operation(op),
          history(histLength),
          threshold(thrshld),
          detectShadows(detect),
          learningRate(lRate)
    {}
};
// G-API typed kernel declaration: takes one input frame plus the subtractor
// parameters and produces a foreground mask. Backends implementing this
// operation are matched by the textual ID below.
G_TYPED_KERNEL(GBackgroundSubtractor, <GMat(GMat, BackgroundSubtractorParams)>,
"org.opencv.video.BackgroundSubtractor")
{
// Compile-time metadata inference: validates parameters and describes the output.
static GMatDesc outMeta(const GMatDesc& in, const BackgroundSubtractorParams& bsParams)
{
// Reject obviously invalid user parameters at graph compile time.
GAPI_Assert(bsParams.history >= 0);
GAPI_Assert(bsParams.learningRate <= 1);
// Output mask keeps the input size but is always 8-bit, single-channel.
return in.withType(CV_8U, 1);
}
};
} //namespace video
//! @addtogroup gapi_video
@@ -169,8 +236,32 @@ calcOpticalFlowPyrLK(const GArray<GMat> &prevPyr,
int flags = 0,
double minEigThresh = 1e-4);
/** @brief Gaussian Mixture-based or K-nearest neighbours-based Background/Foreground Segmentation Algorithm.
The operation generates a foreground mask for each frame of the input stream.
@return Output image is foreground mask, i.e. 8-bit unsigned 1-channel (binary) matrix @ref CV_8UC1.
@note Functional textual ID is "org.opencv.video.BackgroundSubtractor"
@param src input image: Floating point frame is used without scaling and should be in range [0,255].
@param bsParams Set of initialization parameters for Background Subtractor kernel
(algorithm type, history length, threshold, shadow detection, learning rate).
*/
GAPI_EXPORTS GMat BackgroundSubtractor(const GMat& src, const cv::gapi::video::BackgroundSubtractorParams& bsParams);
//! @} gapi_video
} //namespace gapi
} //namespace cv
namespace cv { namespace detail {
// Registers BackgroundSubtractorParams as a G-API compile argument so it can be
// passed through GCompileArgs and retrieved by backends.
template<> struct CompileArgTag<cv::gapi::video::BackgroundSubtractorParams>
{
static const char* tag()
{
// NOTE(review): "substractor" is a typo, but this string is a stable lookup
// key — changing it would break compile-arg matching. Keep as-is.
return "org.opencv.video.background_substractor_params";
}
};
} // namespace detail
} //namespace cv
#endif // OPENCV_GAPI_VIDEO_HPP

View File

@@ -52,5 +52,10 @@ GOptFlowLKOutput calcOpticalFlowPyrLK(const cv::GArray<cv::GMat> &prevPyr,
criteria, flags, minEigThresh);
}
// Public factory: thin wrapper that instantiates the GBackgroundSubtractor
// typed kernel node with the given input and parameters.
GMat BackgroundSubtractor(const GMat& src, const BackgroundSubtractorParams& bsp)
{
return GBackgroundSubtractor::on(src, bsp);
}
} //namespace gapi
} //namespace cv

View File

@@ -80,12 +80,40 @@ GAPI_OCV_KERNEL(GCPUCalcOptFlowLKForPyr, cv::gapi::video::GCalcOptFlowLKForPyr)
}
};
// Stateful OpenCV-backend implementation of GBackgroundSubtractor: the
// cv::BackgroundSubtractor instance created in setup() is kept alive between
// frames, which is what lets the background model accumulate over the stream.
GAPI_OCV_KERNEL_ST(GCPUBackgroundSubtractor,
cv::gapi::video::GBackgroundSubtractor,
cv::BackgroundSubtractor)
{
// Called once before processing: instantiates the requested OpenCV algorithm.
static void setup(const cv::GMatDesc&, const cv::gapi::video::BackgroundSubtractorParams& bsParams,
std::shared_ptr<cv::BackgroundSubtractor>& state,
const cv::GCompileArgs&)
{
if (bsParams.operation == cv::gapi::video::TYPE_BS_MOG2)
state = cv::createBackgroundSubtractorMOG2(bsParams.history,
bsParams.threshold,
bsParams.detectShadows);
else if (bsParams.operation == cv::gapi::video::TYPE_BS_KNN)
state = cv::createBackgroundSubtractorKNN(bsParams.history,
bsParams.threshold,
bsParams.detectShadows);
// 'state' stays null for an unknown operation type — fail loudly here.
GAPI_Assert(state);
}
// Called per frame: updates the model and writes the foreground mask to 'out'.
static void run(const cv::Mat& in, const cv::gapi::video::BackgroundSubtractorParams& bsParams,
cv::Mat &out, cv::BackgroundSubtractor& state)
{
state.apply(in, out, bsParams.learningRate);
}
};
// Returns the kernel package with all CPU (OpenCV-backed) video kernels,
// including the new stateful background subtractor. Built once, lazily.
cv::gapi::GKernelPackage cv::gapi::video::cpu::kernels()
{
static auto pkg = cv::gapi::kernels
< GCPUBuildOptFlowPyramid
, GCPUCalcOptFlowLK
, GCPUCalcOptFlowLKForPyr
, GCPUBackgroundSubtractor
>();
return pkg;
}

View File

@@ -28,6 +28,9 @@ GAPI_TEST_FIXTURE_SPEC_PARAMS(BuildPyr_CalcOptFlow_PipelineTest,
FIXTURE_API(std::string,int,int,bool), 4,
fileNamePattern, winSize, maxLevel, withDerivatives)
// Parameterized fixture for the background subtractor accuracy test:
// (algorithm type + threshold) tuple, history length, shadow-detection flag,
// learning rate, input video path, and the number of frames to compare.
GAPI_TEST_FIXTURE_SPEC_PARAMS(BackgroundSubtractorTest, FIXTURE_API(tuple<cv::gapi::video::BackgroundSubtractorType,double>,
int, bool, double, std::string, std::size_t),
6, typeAndThreshold, histLength, detectShadows, learningRate, filePath, testNumFrames)
} // opencv_test

View File

@@ -321,6 +321,35 @@ inline GComputation runOCVnGAPIOptFlowPipeline(TestFunctional& testInst,
return c;
}
// Runs an already-configured streaming pipeline frame by frame and compares the
// G-API foreground mask against the reference OpenCV subtractor's output.
// diffPercent — max share of differing pixels (percent); tolerance — per-pixel
// absolute difference allowed; lRate — learning rate passed to the reference.
inline void testBackgroundSubtractorStreaming(cv::GStreamingCompiled& gapiBackSub,
const cv::Ptr<cv::BackgroundSubtractor>& pOCVBackSub,
const int diffPercent, const int tolerance,
const double lRate, const std::size_t testNumFrames)
{
cv::Mat frame, gapiForeground, ocvForeground;
double numDiff = diffPercent / 100.0;
gapiBackSub.start();
EXPECT_TRUE(gapiBackSub.running());
compare_f cmpF = AbsSimilarPoints(tolerance, numDiff).to_compare_f();
// Comparison of G-API and OpenCV subtractors
// NOTE(review): '<=' means up to testNumFrames+1 frames are compared when the
// stream is long enough — confirm this off-by-one is intended.
std::size_t frames = 0u;
while (frames <= testNumFrames && gapiBackSub.pull(cv::gout(frame, gapiForeground)))
{
// 'frame' is the pass-through input copy produced by the tested graph,
// so both subtractors consume exactly the same data.
pOCVBackSub->apply(frame, ocvForeground, lRate);
EXPECT_TRUE(cmpF(gapiForeground, ocvForeground));
frames++;
}
if (gapiBackSub.running())
gapiBackSub.stop();
// At least one frame must have been pulled, else the test proved nothing.
EXPECT_LT(0u, frames);
EXPECT_FALSE(gapiBackSub.running());
}
#else // !HAVE_OPENCV_VIDEO
inline cv::GComputation runOCVnGAPIBuildOptFlowPyramid(TestFunctional&,

View File

@@ -8,6 +8,7 @@
#define OPENCV_GAPI_VIDEO_TESTS_INL_HPP
#include "gapi_video_tests.hpp"
#include <opencv2/gapi/streaming/cap.hpp>
namespace opencv_test
{
@@ -88,6 +89,49 @@ TEST_P(BuildPyr_CalcOptFlow_PipelineTest, AccuracyTest)
compareOutputsOptFlow(outOCV, outGAPI);
}
#ifdef HAVE_OPENCV_VIDEO
// Streams a test video through the G-API background subtractor and checks the
// masks against the equivalent OpenCV algorithm created with the same parameters.
TEST_P(BackgroundSubtractorTest, AccuracyTest)
{
initTestDataPath();
cv::gapi::video::BackgroundSubtractorType opType;
double thr = -1;
std::tie(opType, thr) = typeAndThreshold;
cv::gapi::video::BackgroundSubtractorParams bsp(opType, histLength, thr,
detectShadows, learningRate);
// G-API graph declaration
cv::GMat in;
cv::GMat out = cv::gapi::BackgroundSubtractor(in, bsp);
// Preserving 'in' in output to have possibility to compare with OpenCV reference
cv::GComputation c(cv::GIn(in), cv::GOut(cv::gapi::copy(in), out));
// G-API compilation of graph for streaming mode
auto gapiBackSub = c.compileStreaming(getCompileArgs());
// Testing G-API Background Subtractor in streaming mode
auto path = findDataFile("cv/video/768x576.avi");
try
{
gapiBackSub.setSource(gapi::wip::make_src<cv::gapi::wip::GCaptureSource>(path));
}
catch (...)
{ throw SkipTestException("Video file can't be opened."); }
// Reference OpenCV subtractor built with identical parameters.
cv::Ptr<cv::BackgroundSubtractor> pOCVBackSub;
if (opType == cv::gapi::video::TYPE_BS_MOG2)
pOCVBackSub = cv::createBackgroundSubtractorMOG2(histLength, thr,
detectShadows);
else if (opType == cv::gapi::video::TYPE_BS_KNN)
pOCVBackSub = cv::createBackgroundSubtractorKNN(histLength, thr,
detectShadows);
// Allowing 1% difference of all pixels between G-API and reference OpenCV results
testBackgroundSubtractorStreaming(gapiBackSub, pOCVBackSub, 1, 1, learningRate, testNumFrames);
}
#endif
} // opencv_test
#endif // OPENCV_GAPI_VIDEO_TESTS_INL_HPP

View File

@@ -97,4 +97,18 @@ INSTANTIATE_TEST_CASE_MACRO_P(WITH_VIDEO(BuildPyr_CalcOptFlow_PipelineInternalTe
Values(15),
Values(3),
Values(true)));
// CPU instantiation of the accuracy test: sweeps both algorithms with typical
// thresholds, two history lengths, shadow detection on/off, and several
// learning rates (including the automatic one, -1) over 3 frames of the video.
INSTANTIATE_TEST_CASE_MACRO_P(WITH_VIDEO(BackgroundSubtractorTestCPU),
BackgroundSubtractorTest,
Combine(Values(VIDEO_CPU),
Values(std::make_tuple(cv::gapi::video::TYPE_BS_MOG2, 16),
std::make_tuple(cv::gapi::video::TYPE_BS_MOG2, 8),
std::make_tuple(cv::gapi::video::TYPE_BS_KNN, 400),
std::make_tuple(cv::gapi::video::TYPE_BS_KNN, 200)),
Values(500, 50),
Values(true, false),
Values(-1, 0, 0.5, 1),
Values("cv/video/768x576.avi"),
Values(3)));
} // opencv_test