refactored video module; use the new-style algorithms now

Vadim Pisarevsky 2013-03-20 19:51:49 +04:00
parent 14a0abbfa9
commit 07e0f7bf59
28 changed files with 1113 additions and 2270 deletions

View File

@@ -893,7 +893,7 @@ CV_INIT_ALGORITHM(LBPH, "FaceRecognizer.LBPH",
 bool initModule_contrib()
 {
-    Ptr<Algorithm> efaces = createEigenfaces(), ffaces = createFisherfaces(), lbph = createLBPH();
+    Ptr<Algorithm> efaces = createEigenfaces_hidden(), ffaces = createFisherfaces_hidden(), lbph = createLBPH_hidden();
     return efaces->info() != 0 && ffaces->info() != 0 && lbph->info() != 0;
 }

View File

@@ -254,14 +254,14 @@ namespace cv
 } //namespace cv
 
 #define CV_INIT_ALGORITHM(classname, algname, memberinit) \
-    static ::cv::Algorithm* create##classname() \
+    static ::cv::Algorithm* create##classname##_hidden() \
     { \
        return new classname; \
    } \
    \
    static ::cv::AlgorithmInfo& classname##_info() \
    { \
-        static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname); \
+        static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname##_hidden); \
        return classname##_info_var; \
    } \
    \
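Presumably the ##_hidden suffix keeps the macro-generated static factory from colliding with the public createXXX() factory functions this refactoring introduces for the video module. A minimal sketch of the collision being avoided, using a hypothetical MyAlgo class:

// Hypothetical class, only to make the sketch self-contained.
class MyAlgo : public cv::Algorithm {};

// New-style public factory, declared in a header:
cv::Ptr<MyAlgo> createMyAlgo();

// The old expansion of CV_INIT_ALGORITHM(MyAlgo, ...) would also have produced
//     static ::cv::Algorithm* createMyAlgo() { return new MyAlgo; }
// -- the same name and parameter list with a different return type, which C++
// rejects. The renamed helper stays out of the public name's way:
static ::cv::Algorithm* createMyAlgo_hidden() { return new MyAlgo; }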

View File

@@ -614,10 +614,10 @@ PERF_TEST_P(Video_Cn_LearningRate, Video_MOG,
     }
     else
     {
-        cv::BackgroundSubtractorMOG mog;
+        cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
         cv::Mat foreground;
 
-        mog(frame, foreground, learningRate);
+        mog->apply(frame, foreground, learningRate);
 
         for (int i = 0; i < 10; ++i)
         {
@@ -635,7 +635,7 @@ PERF_TEST_P(Video_Cn_LearningRate, Video_MOG,
             }
 
             startTimer(); next();
-            mog(frame, foreground, learningRate);
+            mog->apply(frame, foreground, learningRate);
             stopTimer();
         }
@@ -709,12 +709,12 @@ PERF_TEST_P(Video_Cn, Video_MOG2,
     }
     else
     {
-        cv::BackgroundSubtractorMOG2 mog2;
-        mog2.set("detectShadows", false);
+        cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
+        mog2->set("detectShadows", false);
         cv::Mat foreground;
 
-        mog2(frame, foreground);
+        mog2->apply(frame, foreground);
 
         for (int i = 0; i < 10; ++i)
         {
@@ -732,7 +732,7 @@ PERF_TEST_P(Video_Cn, Video_MOG2,
             }
 
             startTimer(); next();
-            mog2(frame, foreground);
+            mog2->apply(frame, foreground);
             stopTimer();
         }
@@ -789,7 +789,7 @@ PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage,
     }
     else
     {
-        cv::BackgroundSubtractorMOG2 mog2;
+        cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
         cv::Mat foreground;
 
         for (int i = 0; i < 10; ++i)
@@ -807,12 +807,12 @@ PERF_TEST_P(Video_Cn, Video_MOG2GetBackgroundImage,
                 cv::swap(temp, frame);
             }
 
-            mog2(frame, foreground);
+            mog2->apply(frame, foreground);
         }
 
         cv::Mat background;
 
-        TEST_CYCLE() mog2.getBackgroundImage(background);
+        TEST_CYCLE() mog2->getBackgroundImage(background);
 
         CPU_SANITY_CHECK(background);
     }
@@ -958,11 +958,11 @@ PERF_TEST_P(Video_Cn_MaxFeatures, Video_GMG,
         cv::Mat foreground;
         cv::Mat zeros(frame.size(), CV_8UC1, cv::Scalar::all(0));
 
-        cv::BackgroundSubtractorGMG gmg;
-        gmg.set("maxFeatures", maxFeatures);
-        gmg.initialize(frame.size(), 0.0, 255.0);
+        cv::Ptr<cv::BackgroundSubtractor> gmg = cv::createBackgroundSubtractorGMG();
+        gmg->set("maxFeatures", maxFeatures);
+        //gmg.initialize(frame.size(), 0.0, 255.0);
 
-        gmg(frame, foreground);
+        gmg->apply(frame, foreground);
 
         for (int i = 0; i < 150; ++i)
         {
@@ -985,7 +985,7 @@ PERF_TEST_P(Video_Cn_MaxFeatures, Video_GMG,
             }
 
            startTimer(); next();
-            gmg(frame, foreground);
+            gmg->apply(frame, foreground);
            stopTimer();
        }
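For orientation, the pattern these perf tests migrate to is the new factory-plus-apply() style throughout. A standalone sketch against the refactored API (the video file name is made up):

#include <opencv2/video/background_segm.hpp>
#include <opencv2/highgui/highgui.hpp>

int main()
{
    cv::VideoCapture cap("video.avi");  // hypothetical input
    cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();

    cv::Mat frame, foreground;
    while (cap.read(frame))
        mog2->apply(frame, foreground); // replaces the old functor call mog2(frame, foreground)
    return 0;
}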

View File

@@ -245,8 +245,8 @@ GPU_TEST_P(MOG2, Update)
     mog2.bShadowDetection = detectShadow;
     cv::gpu::GpuMat foreground = createMat(frame.size(), CV_8UC1, useRoi);
 
-    cv::BackgroundSubtractorMOG2 mog2_gold;
-    mog2_gold.set("detectShadows", detectShadow);
+    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = cv::createBackgroundSubtractorMOG2();
+    mog2_gold->setDetectShadows(detectShadow);
     cv::Mat foreground_gold;
 
     for (int i = 0; i < 10; ++i)
@@ -263,7 +263,7 @@ GPU_TEST_P(MOG2, Update)
         mog2(loadMat(frame, useRoi), foreground);
 
-        mog2_gold(frame, foreground_gold);
+        mog2_gold->apply(frame, foreground_gold);
 
         if (detectShadow)
         {
@@ -290,8 +290,8 @@ GPU_TEST_P(MOG2, getBackgroundImage)
     mog2.bShadowDetection = detectShadow;
     cv::gpu::GpuMat foreground;
 
-    cv::BackgroundSubtractorMOG2 mog2_gold;
-    mog2_gold.set("detectShadows", detectShadow);
+    cv::Ptr<cv::BackgroundSubtractorMOG2> mog2_gold = cv::createBackgroundSubtractorMOG2();
+    mog2_gold->setDetectShadows(detectShadow);
     cv::Mat foreground_gold;
 
     for (int i = 0; i < 10; ++i)
@@ -301,14 +301,14 @@ GPU_TEST_P(MOG2, getBackgroundImage)
         mog2(loadMat(frame, useRoi), foreground);
 
-        mog2_gold(frame, foreground_gold);
+        mog2_gold->apply(frame, foreground_gold);
     }
 
     cv::gpu::GpuMat background = createMat(frame.size(), frame.type(), useRoi);
     mog2.getBackgroundImage(background);
 
     cv::Mat background_gold;
-    mog2_gold.getBackgroundImage(background_gold);
+    mog2_gold->getBackgroundImage(background_gold);
 
     ASSERT_MAT_NEAR(background_gold, background, 0);
 }

View File

@@ -50,7 +50,7 @@ icvReleaseGaussianBGModel( CvGaussBGModel** bg_model )
     if( *bg_model )
     {
-        delete (cv::BackgroundSubtractorMOG*)((*bg_model)->mog);
+        delete (cv::Ptr<cv::BackgroundSubtractor>*)((*bg_model)->mog);
         cvReleaseImage( &(*bg_model)->background );
         cvReleaseImage( &(*bg_model)->foreground );
         memset( *bg_model, 0, sizeof(**bg_model) );
@@ -65,10 +65,10 @@ icvUpdateGaussianBGModel( IplImage* curr_frame, CvGaussBGModel* bg_model, doubl
 {
     cv::Mat image = cv::cvarrToMat(curr_frame), mask = cv::cvarrToMat(bg_model->foreground);
-    cv::BackgroundSubtractorMOG* mog = (cv::BackgroundSubtractorMOG*)(bg_model->mog);
+    cv::Ptr<cv::BackgroundSubtractor>* mog = (cv::Ptr<cv::BackgroundSubtractor>*)(bg_model->mog);
     CV_Assert(mog != 0);
 
-    (*mog)(image, mask, learningRate);
+    (*mog)->apply(image, mask, learningRate);
     bg_model->countFrames++;
 
     return 0;
@@ -105,13 +105,11 @@ cvCreateGaussianBGModel( IplImage* first_frame, CvGaussBGStatModelParams* parame
     bg_model->params = params;
 
-    cv::BackgroundSubtractorMOG* mog =
-        new cv::BackgroundSubtractorMOG(params.win_size,
-                                        params.n_gauss,
-                                        params.bg_threshold,
-                                        params.variance_init);
-    bg_model->mog = mog;
+    cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG(params.win_size, params.n_gauss,
+                                                                              params.bg_threshold);
+    cv::Ptr<cv::BackgroundSubtractor>* pmog = new cv::Ptr<cv::BackgroundSubtractor>;
+    *pmog = mog;
+    bg_model->mog = pmog;
 
     CvSize sz = cvGetSize(first_frame);
     bg_model->background = cvCreateImage(sz, IPL_DEPTH_8U, first_frame->nChannels);
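The idiom above is worth noting: because the C struct can hold only a raw void*, the reference-counted cv::Ptr is itself allocated on the heap so the refcount survives between calls, and release simply deletes that Ptr. A reduced sketch of the same trick (struct and function names hypothetical):

struct CHandle { void* impl; };

void chandle_create(CHandle* h)
{
    // the Ptr itself lives on the heap; its refcount keeps the algorithm alive
    cv::Ptr<cv::BackgroundSubtractor>* p = new cv::Ptr<cv::BackgroundSubtractor>;
    *p = cv::createBackgroundSubtractorMOG();
    h->impl = p;
}

void chandle_release(CHandle* h)
{
    // deleting the Ptr drops the reference and frees the algorithm at refcount zero
    delete (cv::Ptr<cv::BackgroundSubtractor>*)h->impl;
    h->impl = 0;
}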

View File

@@ -56,7 +56,7 @@ CV_INIT_ALGORITHM(EM, "StatModel.EM",
 bool initModule_ml(void)
 {
-    Ptr<Algorithm> em = createEM();
+    Ptr<Algorithm> em = createEM_hidden();
     return em->info() != 0;
 }

View File

@@ -67,7 +67,7 @@ CV_INIT_ALGORITHM(SIFT, "Feature2D.SIFT",
 bool initModule_nonfree(void)
 {
-    Ptr<Algorithm> sift = createSIFT(), surf = createSURF();
+    Ptr<Algorithm> sift = createSIFT_hidden(), surf = createSURF_hidden();
     return sift->info() != 0 && surf->info() != 0;
 }

View File

@@ -125,6 +125,7 @@ typedef Ptr<FeatureDetector> Ptr_FeatureDetector;
 typedef Ptr<DescriptorExtractor> Ptr_DescriptorExtractor;
 typedef Ptr<Feature2D> Ptr_Feature2D;
 typedef Ptr<DescriptorMatcher> Ptr_DescriptorMatcher;
+typedef Ptr<BackgroundSubtractor> Ptr_BackgroundSubtractor;
 typedef Ptr<cv::softcascade::ChannelFeatureBuilder> Ptr_ChannelFeatureBuilder;

View File

@@ -58,8 +58,8 @@ CV_INIT_ALGORITHM(SCascade, "CascadeDetector.SCascade",
 bool initModule_softcascade(void)
 {
-    Ptr<Algorithm> sc = createSCascade();
-    Ptr<Algorithm> sc1 = createDetector();
+    Ptr<Algorithm> sc = createSCascade_hidden();
+    Ptr<Algorithm> sc1 = createDetector_hidden();
     return (sc1->info() != 0) && (sc->info() != 0);
 }

View File

@@ -12,6 +12,7 @@
 //
 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
 // Third party copyrights are property of their respective owners.
 //
 // Redistribution and use in source and binary forms, with or without modification,
@@ -54,17 +55,14 @@ namespace cv
 The class is only used to define the common interface for
 the whole family of background/foreground segmentation algorithms.
 */
-class CV_EXPORTS_W BackgroundSubtractor : public Algorithm
+class BackgroundSubtractor : public Algorithm
 {
 public:
-    //! the virtual destructor
-    virtual ~BackgroundSubtractor();
     //! the update operator that takes the next video frame and returns the current foreground mask as 8-bit binary image.
-    CV_WRAP_AS(apply) virtual void operator()(InputArray image, OutputArray fgmask,
-                                              double learningRate=0);
+    virtual void apply(InputArray image, OutputArray fgmask, double learningRate=0) = 0;
 
     //! computes a background image
-    virtual void getBackgroundImage(OutputArray backgroundImage) const;
+    virtual void getBackgroundImage(OutputArray backgroundImage) const = 0;
 };
@@ -78,35 +76,26 @@ public:
 http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
 */
-class CV_EXPORTS_W BackgroundSubtractorMOG : public BackgroundSubtractor
+class BackgroundSubtractorMOG : public BackgroundSubtractor
 {
 public:
-    //! the default constructor
-    CV_WRAP BackgroundSubtractorMOG();
-    //! the full constructor that takes the length of the history, the number of gaussian mixtures, the background ratio parameter and the noise strength
-    CV_WRAP BackgroundSubtractorMOG(int history, int nmixtures, double backgroundRatio, double noiseSigma=0);
-    //! the destructor
-    virtual ~BackgroundSubtractorMOG();
-    //! the update operator
-    virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=0);
-
-    //! re-initiaization method
-    virtual void initialize(Size frameSize, int frameType);
-
-    virtual AlgorithmInfo* info() const;
-
-protected:
-    Size frameSize;
-    int frameType;
-    Mat bgmodel;
-    int nframes;
-    int history;
-    int nmixtures;
-    double varThreshold;
-    double backgroundRatio;
-    double noiseSigma;
+    virtual int getHistory() const = 0;
+    virtual void setHistory(int nframes) = 0;
+
+    virtual int getNMixtures() const = 0;
+    virtual void setNMixtures(int nmix) = 0;
+
+    virtual double getBackgroundRatio() const = 0;
+    virtual void setBackgroundRatio(double backgroundRatio) = 0;
+
+    virtual double getNoiseSigma() const = 0;
+    virtual void setNoiseSigma(double noiseSigma) = 0;
 };
 
+CV_EXPORTS Ptr<BackgroundSubtractorMOG>
+    createBackgroundSubtractorMOG(int history=200, int nmixtures=5,
+                                  double backgroundRatio=0.7, double noiseSigma=0);
+
 /*!
 The class implements the following algorithm:
@@ -114,82 +103,51 @@ protected:
 Z.Zivkovic
 International Conference Pattern Recognition, UK, August, 2004.
 http://www.zoranz.net/Publications/zivkovic2004ICPR.pdf
 */
-class CV_EXPORTS BackgroundSubtractorMOG2 : public BackgroundSubtractor
+class BackgroundSubtractorMOG2 : public BackgroundSubtractor
 {
 public:
-    //! the default constructor
-    BackgroundSubtractorMOG2();
-    //! the full constructor that takes the length of the history, the number of gaussian mixtures, the background ratio parameter and the noise strength
-    BackgroundSubtractorMOG2(int history, float varThreshold, bool bShadowDetection=true);
-    //! the destructor
-    virtual ~BackgroundSubtractorMOG2();
-    //! the update operator
-    virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1);
-
-    //! computes a background image which are the mean of all background gaussians
-    virtual void getBackgroundImage(OutputArray backgroundImage) const;
-
-    //! re-initiaization method
-    virtual void initialize(Size frameSize, int frameType);
-
-    virtual AlgorithmInfo* info() const;
-
-protected:
-    Size frameSize;
-    int frameType;
-    Mat bgmodel;
-    Mat bgmodelUsedModes;//keep track of number of modes per pixel
-    int nframes;
-    int history;
-    int nmixtures;
-    //! here it is the maximum allowed number of mixture components.
-    //! Actual number is determined dynamically per pixel
-    double varThreshold;
-    // threshold on the squared Mahalanobis distance to decide if it is well described
-    // by the background model or not. Related to Cthr from the paper.
-    // This does not influence the update of the background. A typical value could be 4 sigma
-    // and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
-
-    /////////////////////////
-    // less important parameters - things you might change but be carefull
-    ////////////////////////
-    float backgroundRatio;
-    // corresponds to fTB=1-cf from the paper
-    // TB - threshold when the component becomes significant enough to be included into
-    // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
-    // For alpha=0.001 it means that the mode should exist for approximately 105 frames before
-    // it is considered foreground
-    // float noiseSigma;
-    float varThresholdGen;
-    //correspondts to Tg - threshold on the squared Mahalan. dist. to decide
-    //when a sample is close to the existing components. If it is not close
-    //to any a new component will be generated. I use 3 sigma => Tg=3*3=9.
-    //Smaller Tg leads to more generated components and higher Tg might make
-    //lead to small number of components but they can grow too large
-    float fVarInit;
-    float fVarMin;
-    float fVarMax;
-    //initial variance for the newly generated components.
-    //It will will influence the speed of adaptation. A good guess should be made.
-    //A simple way is to estimate the typical standard deviation from the images.
-    //I used here 10 as a reasonable value
-    // min and max can be used to further control the variance
-    float fCT;//CT - complexity reduction prior
-    //this is related to the number of samples needed to accept that a component
-    //actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
-    //the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
-
-    //shadow detection parameters
-    bool bShadowDetection;//default 1 - do shadow detection
-    unsigned char nShadowDetection;//do shadow detection - insert this value as the detection result - 127 default value
-    float fTau;
-    // Tau - shadow threshold. The shadow is detected if the pixel is darker
-    //version of the background. Tau is a threshold on how much darker the shadow can be.
-    //Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
-    //See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
+    virtual int getHistory() const = 0;
+    virtual void setHistory(int nframes) = 0;
+
+    virtual int getNMixtures() const = 0;
+    virtual void setNMixtures(int nmix) = 0;
+
+    virtual double getBackgroundRatio() const = 0;
+    virtual void setBackgroundRatio(double backgroundRatio) = 0;
+
+    virtual double getVarThreshold() const = 0;
+    virtual void setVarThreshold(double varThreshold) = 0;
+
+    virtual double getVarThresholdGen() const = 0;
+    virtual void setVarThresholdGen(double varThresholdGen) = 0;
+
+    virtual double getVarInit() const = 0;
+    virtual void setVarInit(double varInit) = 0;
+
+    virtual double getVarMin() const = 0;
+    virtual void setVarMin(double varMin) = 0;
+
+    virtual double getVarMax() const = 0;
+    virtual void setVarMax(double varMax) = 0;
+
+    virtual double getComplexityReductionThreshold() const = 0;
+    virtual void setComplexityReductionThreshold(double ct) = 0;
+
+    virtual bool getDetectShadows() const = 0;
+    virtual void setDetectShadows(bool detectshadows) = 0;
+
+    virtual int getShadowValue() const = 0;
+    virtual void setShadowValue(int value) = 0;
+
+    virtual double getShadowThreshold() const = 0;
+    virtual void setShadowThreshold(double value) = 0;
 };
 
+CV_EXPORTS Ptr<BackgroundSubtractorMOG2>
+    createBackgroundSubtractorMOG2(int history=500, double varThreshold=16,
+                                   bool detectShadows=true);
+
 /**
  * Background Subtractor module. Takes a series of images and returns a sequence of mask (8UC1)
  * images of the same size, where 255 indicates Foreground and 0 represents Background.
@@ -197,66 +155,44 @@ protected:
  * Variable-Lighting Conditions for a Responsive Audio Art Installation," A. Godbehere,
  * A. Matsukawa, K. Goldberg, American Control Conference, Montreal, June 2012.
  */
-class CV_EXPORTS BackgroundSubtractorGMG: public cv::BackgroundSubtractor
+class BackgroundSubtractorGMG : public BackgroundSubtractor
 {
 public:
-    BackgroundSubtractorGMG();
-    virtual ~BackgroundSubtractorGMG();
-    virtual AlgorithmInfo* info() const;
-
-    /**
-     * Validate parameters and set up data structures for appropriate image size.
-     * Must call before running on data.
-     * @param frameSize input frame size
-     * @param min minimum value taken on by pixels in image sequence. Usually 0
-     * @param max maximum value taken on by pixels in image sequence. e.g. 1.0 or 255
-     */
-    void initialize(cv::Size frameSize, double min, double max);
-
-    /**
-     * Performs single-frame background subtraction and builds up a statistical background image
-     * model.
-     * @param image Input image
-     * @param fgmask Output mask image representing foreground and background pixels
-     */
-    virtual void operator()(InputArray image, OutputArray fgmask, double learningRate=-1.0);
-
-    /**
-     * Releases all inner buffers.
-     */
-    void release();
-
-    //! Total number of distinct colors to maintain in histogram.
-    int maxFeatures;
-    //! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
-    double learningRate;
-    //! Number of frames of video to use to initialize histograms.
-    int numInitializationFrames;
-    //! Number of discrete levels in each channel to be used in histograms.
-    int quantizationLevels;
-    //! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
-    double backgroundPrior;
-    //! Value above which pixel is determined to be FG.
-    double decisionThreshold;
-    //! Smoothing radius, in pixels, for cleaning up FG image.
-    int smoothingRadius;
-    //! Perform background model update
-    bool updateBackgroundModel;
-
-private:
-    double maxVal_;
-    double minVal_;
-
-    cv::Size frameSize_;
-    int frameNum_;
-
-    cv::Mat_<int> nfeatures_;
-    cv::Mat_<unsigned int> colors_;
-    cv::Mat_<float> weights_;
-
-    cv::Mat buf_;
+    virtual int getMaxFeatures() const = 0;
+    virtual void setMaxFeatures(int maxFeatures) = 0;
+
+    virtual double getDefaultLearningRate() const = 0;
+    virtual void setDefaultLearningRate(double lr) = 0;
+
+    virtual int getNumFrames() const = 0;
+    virtual void setNumFrames(int nframes) = 0;
+
+    virtual int getQuantizationLevels() const = 0;
+    virtual void setQuantizationLevels(int nlevels) = 0;
+
+    virtual double getBackgroundPrior() const = 0;
+    virtual void setBackgroundPrior(double bgprior) = 0;
+
+    virtual int getSmoothingRadius() const = 0;
+    virtual void setSmoothingRadius(int radius) = 0;
+
+    virtual double getDecisionThreshold() const = 0;
+    virtual void setDecisionThreshold(double thresh) = 0;
+
+    virtual bool getUpdateBackgroundModel() const = 0;
+    virtual void setUpdateBackgroundModel(bool update) = 0;
+
+    virtual double getMinVal() const = 0;
+    virtual void setMinVal(double val) = 0;
+
+    virtual double getMaxVal() const = 0;
+    virtual void setMaxVal(double val) = 0;
 };
 
+CV_EXPORTS Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int initializationFrames=120,
+                                                                      double decisionThreshold=0.8);
+
 }
 #endif
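In short, each concrete class becomes a pure interface plus a factory function, and the former public data members become typed get/set pairs. A small sketch of driving MOG2 through the header above (parameter values arbitrary):

#include <opencv2/video/background_segm.hpp>

void runMOG2(const cv::Mat& frame, cv::Mat& fgmask)
{
    cv::Ptr<cv::BackgroundSubtractorMOG2> bs =
        cv::createBackgroundSubtractorMOG2(500, 16, true); // history, varThreshold, detectShadows
    bs->setShadowValue(127);       // typed setters replace direct member access
    bs->setShadowThreshold(0.5);
    bs->apply(frame, fgmask);      // per-frame update with the default learning rate
}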

View File

@@ -219,23 +219,6 @@ CVAPI(const CvMat*) cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement
 #define cvKalmanUpdateByMeasurement cvKalmanCorrect
 
-/****************************************************************************************\
-*                          Image Alignment (ECC algorithm)                               *
-\****************************************************************************************/
-
-enum
-{
-    MOTION_TRANSLATION,
-    MOTION_EUCLIDEAN,
-    MOTION_AFFINE,
-    MOTION_HOMOGRAPHY
-};
-
-/* Estimate the geometric transformation between 2 images (area-based alignment) */
-CVAPI(double) cvFindTransformECC (const CvArr* templateImage, const CvArr* inputImage,
-                                  CvMat* warpMatrix,
-                                  const int motionType,
-                                  const CvTermCriteria criteria);
-
 #ifdef __cplusplus
 }
@@ -341,6 +324,14 @@ CV_EXPORTS_W void calcOpticalFlowFarneback( InputArray prev, InputArray next,
 CV_EXPORTS_W Mat estimateRigidTransform( InputArray src, InputArray dst,
                                          bool fullAffine);
 
+enum
+{
+    MOTION_TRANSLATION=0,
+    MOTION_EUCLIDEAN=1,
+    MOTION_AFFINE=2,
+    MOTION_HOMOGRAPHY=3
+};
+
 //! estimates the best-fit Translation, Euclidean, Affine or Perspective Transformation
 // with respect to Enhanced Correlation Coefficient criterion that maps one image to
 // another (area-based alignment)
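So the C enum and cvFindTransformECC go away, the MOTION_* constants move next to the C++ API, and the C++ findTransformECC declared after this comment becomes the single entry point. A usage sketch, assuming the 2.4-era signature (template image, input image, warp, motion type, termination criteria):

#include <opencv2/video/tracking.hpp>

double alignAffine(const cv::Mat& templateImg, const cv::Mat& inputImg, cv::Mat& warp)
{
    warp = cv::Mat::eye(2, 3, CV_32F);  // affine warps are 2x3; homographies would be 3x3
    return cv::findTransformECC(templateImg, inputImg, warp, cv::MOTION_AFFINE,
                                cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                                                 50, 0.001));
}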

View File

@@ -7,9 +7,11 @@
 // copy or use the software.
 //
 //
-// Intel License Agreement
+// License Agreement
+// For Open Source Computer Vision Library
 //
 // Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
 // Third party copyrights are property of their respective owners.
 //
 // Redistribution and use in source and binary forms, with or without modification,
@@ -22,7 +24,7 @@
 //   this list of conditions and the following disclaimer in the documentation
 //   and/or other materials provided with the distribution.
 //
-//   * The name of Intel Corporation may not be used to endorse or promote products
+//   * The name of the copyright holders may not be used to endorse or promote products
 //     derived from this software without specific prior written permission.
 //
 // This software is provided by the copyright holders and contributors "as is" and
@@ -58,15 +60,6 @@
 namespace cv
 {
 
-BackgroundSubtractor::~BackgroundSubtractor() {}
-
-void BackgroundSubtractor::operator()(InputArray, OutputArray, double)
-{
-}
-
-void BackgroundSubtractor::getBackgroundImage(OutputArray) const
-{
-}
-
 static const int defaultNMixtures = 5;
 static const int defaultHistory = 200;
 static const double defaultBackgroundRatio = 0.7;
@@ -74,55 +67,88 @@ static const double defaultVarThreshold = 2.5*2.5;
 static const double defaultNoiseSigma = 30*0.5;
 static const double defaultInitialWeight = 0.05;
 
-BackgroundSubtractorMOG::BackgroundSubtractorMOG()
+class BackgroundSubtractorMOGImpl : public BackgroundSubtractorMOG
 {
-    frameSize = Size(0,0);
-    frameType = 0;
+public:
+    //! the default constructor
+    BackgroundSubtractorMOGImpl()
+    {
+        frameSize = Size(0,0);
+        frameType = 0;
     nframes = 0;
     nmixtures = defaultNMixtures;
     history = defaultHistory;
     varThreshold = defaultVarThreshold;
     backgroundRatio = defaultBackgroundRatio;
     noiseSigma = defaultNoiseSigma;
     }
-
-BackgroundSubtractorMOG::BackgroundSubtractorMOG(int _history, int _nmixtures,
-                                                 double _backgroundRatio,
-                                                 double _noiseSigma)
-{
-    frameSize = Size(0,0);
-    frameType = 0;
-    nframes = 0;
-    nmixtures = std::min(_nmixtures > 0 ? _nmixtures : defaultNMixtures, 8);
-    history = _history > 0 ? _history : defaultHistory;
-    varThreshold = defaultVarThreshold;
-    backgroundRatio = std::min(_backgroundRatio > 0 ? _backgroundRatio : 0.95, 1.);
-    noiseSigma = _noiseSigma <= 0 ? defaultNoiseSigma : _noiseSigma;
-}
-
-BackgroundSubtractorMOG::~BackgroundSubtractorMOG()
-{
-}
-
-void BackgroundSubtractorMOG::initialize(Size _frameSize, int _frameType)
-{
-    frameSize = _frameSize;
-    frameType = _frameType;
-    nframes = 0;
-
-    int nchannels = CV_MAT_CN(frameType);
-    CV_Assert( CV_MAT_DEPTH(frameType) == CV_8U );
-
-    // for each gaussian mixture of each pixel bg model we store ...
-    // the mixture sort key (w/sum_of_variances), the mixture weight (w),
-    // the mean (nchannels values) and
-    // the diagonal covariance matrix (another nchannels values)
-    bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + 2*nchannels), CV_32F );
-    bgmodel = Scalar::all(0);
-}
+
+    // the full constructor that takes the length of the history,
+    // the number of gaussian mixtures, the background ratio parameter and the noise strength
+    BackgroundSubtractorMOGImpl(int _history, int _nmixtures, double _backgroundRatio, double _noiseSigma=0)
+    {
+        frameSize = Size(0,0);
+        frameType = 0;
+        nframes = 0;
+        nmixtures = std::min(_nmixtures > 0 ? _nmixtures : defaultNMixtures, 8);
+        history = _history > 0 ? _history : defaultHistory;
+        varThreshold = defaultVarThreshold;
+        backgroundRatio = std::min(_backgroundRatio > 0 ? _backgroundRatio : 0.95, 1.);
+        noiseSigma = _noiseSigma <= 0 ? defaultNoiseSigma : _noiseSigma;
+    }
+
+    //! the update operator
+    virtual void apply(InputArray image, OutputArray fgmask, double learningRate=0);
+
+    //! re-initiaization method
+    virtual void initialize(Size _frameSize, int _frameType)
+    {
+        frameSize = _frameSize;
+        frameType = _frameType;
+        nframes = 0;
+
+        int nchannels = CV_MAT_CN(frameType);
+        CV_Assert( CV_MAT_DEPTH(frameType) == CV_8U );
+
+        // for each gaussian mixture of each pixel bg model we store ...
+        // the mixture sort key (w/sum_of_variances), the mixture weight (w),
+        // the mean (nchannels values) and
+        // the diagonal covariance matrix (another nchannels values)
+        bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + 2*nchannels), CV_32F );
+        bgmodel = Scalar::all(0);
+    }
+
+    virtual AlgorithmInfo* info() const { return 0; }
+
+    virtual void getBackgroundImage(OutputArray) const
+    {
+        CV_Error( CV_StsNotImplemented, "" );
+    }
+
+    virtual int getHistory() const { return history; }
+    virtual void setHistory(int _nframes) { history = _nframes; }
+
+    virtual int getNMixtures() const { return nmixtures; }
+    virtual void setNMixtures(int nmix) { nmixtures = nmix; }
+
+    virtual double getBackgroundRatio() const { return backgroundRatio; }
+    virtual void setBackgroundRatio(double _backgroundRatio) { backgroundRatio = _backgroundRatio; }
+
+    virtual double getNoiseSigma() const { return noiseSigma; }
+    virtual void setNoiseSigma(double _noiseSigma) { noiseSigma = _noiseSigma; }
+
+protected:
+    Size frameSize;
+    int frameType;
+    Mat bgmodel;
+    int nframes;
+    int history;
+    int nmixtures;
+    double varThreshold;
+    double backgroundRatio;
+    double noiseSigma;
+};
 
 template<typename VT> struct MixData
@@ -391,7 +417,7 @@ static void process8uC3( const Mat& image, Mat& fgmask, double learningRate,
     }
 }
 
-void BackgroundSubtractorMOG::operator()(InputArray _image, OutputArray _fgmask, double learningRate)
+void BackgroundSubtractorMOGImpl::apply(InputArray _image, OutputArray _fgmask, double learningRate)
 {
     Mat image = _image.getMat();
     bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType;
@@ -415,6 +441,12 @@ void BackgroundSubtractorMOG::operator()(InputArray _image, OutputArray _fgmask,
         CV_Error( CV_StsUnsupportedFormat, "Only 1- and 3-channel 8-bit images are supported in BackgroundSubtractorMOG" );
 }
 
+Ptr<BackgroundSubtractorMOG> createBackgroundSubtractorMOG(int history, int nmixtures,
+                                                           double backgroundRatio, double noiseSigma)
+{
+    return new BackgroundSubtractorMOGImpl(history, nmixtures, backgroundRatio, noiseSigma);
+}
+
 }
 
 /* End of file. */

View File

@@ -7,9 +7,11 @@
 // copy or use the software.
 //
 //
-// Intel License Agreement
+// License Agreement
+// For Open Source Computer Vision Library
 //
 // Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
 // Third party copyrights are property of their respective owners.
 //
 // Redistribution and use in source and binary forms, with or without modification,
@@ -22,7 +24,7 @@
 //   this list of conditions and the following disclaimer in the documentation
 //   and/or other materials provided with the distribution.
 //
-//   * The name of Intel Corporation may not be used to endorse or promote products
+//   * The name of the copyright holders may not be used to endorse or promote products
 //     derived from this software without specific prior written permission.
 //
 // This software is provided by the copyright holders and contributors "as is" and
@@ -114,6 +116,176 @@ static const float defaultfCT2 = 0.05f; // complexity reduction prior constant 0
 static const unsigned char defaultnShadowDetection2 = (unsigned char)127; // value to use in the segmentation mask for shadows, set 0 not to do shadow detection
 static const float defaultfTau = 0.5f; // Tau - shadow threshold, see the paper for explanation
 
+class CV_EXPORTS BackgroundSubtractorMOG2Impl : public BackgroundSubtractorMOG2
+{
+public:
+    //! the default constructor
+    BackgroundSubtractorMOG2Impl()
+    {
+        frameSize = Size(0,0);
+        frameType = 0;
+
+        nframes = 0;
+        history = defaultHistory2;
+        varThreshold = defaultVarThreshold2;
+        bShadowDetection = 1;
+
+        nmixtures = defaultNMixtures2;
+        backgroundRatio = defaultBackgroundRatio2;
+        fVarInit = defaultVarInit2;
+        fVarMax = defaultVarMax2;
+        fVarMin = defaultVarMin2;
+
+        varThresholdGen = defaultVarThresholdGen2;
+        fCT = defaultfCT2;
+        nShadowDetection = defaultnShadowDetection2;
+        fTau = defaultfTau;
+    }
+
+    //! the full constructor that takes the length of the history,
+    // the number of gaussian mixtures, the background ratio parameter and the noise strength
+    BackgroundSubtractorMOG2Impl(int _history, float _varThreshold, bool _bShadowDetection=true)
+    {
+        frameSize = Size(0,0);
+        frameType = 0;
+
+        nframes = 0;
+        history = _history > 0 ? _history : defaultHistory2;
+        varThreshold = (_varThreshold>0)? _varThreshold : defaultVarThreshold2;
+        bShadowDetection = _bShadowDetection;
+
+        nmixtures = defaultNMixtures2;
+        backgroundRatio = defaultBackgroundRatio2;
+        fVarInit = defaultVarInit2;
+        fVarMax = defaultVarMax2;
+        fVarMin = defaultVarMin2;
+
+        varThresholdGen = defaultVarThresholdGen2;
+        fCT = defaultfCT2;
+        nShadowDetection = defaultnShadowDetection2;
+        fTau = defaultfTau;
+    }
+
+    //! the destructor
+    ~BackgroundSubtractorMOG2Impl() {}
+
+    //! the update operator
+    void apply(InputArray image, OutputArray fgmask, double learningRate=-1);
+
+    //! computes a background image which are the mean of all background gaussians
+    virtual void getBackgroundImage(OutputArray backgroundImage) const;
+
+    //! re-initiaization method
+    void initialize(Size _frameSize, int _frameType)
+    {
+        frameSize = _frameSize;
+        frameType = _frameType;
+        nframes = 0;
+
+        int nchannels = CV_MAT_CN(frameType);
+        CV_Assert( nchannels <= CV_CN_MAX );
+
+        // for each gaussian mixture of each pixel bg model we store ...
+        // the mixture weight (w),
+        // the mean (nchannels values) and
+        // the covariance
+        bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + nchannels), CV_32F );
+        //make the array for keeping track of the used modes per pixel - all zeros at start
+        bgmodelUsedModes.create(frameSize,CV_8U);
+        bgmodelUsedModes = Scalar::all(0);
+    }
+
+    virtual AlgorithmInfo* info() const { return 0; }
+
+    virtual int getHistory() const { return history; }
+    virtual void setHistory(int _nframes) { history = _nframes; }
+
+    virtual int getNMixtures() const { return nmixtures; }
+    virtual void setNMixtures(int nmix) { nmixtures = nmix; }
+
+    virtual double getBackgroundRatio() const { return backgroundRatio; }
+    virtual void setBackgroundRatio(double _backgroundRatio) { backgroundRatio = (float)_backgroundRatio; }
+
+    virtual double getVarThreshold() const { return varThreshold; }
+    virtual void setVarThreshold(double _varThreshold) { varThreshold = _varThreshold; }
+
+    virtual double getVarThresholdGen() const { return varThresholdGen; }
+    virtual void setVarThresholdGen(double _varThresholdGen) { varThresholdGen = (float)_varThresholdGen; }
+
+    virtual double getVarInit() const { return fVarInit; }
+    virtual void setVarInit(double varInit) { fVarInit = (float)varInit; }
+
+    virtual double getVarMin() const { return fVarMin; }
+    virtual void setVarMin(double varMin) { fVarMin = (float)varMin; }
+
+    virtual double getVarMax() const { return fVarMax; }
+    virtual void setVarMax(double varMax) { fVarMax = (float)varMax; }
+
+    virtual double getComplexityReductionThreshold() const { return fCT; }
+    virtual void setComplexityReductionThreshold(double ct) { fCT = (float)ct; }
+
+    virtual bool getDetectShadows() const { return bShadowDetection; }
+    virtual void setDetectShadows(bool detectshadows) { bShadowDetection = detectshadows; }
+
+    virtual int getShadowValue() const { return nShadowDetection; }
+    virtual void setShadowValue(int value) { nShadowDetection = (uchar)value; }
+
+    virtual double getShadowThreshold() const { return fTau; }
+    virtual void setShadowThreshold(double value) { fTau = (float)value; }
+
+protected:
+    Size frameSize;
+    int frameType;
+    Mat bgmodel;
+    Mat bgmodelUsedModes;//keep track of number of modes per pixel
+    int nframes;
+    int history;
+    int nmixtures;
+    //! here it is the maximum allowed number of mixture components.
+    //! Actual number is determined dynamically per pixel
+    double varThreshold;
+    // threshold on the squared Mahalanobis distance to decide if it is well described
+    // by the background model or not. Related to Cthr from the paper.
+    // This does not influence the update of the background. A typical value could be 4 sigma
+    // and that is varThreshold=4*4=16; Corresponds to Tb in the paper.
+
+    /////////////////////////
+    // less important parameters - things you might change but be carefull
+    ////////////////////////
+    float backgroundRatio;
+    // corresponds to fTB=1-cf from the paper
+    // TB - threshold when the component becomes significant enough to be included into
+    // the background model. It is the TB=1-cf from the paper. So I use cf=0.1 => TB=0.
+    // For alpha=0.001 it means that the mode should exist for approximately 105 frames before
+    // it is considered foreground
+    // float noiseSigma;
+    float varThresholdGen;
+    //correspondts to Tg - threshold on the squared Mahalan. dist. to decide
+    //when a sample is close to the existing components. If it is not close
+    //to any a new component will be generated. I use 3 sigma => Tg=3*3=9.
+    //Smaller Tg leads to more generated components and higher Tg might make
+    //lead to small number of components but they can grow too large
+    float fVarInit;
+    float fVarMin;
+    float fVarMax;
+    //initial variance for the newly generated components.
+    //It will will influence the speed of adaptation. A good guess should be made.
+    //A simple way is to estimate the typical standard deviation from the images.
+    //I used here 10 as a reasonable value
+    // min and max can be used to further control the variance
+    float fCT;//CT - complexity reduction prior
+    //this is related to the number of samples needed to accept that a component
+    //actually exists. We use CT=0.05 of all the samples. By setting CT=0 you get
+    //the standard Stauffer&Grimson algorithm (maybe not exact but very similar)
+
+    //shadow detection parameters
+    bool bShadowDetection;//default 1 - do shadow detection
+    unsigned char nShadowDetection;//do shadow detection - insert this value as the detection result - 127 default value
+    float fTau;
+    // Tau - shadow threshold. The shadow is detected if the pixel is darker
+    //version of the background. Tau is a threshold on how much darker the shadow can be.
+    //Tau= 0.5 means that if pixel is more than 2 times darker then it is not shadow
+    //See: Prati,Mikic,Trivedi,Cucchiarra,"Detecting Moving Shadows...",IEEE PAMI,2003.
+};
+
 struct GaussBGStatModel2Params
 {
     //image info
@@ -248,8 +420,9 @@ detectShadowGMM(const float* data, int nchannels, int nmodes,
 //IEEE Trans. on Pattern Analysis and Machine Intelligence, vol.26, no.5, pages 651-656, 2004
 //http://www.zoranz.net/Publications/zivkovic2004PAMI.pdf
 
-struct MOG2Invoker
+class MOG2Invoker : public ParallelLoopBody
 {
+public:
     MOG2Invoker(const Mat& _src, Mat& _dst,
                 GMM* _gmm, float* _mean,
                 uchar* _modesUsed,
@@ -280,9 +453,9 @@ struct MOG2Invoker
         cvtfunc = src->depth() != CV_32F ? getConvertFunc(src->depth(), CV_32F) : 0;
     }
 
-    void operator()(const BlockedRange& range) const
+    void operator()(const Range& range) const
     {
-        int y0 = range.begin(), y1 = range.end();
+        int y0 = range.start, y1 = range.end;
         int ncols = src->cols, nchannels = src->channels();
         AutoBuffer<float> buf(src->cols*nchannels);
         float alpha1 = 1.f - alphaT;
@@ -479,75 +652,7 @@ struct MOG2Invoker
     BinaryFunc cvtfunc;
 };
 
-BackgroundSubtractorMOG2::BackgroundSubtractorMOG2()
-{
-    frameSize = Size(0,0);
-    frameType = 0;
-
-    nframes = 0;
-    history = defaultHistory2;
-    varThreshold = defaultVarThreshold2;
-    bShadowDetection = 1;
-
-    nmixtures = defaultNMixtures2;
-    backgroundRatio = defaultBackgroundRatio2;
-    fVarInit = defaultVarInit2;
-    fVarMax = defaultVarMax2;
-    fVarMin = defaultVarMin2;
-
-    varThresholdGen = defaultVarThresholdGen2;
-    fCT = defaultfCT2;
-    nShadowDetection = defaultnShadowDetection2;
-    fTau = defaultfTau;
-}
-
-BackgroundSubtractorMOG2::BackgroundSubtractorMOG2(int _history, float _varThreshold, bool _bShadowDetection)
-{
-    frameSize = Size(0,0);
-    frameType = 0;
-
-    nframes = 0;
-    history = _history > 0 ? _history : defaultHistory2;
-    varThreshold = (_varThreshold>0)? _varThreshold : defaultVarThreshold2;
-    bShadowDetection = _bShadowDetection;
-
-    nmixtures = defaultNMixtures2;
-    backgroundRatio = defaultBackgroundRatio2;
-    fVarInit = defaultVarInit2;
-    fVarMax = defaultVarMax2;
-    fVarMin = defaultVarMin2;
-
-    varThresholdGen = defaultVarThresholdGen2;
-    fCT = defaultfCT2;
-    nShadowDetection = defaultnShadowDetection2;
-    fTau = defaultfTau;
-}
-
-BackgroundSubtractorMOG2::~BackgroundSubtractorMOG2()
-{
-}
-
-void BackgroundSubtractorMOG2::initialize(Size _frameSize, int _frameType)
-{
-    frameSize = _frameSize;
-    frameType = _frameType;
-    nframes = 0;
-
-    int nchannels = CV_MAT_CN(frameType);
-    CV_Assert( nchannels <= CV_CN_MAX );
-
-    // for each gaussian mixture of each pixel bg model we store ...
-    // the mixture weight (w),
-    // the mean (nchannels values) and
-    // the covariance
-    bgmodel.create( 1, frameSize.height*frameSize.width*nmixtures*(2 + nchannels), CV_32F );
-    //make the array for keeping track of the used modes per pixel - all zeros at start
-    bgmodelUsedModes.create(frameSize,CV_8U);
-    bgmodelUsedModes = Scalar::all(0);
-}
-
-void BackgroundSubtractorMOG2::operator()(InputArray _image, OutputArray _fgmask, double learningRate)
+void BackgroundSubtractorMOG2Impl::apply(InputArray _image, OutputArray _fgmask, double learningRate)
 {
     Mat image = _image.getMat();
     bool needToInitialize = nframes == 0 || learningRate >= 1 || image.size() != frameSize || image.type() != frameType;
@@ -562,18 +667,19 @@ void BackgroundSubtractorMOG2::operator()(InputArray _image, OutputArray _fgmask
     learningRate = learningRate >= 0 && nframes > 1 ? learningRate : 1./std::min( 2*nframes, history );
     CV_Assert(learningRate >= 0);
 
-    parallel_for(BlockedRange(0, image.rows),
+    parallel_for_(Range(0, image.rows),
                  MOG2Invoker(image, fgmask,
                              (GMM*)bgmodel.data,
                              (float*)(bgmodel.data + sizeof(GMM)*nmixtures*image.rows*image.cols),
                              bgmodelUsedModes.data, nmixtures, (float)learningRate,
                              (float)varThreshold,
                              backgroundRatio, varThresholdGen,
                              fVarInit, fVarMin, fVarMax, float(-learningRate*fCT), fTau,
-                             bShadowDetection, nShadowDetection));
+                             bShadowDetection, nShadowDetection),
+                 image.total()/(double)(1 << 16));
 }
 
-void BackgroundSubtractorMOG2::getBackgroundImage(OutputArray backgroundImage) const
+void BackgroundSubtractorMOG2Impl::getBackgroundImage(OutputArray backgroundImage) const
 {
     int nchannels = CV_MAT_CN(frameType);
     CV_Assert( nchannels == 3 );
@@ -626,6 +732,13 @@ void BackgroundSubtractorMOG2::getBackgroundImage(OutputArray backgroundImage) c
     }
 }
 
+Ptr<BackgroundSubtractorMOG2> createBackgroundSubtractorMOG2(int _history, double _varThreshold,
+                                                             bool _bShadowDetection)
+{
+    return new BackgroundSubtractorMOG2Impl(_history, (float)_varThreshold, _bShadowDetection);
+}
+
 }
 
 /* End of file. */
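The invoker change above is the standard BlockedRange-to-ParallelLoopBody migration: derive from cv::ParallelLoopBody, take a cv::Range (fields .start/.end instead of the .begin()/.end() methods), and pass parallel_for_ an optional stripe-count hint. A minimal sketch of the same pattern with a hypothetical body class:

#include <opencv2/core/core.hpp>

class RowScaler : public cv::ParallelLoopBody
{
public:
    RowScaler(cv::Mat& m, double s) : m_(&m), s_(s) {}
    void operator()(const cv::Range& range) const
    {
        // each stripe scales its own band of rows; no shared writable state
        for (int y = range.start; y < range.end; y++)
            m_->row(y) *= s_;
    }
private:
    cv::Mat* m_;
    double s_;
};

void scaleRowsParallel(cv::Mat& m)
{
    // the third argument is the nstripes hint, analogous to image.total()/(double)(1 << 16) above
    cv::parallel_for_(cv::Range(0, m.rows), RowScaler(m, 0.5), m.total()/(double)(1 << 16));
}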

View File

@@ -8,8 +8,10 @@
 //
 //
 // License Agreement
+// For Open Source Computer Vision Library
 //
 // Copyright (C) 2000, Intel Corporation, all rights reserved.
+// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
 // Third party copyrights are property of their respective owners.
 //
 // Redistribution and use in source and binary forms, with or without modification,
@@ -22,7 +24,7 @@
 //   this list of conditions and the following disclaimer in the documentation
 //   and/or other materials provided with the distribution.
 //
-//   * The name of Intel Corporation may not be used to endorse or promote products
+//   * The name of the copyright holders may not be used to endorse or promote products
 //     derived from this software without specific prior written permission.
 //
 // This software is provided by the copyright holders and contributors "as is" and
@@ -48,7 +50,105 @@
 
 #include "precomp.hpp"
 
-cv::BackgroundSubtractorGMG::BackgroundSubtractorGMG()
+namespace cv
+{
+
+class CV_EXPORTS BackgroundSubtractorGMGImpl : public BackgroundSubtractorGMG
+{
+public:
+    BackgroundSubtractorGMGImpl();
+    ~BackgroundSubtractorGMGImpl();
+
+    virtual AlgorithmInfo* info() const { return 0; }
+
+    /**
+     * Validate parameters and set up data structures for appropriate image size.
+     * Must call before running on data.
+     * @param frameSize input frame size
+     * @param min minimum value taken on by pixels in image sequence. Usually 0
+     * @param max maximum value taken on by pixels in image sequence. e.g. 1.0 or 255
+     */
+    void initialize(Size frameSize, double minVal, double maxVal);
+
+    /**
+     * Performs single-frame background subtraction and builds up a statistical background image
+     * model.
+     * @param image Input image
+     * @param fgmask Output mask image representing foreground and background pixels
+     */
+    virtual void apply(InputArray image, OutputArray fgmask, double learningRate=-1.0);
+
+    /**
+     * Releases all inner buffers.
+     */
+    void release();
+
+    virtual int getMaxFeatures() const { return maxFeatures; }
+    virtual void setMaxFeatures(int _maxFeatures) { maxFeatures = _maxFeatures; }
+
+    virtual double getDefaultLearningRate() const { return learningRate; }
+    virtual void setDefaultLearningRate(double lr) { learningRate = lr; }
+
+    virtual int getNumFrames() const { return numInitializationFrames; }
+    virtual void setNumFrames(int nframes) { numInitializationFrames = nframes; }
+
+    virtual int getQuantizationLevels() const { return quantizationLevels; }
+    virtual void setQuantizationLevels(int nlevels) { quantizationLevels = nlevels; }
+
+    virtual double getBackgroundPrior() const { return backgroundPrior; }
+    virtual void setBackgroundPrior(double bgprior) { backgroundPrior = bgprior; }
+
+    virtual int getSmoothingRadius() const { return smoothingRadius; }
+    virtual void setSmoothingRadius(int radius) { smoothingRadius = radius; }
+
+    virtual double getDecisionThreshold() const { return decisionThreshold; }
+    virtual void setDecisionThreshold(double thresh) { decisionThreshold = thresh; }
+
+    virtual bool getUpdateBackgroundModel() const { return updateBackgroundModel; }
+    virtual void setUpdateBackgroundModel(bool update) { updateBackgroundModel = update; }
+
+    virtual double getMinVal() const { return minVal_; }
+    virtual void setMinVal(double val) { minVal_ = val; }
+
+    virtual double getMaxVal() const { return maxVal_; }
+    virtual void setMaxVal(double val) { maxVal_ = val; }
+
+    virtual void getBackgroundImage(OutputArray) const
+    {
+        CV_Error( CV_StsNotImplemented, "" );
+    }
+
+    //! Total number of distinct colors to maintain in histogram.
+    int maxFeatures;
+    //! Set between 0.0 and 1.0, determines how quickly features are "forgotten" from histograms.
+    double learningRate;
+    //! Number of frames of video to use to initialize histograms.
+    int numInitializationFrames;
+    //! Number of discrete levels in each channel to be used in histograms.
+    int quantizationLevels;
+    //! Prior probability that any given pixel is a background pixel. A sensitivity parameter.
+    double backgroundPrior;
+    //! Value above which pixel is determined to be FG.
+    double decisionThreshold;
+    //! Smoothing radius, in pixels, for cleaning up FG image.
+    int smoothingRadius;
+    //! Perform background model update
+    bool updateBackgroundModel;
+
+private:
+    double maxVal_;
+    double minVal_;
+
+    Size frameSize_;
+    int frameNum_;
+
+    Mat_<int> nfeatures_;
+    Mat_<unsigned int> colors_;
+    Mat_<float> weights_;
+
+    Mat buf_;
+};
+
+BackgroundSubtractorGMGImpl::BackgroundSubtractorGMGImpl()
 {
 /*
  * Default Parameter Values. Override with algorithm "set" method.
@@ -61,23 +161,24 @@ cv::BackgroundSubtractorGMG::BackgroundSubtractorGMG()
     decisionThreshold = 0.8;
     smoothingRadius = 7;
     updateBackgroundModel = true;
+    minVal_ = maxVal_ = 0;
 }
 
-cv::BackgroundSubtractorGMG::~BackgroundSubtractorGMG()
+BackgroundSubtractorGMGImpl::~BackgroundSubtractorGMGImpl()
 {
 }
 
-void cv::BackgroundSubtractorGMG::initialize(cv::Size frameSize, double min, double max)
+void BackgroundSubtractorGMGImpl::initialize(Size frameSize, double minVal, double maxVal)
 {
-    CV_Assert(min < max);
+    CV_Assert(minVal < maxVal);
     CV_Assert(maxFeatures > 0);
     CV_Assert(learningRate >= 0.0 && learningRate <= 1.0);
     CV_Assert(numInitializationFrames >= 1);
     CV_Assert(quantizationLevels >= 1 && quantizationLevels <= 255);
     CV_Assert(backgroundPrior >= 0.0 && backgroundPrior <= 1.0);
 
-    minVal_ = min;
-    maxVal_ = max;
+    minVal_ = minVal;
+    maxVal_ = maxVal;
 
     frameSize_ = frameSize;
     frameNum_ = 0;
@@ -86,7 +187,7 @@ void cv::BackgroundSubtractorGMG::initialize(cv::Size frameSize, double min, dou
     colors_.create(frameSize_.area(), maxFeatures);
     weights_.create(frameSize_.area(), maxFeatures);
 
-    nfeatures_.setTo(cv::Scalar::all(0));
+    nfeatures_.setTo(Scalar::all(0));
 }
 
 namespace
@@ -181,10 +282,10 @@ namespace
         }
     };
 
-    class GMG_LoopBody : public cv::ParallelLoopBody
+    class GMG_LoopBody : public ParallelLoopBody
     {
     public:
-        GMG_LoopBody(const cv::Mat& frame, const cv::Mat& fgmask, const cv::Mat_<int>& nfeatures, const cv::Mat_<unsigned int>& colors, const cv::Mat_<float>& weights,
+        GMG_LoopBody(const Mat& frame, const Mat& fgmask, const Mat_<int>& nfeatures, const Mat_<unsigned int>& colors, const Mat_<float>& weights,
                      int maxFeatures, double learningRate, int numInitializationFrames, int quantizationLevels, double backgroundPrior, double decisionThreshold,
                      double maxVal, double minVal, int frameNum, bool updateBackgroundModel) :
             frame_(frame), fgmask_(fgmask), nfeatures_(nfeatures), colors_(colors), weights_(weights),
@@ -194,16 +295,16 @@ namespace
         {
         }
 
-        void operator() (const cv::Range& range) const;
+        void operator() (const Range& range) const;
 
     private:
-        cv::Mat frame_;
+        Mat frame_;
 
-        mutable cv::Mat_<uchar> fgmask_;
+        mutable Mat_<uchar> fgmask_;
 
-        mutable cv::Mat_<int> nfeatures_;
-        mutable cv::Mat_<unsigned int> colors_;
-        mutable cv::Mat_<float> weights_;
+        mutable Mat_<int> nfeatures_;
+        mutable Mat_<unsigned int> colors_;
+        mutable Mat_<float> weights_;
 
         int maxFeatures_;
         double learningRate_;
@@ -218,7 +319,7 @@ namespace
         int frameNum_;
     };
 
-    void GMG_LoopBody::operator() (const cv::Range& range) const
+    void GMG_LoopBody::operator() (const Range& range) const
     {
         typedef unsigned int (*func_t)(const void* src_, int x, int cn, double minVal, double maxVal, int quantizationLevels);
 
         static const func_t funcs[] =
@@ -296,7 +397,7 @@ namespace
     }
 }
 
-void cv::BackgroundSubtractorGMG::operator ()(InputArray _frame, OutputArray _fgmask, double newLearningRate)
+void BackgroundSubtractorGMGImpl::apply(InputArray _frame, OutputArray _fgmask, double newLearningRate)
 {
     Mat frame = _frame.getMat();
 
@@ -310,7 +411,16 @@ void cv::BackgroundSubtractorGMG::operator ()(InputArray _frame, OutputArray _fg
     }
 
     if (frame.size() != frameSize_)
-        initialize(frame.size(), 0.0, frame.depth() == CV_8U ? 255.0 : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0);
+    {
+        double minval = minVal_;
+        double maxval = maxVal_;
+        if( minVal_ == 0 && maxVal_ == 0 )
+        {
+            minval = 0;
+            maxval = frame.depth() == CV_8U ? 255.0 : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0;
+        }
+        initialize(frame.size(), minval, maxval);
+    }
 
     _fgmask.create(frameSize_, CV_8UC1);
     Mat fgmask = _fgmask.getMat();
@@ -323,19 +433,58 @@ void cv::BackgroundSubtractorGMG::operator ()(InputArray _frame, OutputArray _fg
     if (smoothingRadius > 0)
     {
         medianBlur(fgmask, buf_, smoothingRadius);
-        cv::swap(fgmask, buf_);
+        swap(fgmask, buf_);
     }
 
     // keep track of how many frames we have processed
     ++frameNum_;
 }
 
-void cv::BackgroundSubtractorGMG::release()
+void BackgroundSubtractorGMGImpl::release()
 {
-    frameSize_ = cv::Size();
+    frameSize_ = Size();
 
     nfeatures_.release();
     colors_.release();
     weights_.release();
     buf_.release();
 }
 
+Ptr<BackgroundSubtractorGMG> createBackgroundSubtractorGMG(int initializationFrames, double decisionThreshold)
+{
+    Ptr<BackgroundSubtractorGMG> bgfg = new BackgroundSubtractorGMGImpl;
+    bgfg->setNumFrames(initializationFrames);
+    bgfg->setDecisionThreshold(decisionThreshold);
+
+    return bgfg;
+}
+
+/*
+///////////////////////////////////////////////////////////////////////////////////////////////////////////
+
+CV_INIT_ALGORITHM(BackgroundSubtractorGMG, "BackgroundSubtractor.GMG",
+                  obj.info()->addParam(obj, "maxFeatures", obj.maxFeatures,false,0,0,
+                                       "Maximum number of features to store in histogram. Harsh enforcement of sparsity constraint.");
+                  obj.info()->addParam(obj, "learningRate", obj.learningRate,false,0,0,
+                                       "Adaptation rate of histogram. Close to 1, slow adaptation. Close to 0, fast adaptation, features forgotten quickly.");
+                  obj.info()->addParam(obj, "initializationFrames", obj.numInitializationFrames,false,0,0,
+                                       "Number of frames to use to initialize histograms of pixels.");
+                  obj.info()->addParam(obj, "quantizationLevels", obj.quantizationLevels,false,0,0,
+                                       "Number of discrete colors to be used in histograms. Up-front quantization.");
+                  obj.info()->addParam(obj, "backgroundPrior", obj.backgroundPrior,false,0,0,
+                                       "Prior probability that each individual pixel is a background pixel.");
+                  obj.info()->addParam(obj, "smoothingRadius", obj.smoothingRadius,false,0,0,
+                                       "Radius of smoothing kernel to filter noise from FG mask image.");
+                  obj.info()->addParam(obj, "decisionThreshold", obj.decisionThreshold,false,0,0,
+                                       "Threshold for FG decision rule. Pixel is FG if posterior probability exceeds threshold.");
+                  obj.info()->addParam(obj, "updateBackgroundModel", obj.updateBackgroundModel,false,0,0,
+                                       "Perform background model update.");
+                  obj.info()->addParam(obj, "minVal", obj.minVal_,false,0,0,
+                                       "Minimum of the value range (mostly for regression testing)");
+                  obj.info()->addParam(obj, "maxVal", obj.maxVal_,false,0,0,
+                                       "Maximum of the value range (mostly for regression testing)");
+);
+*/
+
+}

View File

@ -7,10 +7,11 @@
// copy or use the software. // copy or use the software.
// //
// //
// Intel License Agreement // License Agreement
// For Open Source Computer Vision Library // For Open Source Computer Vision Library
// //
// Copyright (C) 2000, Intel Corporation, all rights reserved. // Copyright (C) 2000, Intel Corporation, all rights reserved.
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners. // Third party copyrights are property of their respective owners.
// //
// Redistribution and use in source and binary forms, with or without modification, // Redistribution and use in source and binary forms, with or without modification,
@ -40,285 +41,156 @@
//M*/ //M*/
#include "precomp.hpp" #include "precomp.hpp"
int cv::meanShift( InputArray _probImage, Rect& window, TermCriteria criteria )
/*F///////////////////////////////////////////////////////////////////////////////////////
// Name: cvMeanShift
// Purpose: MeanShift algorithm
// Context:
// Parameters:
// imgProb - 2D object probability distribution
// windowIn - CvRect of CAMSHIFT Window initial size
// numIters - If CAMSHIFT iterates this many times, stop
// windowOut - Location, height and width of converged CAMSHIFT window
// len - If != NULL, return equivalent len
// width - If != NULL, return equivalent width
// Returns:
// Number of iterations CAMSHIFT took to converge
// Notes:
//F*/
CV_IMPL int
cvMeanShift( const void* imgProb, CvRect windowIn,
CvTermCriteria criteria, CvConnectedComp* comp )
{ {
CvMoments moments; Mat mat = _probImage.getMat();
int i = 0, eps; Rect cur_rect = window;
CvMat stub, *mat = (CvMat*)imgProb;
CvMat cur_win;
CvRect cur_rect = windowIn;
if( comp ) CV_Assert( mat.channels() == 1 );
comp->rect = windowIn;
moments.m00 = moments.m10 = moments.m01 = 0; if( window.height <= 0 || window.width <= 0 )
mat = cvGetMat( mat, &stub );
if( CV_MAT_CN( mat->type ) > 1 )
CV_Error( CV_BadNumChannels, cvUnsupportedFormat );
if( windowIn.height <= 0 || windowIn.width <= 0 )
CV_Error( CV_StsBadArg, "Input window has non-positive sizes" ); CV_Error( CV_StsBadArg, "Input window has non-positive sizes" );
windowIn = cv::Rect(windowIn) & cv::Rect(0, 0, mat->cols, mat->rows); window = window & Rect(0, 0, mat.cols, mat.rows);
criteria = cvCheckTermCriteria( criteria, 1., 100 ); double eps = (criteria.type & TermCriteria::EPS) ? std::max(criteria.epsilon, 0.) : 1.;
eps = cvRound( criteria.epsilon * criteria.epsilon ); eps = cvRound(eps*eps);
int i, niters = (criteria.type & TermCriteria::MAX_ITER) ? std::max(criteria.maxCount, 1) : 100;
for( i = 0; i < criteria.max_iter; i++ ) for( i = 0; i < niters; i++ )
{ {
int dx, dy, nx, ny; cur_rect = cur_rect & Rect(0, 0, mat.cols, mat.rows);
double inv_m00; if( cur_rect == Rect() )
cur_rect = cv::Rect(cur_rect) & cv::Rect(0, 0, mat->cols, mat->rows);
if( cv::Rect(cur_rect) == cv::Rect() )
{ {
cur_rect.x = mat->cols/2; cur_rect.x = mat.cols/2;
cur_rect.y = mat->rows/2; cur_rect.y = mat.rows/2;
} }
cur_rect.width = MAX(cur_rect.width, 1); cur_rect.width = std::max(cur_rect.width, 1);
cur_rect.height = MAX(cur_rect.height, 1); cur_rect.height = std::max(cur_rect.height, 1);
cvGetSubRect( mat, &cur_win, cur_rect ); Moments m = moments(mat(cur_rect));
cvMoments( &cur_win, &moments );
/* Calculating center of mass */ // Calculating center of mass
if( fabs(moments.m00) < DBL_EPSILON ) if( fabs(m.m00) < DBL_EPSILON )
break; break;
inv_m00 = moments.inv_sqrt_m00*moments.inv_sqrt_m00; int dx = cvRound( m.m10/m.m00 - window.width*0.5 );
dx = cvRound( moments.m10 * inv_m00 - windowIn.width*0.5 ); int dy = cvRound( m.m01/m.m00 - window.height*0.5 );
dy = cvRound( moments.m01 * inv_m00 - windowIn.height*0.5 );
nx = cur_rect.x + dx; int nx = std::min(std::max(cur_rect.x + dx, 0), mat.cols - cur_rect.width);
ny = cur_rect.y + dy; int ny = std::min(std::max(cur_rect.y + dy, 0), mat.rows - cur_rect.height);
if( nx < 0 )
nx = 0;
else if( nx + cur_rect.width > mat->cols )
nx = mat->cols - cur_rect.width;
if( ny < 0 )
ny = 0;
else if( ny + cur_rect.height > mat->rows )
ny = mat->rows - cur_rect.height;
dx = nx - cur_rect.x; dx = nx - cur_rect.x;
dy = ny - cur_rect.y; dy = ny - cur_rect.y;
cur_rect.x = nx; cur_rect.x = nx;
cur_rect.y = ny; cur_rect.y = ny;
/* Check for coverage centers mass & window */ // Check whether the center of mass and the window center have converged
if( dx*dx + dy*dy < eps ) if( dx*dx + dy*dy < eps )
break; break;
} }
if( comp ) window = cur_rect;
{
comp->rect = cur_rect;
comp->area = (float)moments.m00;
}
return i; return i;
} }
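
A minimal sketch of calling the rewritten cv::meanShift; the probability map is assumed to be a back-projection computed elsewhere (e.g. with calcBackProject), and the termination values are placeholders.

#include <opencv2/opencv.hpp>

// Track a region in a precomputed probability map; 'window' is updated in place.
cv::Rect trackMeanShift(const cv::Mat& probImage, cv::Rect window)
{
    cv::TermCriteria crit(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1.0);
    int iters = cv::meanShift(probImage, window, crit);  // iterations until convergence
    (void)iters;
    return window;
}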
/*F///////////////////////////////////////////////////////////////////////////////////////
// Name: cvCamShift
// Purpose: CAMSHIFT algorithm
// Context:
// Parameters:
// imgProb - 2D object probability distribution
// windowIn - CvRect of CAMSHIFT Window initial size
// criteria - termination criteria for the search
// windowOut - Location, height and width of converged CAMSHIFT window
// orientation - If != NULL, return distribution orientation
// len - If != NULL, return equivalent len
// width - If != NULL, return equivalent width
// area - sum of all elements in result window
// Returns:
// Number of iterations CAMSHIFT took to converge
// Notes:
//F*/
CV_IMPL int
cvCamShift( const void* imgProb, CvRect windowIn,
CvTermCriteria criteria,
CvConnectedComp* _comp,
CvBox2D* box )
{
const int TOLERANCE = 10;
CvMoments moments;
double m00 = 0, m10, m01, mu20, mu11, mu02, inv_m00;
double a, b, c, xc, yc;
double rotate_a, rotate_c;
double theta = 0, square;
double cs, sn;
double length = 0, width = 0;
int itersUsed = 0;
CvConnectedComp comp;
CvMat cur_win, stub, *mat = (CvMat*)imgProb;
comp.rect = windowIn;
mat = cvGetMat( mat, &stub );
itersUsed = cvMeanShift( mat, windowIn, criteria, &comp );
windowIn = comp.rect;
windowIn.x -= TOLERANCE;
if( windowIn.x < 0 )
windowIn.x = 0;
windowIn.y -= TOLERANCE;
if( windowIn.y < 0 )
windowIn.y = 0;
windowIn.width += 2 * TOLERANCE;
if( windowIn.x + windowIn.width > mat->width )
windowIn.width = mat->width - windowIn.x;
windowIn.height += 2 * TOLERANCE;
if( windowIn.y + windowIn.height > mat->height )
windowIn.height = mat->height - windowIn.y;
cvGetSubRect( mat, &cur_win, windowIn );
/* Calculating moments in new center mass */
cvMoments( &cur_win, &moments );
m00 = moments.m00;
m10 = moments.m10;
m01 = moments.m01;
mu11 = moments.mu11;
mu20 = moments.mu20;
mu02 = moments.mu02;
if( fabs(m00) < DBL_EPSILON )
return -1;
inv_m00 = 1. / m00;
xc = cvRound( m10 * inv_m00 + windowIn.x );
yc = cvRound( m01 * inv_m00 + windowIn.y );
a = mu20 * inv_m00;
b = mu11 * inv_m00;
c = mu02 * inv_m00;
/* Calculating width & height */
square = sqrt( 4 * b * b + (a - c) * (a - c) );
/* Calculating orientation */
theta = atan2( 2 * b, a - c + square );
/* Calculating width & length of figure */
cs = cos( theta );
sn = sin( theta );
rotate_a = cs * cs * mu20 + 2 * cs * sn * mu11 + sn * sn * mu02;
rotate_c = sn * sn * mu20 - 2 * cs * sn * mu11 + cs * cs * mu02;
length = sqrt( rotate_a * inv_m00 ) * 4;
width = sqrt( rotate_c * inv_m00 ) * 4;
/* In case when theta is 0 or 1.57... the length & width may be exchanged */
if( length < width )
{
double t;
CV_SWAP( length, width, t );
CV_SWAP( cs, sn, t );
theta = CV_PI*0.5 - theta;
}
/* Saving results */
if( _comp || box )
{
int t0, t1;
int _xc = cvRound( xc );
int _yc = cvRound( yc );
t0 = cvRound( fabs( length * cs ));
t1 = cvRound( fabs( width * sn ));
t0 = MAX( t0, t1 ) + 2;
comp.rect.width = MIN( t0, (mat->width - _xc) * 2 );
t0 = cvRound( fabs( length * sn ));
t1 = cvRound( fabs( width * cs ));
t0 = MAX( t0, t1 ) + 2;
comp.rect.height = MIN( t0, (mat->height - _yc) * 2 );
comp.rect.x = MAX( 0, _xc - comp.rect.width / 2 );
comp.rect.y = MAX( 0, _yc - comp.rect.height / 2 );
comp.rect.width = MIN( mat->width - comp.rect.x, comp.rect.width );
comp.rect.height = MIN( mat->height - comp.rect.y, comp.rect.height );
comp.area = (float) m00;
}
if( _comp )
*_comp = comp;
if( box )
{
box->size.height = (float)length;
box->size.width = (float)width;
box->angle = (float)((CV_PI*0.5+theta)*180./CV_PI);
while(box->angle < 0)
box->angle += 360;
while(box->angle >= 360)
box->angle -= 360;
if(box->angle >= 180)
box->angle -= 180;
box->center = cvPoint2D32f( comp.rect.x + comp.rect.width*0.5f,
comp.rect.y + comp.rect.height*0.5f);
}
return itersUsed;
}
cv::RotatedRect cv::CamShift( InputArray _probImage, Rect& window, cv::RotatedRect cv::CamShift( InputArray _probImage, Rect& window,
TermCriteria criteria ) TermCriteria criteria )
{ {
CvConnectedComp comp; const int TOLERANCE = 10;
CvBox2D box; Mat mat = _probImage.getMat();
box.center.x = box.center.y = 0; box.angle = 0; box.size.width = box.size.height = 0; meanShift( mat, window, criteria );
comp.rect.x = comp.rect.y = comp.rect.width = comp.rect.height = 0;
Mat probImage = _probImage.getMat(); window.x -= TOLERANCE;
CvMat c_probImage = probImage; if( window.x < 0 )
cvCamShift(&c_probImage, window, (CvTermCriteria)criteria, &comp, &box); window.x = 0;
window = comp.rect;
return RotatedRect(Point2f(box.center), Size2f(box.size), box.angle);
}
int cv::meanShift( InputArray _probImage, Rect& window, TermCriteria criteria ) window.y -= TOLERANCE;
{ if( window.y < 0 )
CvConnectedComp comp; window.y = 0;
Mat probImage = _probImage.getMat();
CvMat c_probImage = probImage; window.width += 2 * TOLERANCE;
int iters = cvMeanShift(&c_probImage, window, (CvTermCriteria)criteria, &comp ); if( window.x + window.width > mat.cols )
window = comp.rect; window.width = mat.cols - window.x;
return iters;
window.height += 2 * TOLERANCE;
if( window.y + window.height > mat.rows )
window.height = mat.rows - window.y;
// Calculating moments in new center mass
Moments m = moments( mat(window) );
double m00 = m.m00, m10 = m.m10, m01 = m.m01;
double mu11 = m.mu11, mu20 = m.mu20, mu02 = m.mu02;
if( fabs(m00) < DBL_EPSILON )
return RotatedRect();
double inv_m00 = 1. / m00;
int xc = cvRound( m10 * inv_m00 + window.x );
int yc = cvRound( m01 * inv_m00 + window.y );
double a = mu20 * inv_m00, b = mu11 * inv_m00, c = mu02 * inv_m00;
// Calculating width & height
double square = std::sqrt( 4 * b * b + (a - c) * (a - c) );
// Calculating orientation
double theta = atan2( 2 * b, a - c + square );
// Calculating width & length of figure
double cs = cos( theta );
double sn = sin( theta );
double rotate_a = cs * cs * mu20 + 2 * cs * sn * mu11 + sn * sn * mu02;
double rotate_c = sn * sn * mu20 - 2 * cs * sn * mu11 + cs * cs * mu02;
double length = std::sqrt( rotate_a * inv_m00 ) * 4;
double width = std::sqrt( rotate_c * inv_m00 ) * 4;
// In case when theta is 0 or 1.57... the length & width may be exchanged
if( length < width )
{
std::swap( length, width );
std::swap( cs, sn );
theta = CV_PI*0.5 - theta;
}
// Saving results
int _xc = cvRound( xc );
int _yc = cvRound( yc );
int t0 = cvRound( fabs( length * cs ));
int t1 = cvRound( fabs( width * sn ));
t0 = MAX( t0, t1 ) + 2;
window.width = MIN( t0, (mat.cols - _xc) * 2 );
t0 = cvRound( fabs( length * sn ));
t1 = cvRound( fabs( width * cs ));
t0 = MAX( t0, t1 ) + 2;
window.height = MIN( t0, (mat.rows - _yc) * 2 );
window.x = MAX( 0, _xc - window.width / 2 );
window.y = MAX( 0, _yc - window.height / 2 );
window.width = MIN( mat.cols - window.x, window.width );
window.height = MIN( mat.rows - window.y, window.height );
RotatedRect box;
box.size.height = (float)length;
box.size.width = (float)width;
box.angle = (float)((CV_PI*0.5+theta)*180./CV_PI);
while(box.angle < 0)
box.angle += 360;
while(box.angle >= 360)
box.angle -= 360;
if(box.angle >= 180)
box.angle -= 180;
box.center = Point2f( window.x + window.width*0.5f, window.y + window.height*0.5f);
return box;
} }
/* End of file. */ /* End of file. */
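
For comparison with the wrapper-based version it replaces, a minimal usage sketch of the self-contained cv::CamShift; the criteria values are placeholders.

#include <opencv2/opencv.hpp>

// CamShift returns an oriented box and also updates 'window' to the
// axis-aligned search window to use for the next frame.
cv::RotatedRect trackCamShift(const cv::Mat& probImage, cv::Rect& window)
{
    cv::TermCriteria crit(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1.0);
    cv::RotatedRect box = cv::CamShift(probImage, window, crit);
    return box;  // box.angle is in degrees, box.center in image coordinates
}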

View File

@ -305,23 +305,8 @@ static void update_warping_matrix_ECC (Mat& map_matrix, const Mat& update, const
mapPtr[3] = (float) sin(new_theta); mapPtr[3] = (float) sin(new_theta);
mapPtr[1] = -mapPtr[3]; mapPtr[1] = -mapPtr[3];
} }
} }
CV_IMPL double cvFindTransformECC (const CvArr* _image1, const CvArr* _image2,
CvMat* _map_matrix,
const int motionType,
const CvTermCriteria _criteria)
{
Mat image1 = cvarrToMat(_image1);
Mat image2 = cvarrToMat(_image2);
Mat map_matrix = cvarrToMat(_map_matrix);
double cc = cv::findTransformECC(image1, image2, map_matrix, motionType,
TermCriteria(TermCriteria::EPS+TermCriteria::COUNT, _criteria.max_iter, _criteria.epsilon));
return cc;
}
double cv::findTransformECC(InputArray templateImage, double cv::findTransformECC(InputArray templateImage,
InputArray inputImage, InputArray inputImage,
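
The hunk above is truncated mid-signature, but the surviving C++ entry point can be used roughly as sketched below, assuming grayscale inputs and an affine motion model; the iteration count and epsilon are placeholder values.

#include <opencv2/opencv.hpp>

// Estimate a 2x3 affine warp relating 'input' to 'templ' with ECC, then
// register 'input' onto 'templ' (warpAffine with WARP_INVERSE_MAP is the
// documented way to apply the estimated warp).
cv::Mat alignECC(const cv::Mat& templ, const cv::Mat& input)
{
    cv::Mat warp = cv::Mat::eye(2, 3, CV_32F);  // identity initial guess
    cv::findTransformECC(templ, input, warp, cv::MOTION_AFFINE,
                         cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS,
                                          50, 1e-4));
    cv::Mat aligned;
    cv::warpAffine(input, aligned, warp, templ.size(),
                   cv::INTER_LINEAR + cv::WARP_INVERSE_MAP);
    return aligned;
}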

View File

@ -40,176 +40,6 @@
//M*/ //M*/
#include "precomp.hpp" #include "precomp.hpp"
CV_IMPL CvKalman*
cvCreateKalman( int DP, int MP, int CP )
{
CvKalman *kalman = 0;
if( DP <= 0 || MP <= 0 )
CV_Error( CV_StsOutOfRange,
"state and measurement vectors must have positive number of dimensions" );
if( CP < 0 )
CP = DP;
/* allocating memory for the structure */
kalman = (CvKalman *)cvAlloc( sizeof( CvKalman ));
memset( kalman, 0, sizeof(*kalman));
kalman->DP = DP;
kalman->MP = MP;
kalman->CP = CP;
kalman->state_pre = cvCreateMat( DP, 1, CV_32FC1 );
cvZero( kalman->state_pre );
kalman->state_post = cvCreateMat( DP, 1, CV_32FC1 );
cvZero( kalman->state_post );
kalman->transition_matrix = cvCreateMat( DP, DP, CV_32FC1 );
cvSetIdentity( kalman->transition_matrix );
kalman->process_noise_cov = cvCreateMat( DP, DP, CV_32FC1 );
cvSetIdentity( kalman->process_noise_cov );
kalman->measurement_matrix = cvCreateMat( MP, DP, CV_32FC1 );
cvZero( kalman->measurement_matrix );
kalman->measurement_noise_cov = cvCreateMat( MP, MP, CV_32FC1 );
cvSetIdentity( kalman->measurement_noise_cov );
kalman->error_cov_pre = cvCreateMat( DP, DP, CV_32FC1 );
kalman->error_cov_post = cvCreateMat( DP, DP, CV_32FC1 );
cvZero( kalman->error_cov_post );
kalman->gain = cvCreateMat( DP, MP, CV_32FC1 );
if( CP > 0 )
{
kalman->control_matrix = cvCreateMat( DP, CP, CV_32FC1 );
cvZero( kalman->control_matrix );
}
kalman->temp1 = cvCreateMat( DP, DP, CV_32FC1 );
kalman->temp2 = cvCreateMat( MP, DP, CV_32FC1 );
kalman->temp3 = cvCreateMat( MP, MP, CV_32FC1 );
kalman->temp4 = cvCreateMat( MP, DP, CV_32FC1 );
kalman->temp5 = cvCreateMat( MP, 1, CV_32FC1 );
#if 1
kalman->PosterState = kalman->state_pre->data.fl;
kalman->PriorState = kalman->state_post->data.fl;
kalman->DynamMatr = kalman->transition_matrix->data.fl;
kalman->MeasurementMatr = kalman->measurement_matrix->data.fl;
kalman->MNCovariance = kalman->measurement_noise_cov->data.fl;
kalman->PNCovariance = kalman->process_noise_cov->data.fl;
kalman->KalmGainMatr = kalman->gain->data.fl;
kalman->PriorErrorCovariance = kalman->error_cov_pre->data.fl;
kalman->PosterErrorCovariance = kalman->error_cov_post->data.fl;
#endif
return kalman;
}
CV_IMPL void
cvReleaseKalman( CvKalman** _kalman )
{
CvKalman *kalman;
if( !_kalman )
CV_Error( CV_StsNullPtr, "" );
kalman = *_kalman;
if( !kalman )
return;
/* freeing the memory */
cvReleaseMat( &kalman->state_pre );
cvReleaseMat( &kalman->state_post );
cvReleaseMat( &kalman->transition_matrix );
cvReleaseMat( &kalman->control_matrix );
cvReleaseMat( &kalman->measurement_matrix );
cvReleaseMat( &kalman->process_noise_cov );
cvReleaseMat( &kalman->measurement_noise_cov );
cvReleaseMat( &kalman->error_cov_pre );
cvReleaseMat( &kalman->gain );
cvReleaseMat( &kalman->error_cov_post );
cvReleaseMat( &kalman->temp1 );
cvReleaseMat( &kalman->temp2 );
cvReleaseMat( &kalman->temp3 );
cvReleaseMat( &kalman->temp4 );
cvReleaseMat( &kalman->temp5 );
memset( kalman, 0, sizeof(*kalman));
/* deallocating the structure */
cvFree( _kalman );
}
CV_IMPL const CvMat*
cvKalmanPredict( CvKalman* kalman, const CvMat* control )
{
if( !kalman )
CV_Error( CV_StsNullPtr, "" );
/* update the state */
/* x'(k) = A*x(k) */
cvMatMulAdd( kalman->transition_matrix, kalman->state_post, 0, kalman->state_pre );
if( control && kalman->CP > 0 )
/* x'(k) = x'(k) + B*u(k) */
cvMatMulAdd( kalman->control_matrix, control, kalman->state_pre, kalman->state_pre );
/* update error covariance matrices */
/* temp1 = A*P(k) */
cvMatMulAdd( kalman->transition_matrix, kalman->error_cov_post, 0, kalman->temp1 );
/* P'(k) = temp1*At + Q */
cvGEMM( kalman->temp1, kalman->transition_matrix, 1, kalman->process_noise_cov, 1,
kalman->error_cov_pre, CV_GEMM_B_T );
/* handle the case when there will be measurement before the next predict */
cvCopy(kalman->state_pre, kalman->state_post);
return kalman->state_pre;
}
CV_IMPL const CvMat*
cvKalmanCorrect( CvKalman* kalman, const CvMat* measurement )
{
if( !kalman || !measurement )
CV_Error( CV_StsNullPtr, "" );
/* temp2 = H*P'(k) */
cvMatMulAdd( kalman->measurement_matrix, kalman->error_cov_pre, 0, kalman->temp2 );
/* temp3 = temp2*Ht + R */
cvGEMM( kalman->temp2, kalman->measurement_matrix, 1,
kalman->measurement_noise_cov, 1, kalman->temp3, CV_GEMM_B_T );
/* temp4 = inv(temp3)*temp2 = Kt(k) */
cvSolve( kalman->temp3, kalman->temp2, kalman->temp4, CV_SVD );
/* K(k) */
cvTranspose( kalman->temp4, kalman->gain );
/* temp5 = z(k) - H*x'(k) */
cvGEMM( kalman->measurement_matrix, kalman->state_pre, -1, measurement, 1, kalman->temp5 );
/* x(k) = x'(k) + K(k)*temp5 */
cvMatMulAdd( kalman->gain, kalman->temp5, kalman->state_pre, kalman->state_post );
/* P(k) = P'(k) - K(k)*temp2 */
cvGEMM( kalman->gain, kalman->temp2, -1, kalman->error_cov_pre, 1,
kalman->error_cov_post, 0 );
return kalman->state_post;
}
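
The removed C API above maps onto the existing cv::KalmanFilter class; a minimal constant-velocity 2D sketch, with the noise covariances as placeholder values:

#include <opencv2/opencv.hpp>

int main()
{
    cv::KalmanFilter kf(4, 2, 0);  // state (x, y, vx, vy), measurement (x, y)
    kf.transitionMatrix = (cv::Mat_<float>(4, 4) <<
        1, 0, 1, 0,
        0, 1, 0, 1,
        0, 0, 1, 0,
        0, 0, 0, 1);
    cv::setIdentity(kf.measurementMatrix);
    cv::setIdentity(kf.processNoiseCov, cv::Scalar::all(1e-4));
    cv::setIdentity(kf.measurementNoiseCov, cv::Scalar::all(1e-1));

    cv::Mat measurement = (cv::Mat_<float>(2, 1) << 10.f, 20.f);  // stand-in observation
    cv::Mat prediction = kf.predict();             // was cvKalmanPredict
    cv::Mat corrected  = kf.correct(measurement);  // was cvKalmanCorrect
    return corrected.empty() ? 1 : 0;
}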
namespace cv namespace cv
{ {

File diff suppressed because it is too large

View File

@ -41,34 +41,23 @@
#include "precomp.hpp" #include "precomp.hpp"
void cv::updateMotionHistory( InputArray _silhouette, InputOutputArray _mhi,
/* motion templates */ double timestamp, double duration )
CV_IMPL void
cvUpdateMotionHistory( const void* silhouette, void* mhimg,
double timestamp, double mhi_duration )
{ {
CvMat silhstub, *silh = cvGetMat(silhouette, &silhstub); Mat silh = _silhouette.getMat(), mhi = _mhi.getMat();
CvMat mhistub, *mhi = cvGetMat(mhimg, &mhistub);
if( !CV_IS_MASK_ARR( silh )) CV_Assert( silh.type() == CV_8U && mhi.type() == CV_32F );
CV_Error( CV_StsBadMask, "" ); CV_Assert( silh.size() == mhi.size() );
if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 ) Size size = silh.size();
CV_Error( CV_StsUnsupportedFormat, "" ); if( silh.isContinuous() && mhi.isContinuous() )
if( !CV_ARE_SIZES_EQ( mhi, silh ))
CV_Error( CV_StsUnmatchedSizes, "" );
CvSize size = cvGetMatSize( mhi );
if( CV_IS_MAT_CONT( mhi->type & silh->type ))
{ {
size.width *= size.height; size.width *= size.height;
size.height = 1; size.height = 1;
} }
float ts = (float)timestamp; float ts = (float)timestamp;
float delbound = (float)(timestamp - mhi_duration); float delbound = (float)(timestamp - duration);
int x, y; int x, y;
#if CV_SSE2 #if CV_SSE2
volatile bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2); volatile bool useSIMD = cv::checkHardwareSupport(CV_CPU_SSE2);
@ -76,8 +65,8 @@ cvUpdateMotionHistory( const void* silhouette, void* mhimg,
for( y = 0; y < size.height; y++ ) for( y = 0; y < size.height; y++ )
{ {
const uchar* silhData = silh->data.ptr + silh->step*y; const uchar* silhData = silh.ptr<uchar>(y);
float* mhiData = (float*)(mhi->data.ptr + mhi->step*y); float* mhiData = mhi.ptr<float>(y);
x = 0; x = 0;
#if CV_SSE2 #if CV_SSE2
@ -117,26 +106,21 @@ cvUpdateMotionHistory( const void* silhouette, void* mhimg,
} }
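
A minimal sketch of feeding the rewritten cv::updateMotionHistory from frame differencing; the threshold and duration values are placeholders.

#include <opencv2/opencv.hpp>

// Maintain a CV_32F motion history image from consecutive grayscale frames.
void accumulateMotion(const cv::Mat& prevGray, const cv::Mat& currGray,
                      cv::Mat& mhi, double timestamp)
{
    const double MHI_DURATION = 1.0;  // seconds of history to keep
    cv::Mat silh;
    cv::absdiff(currGray, prevGray, silh);
    cv::threshold(silh, silh, 30, 255, cv::THRESH_BINARY);  // CV_8U silhouette
    if (mhi.empty())
        mhi = cv::Mat::zeros(silh.size(), CV_32F);
    cv::updateMotionHistory(silh, mhi, timestamp, MHI_DURATION);
}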
CV_IMPL void void cv::calcMotionGradient( InputArray _mhi, OutputArray _mask,
cvCalcMotionGradient( const CvArr* mhiimg, CvArr* maskimg, OutputArray _orientation,
CvArr* orientation, double delta1, double delta2,
double delta1, double delta2, int aperture_size )
int aperture_size )
{ {
cv::Ptr<CvMat> dX_min, dY_max; static int runcase = 0; runcase++;
CvMat mhistub, *mhi = cvGetMat(mhiimg, &mhistub); Mat mhi = _mhi.getMat();
CvMat maskstub, *mask = cvGetMat(maskimg, &maskstub); Size size = mhi.size();
CvMat orientstub, *orient = cvGetMat(orientation, &orientstub);
CvMat dX_min_row, dY_max_row, orient_row, mask_row;
CvSize size;
int x, y;
float gradient_epsilon = 1e-4f * aperture_size * aperture_size; _mask.create(size, CV_8U);
float min_delta, max_delta; _orientation.create(size, CV_32F);
if( !CV_IS_MASK_ARR( mask )) Mat mask = _mask.getMat();
CV_Error( CV_StsBadMask, "" ); Mat orient = _orientation.getMat();
if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 ) if( aperture_size < 3 || aperture_size > 7 || (aperture_size & 1) == 0 )
CV_Error( CV_StsOutOfRange, "aperture_size must be 3, 5 or 7" ); CV_Error( CV_StsOutOfRange, "aperture_size must be 3, 5 or 7" );
@ -144,343 +128,247 @@ cvCalcMotionGradient( const CvArr* mhiimg, CvArr* maskimg,
if( delta1 <= 0 || delta2 <= 0 ) if( delta1 <= 0 || delta2 <= 0 )
CV_Error( CV_StsOutOfRange, "both delta's must be positive" ); CV_Error( CV_StsOutOfRange, "both delta's must be positive" );
if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 ) if( mhi.type() != CV_32FC1 )
CV_Error( CV_StsUnsupportedFormat, CV_Error( CV_StsUnsupportedFormat,
"MHI and orientation must be single-channel floating-point images" ); "MHI must be single-channel floating-point images" );
if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi )) if( orient.data == mhi.data )
CV_Error( CV_StsUnmatchedSizes, "" );
if( orient->data.ptr == mhi->data.ptr )
CV_Error( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );
if( delta1 > delta2 )
{ {
double t; _orientation.release();
CV_SWAP( delta1, delta2, t ); _orientation.create(size, CV_32F);
orient = _orientation.getMat();
} }
size = cvGetMatSize( mhi ); if( delta1 > delta2 )
min_delta = (float)delta1; std::swap(delta1, delta2);
max_delta = (float)delta2;
dX_min = cvCreateMat( mhi->rows, mhi->cols, CV_32F ); float gradient_epsilon = 1e-4f * aperture_size * aperture_size;
dY_max = cvCreateMat( mhi->rows, mhi->cols, CV_32F ); float min_delta = (float)delta1;
float max_delta = (float)delta2;
Mat dX_min, dY_max;
// calc Dx and Dy // calc Dx and Dy
cvSobel( mhi, dX_min, 1, 0, aperture_size ); Sobel( mhi, dX_min, CV_32F, 1, 0, aperture_size, 1, 0, BORDER_REPLICATE );
cvSobel( mhi, dY_max, 0, 1, aperture_size ); Sobel( mhi, dY_max, CV_32F, 0, 1, aperture_size, 1, 0, BORDER_REPLICATE );
cvGetRow( dX_min, &dX_min_row, 0 );
cvGetRow( dY_max, &dY_max_row, 0 ); int x, y;
cvGetRow( orient, &orient_row, 0 );
cvGetRow( mask, &mask_row, 0 ); if( mhi.isContinuous() && orient.isContinuous() && mask.isContinuous() )
{
size.width *= size.height;
size.height = 1;
}
// calc gradient // calc gradient
for( y = 0; y < size.height; y++ ) for( y = 0; y < size.height; y++ )
{ {
dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step; const float* dX_min_row = dX_min.ptr<float>(y);
dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step; const float* dY_max_row = dY_max.ptr<float>(y);
orient_row.data.ptr = orient->data.ptr + y*orient->step; float* orient_row = orient.ptr<float>(y);
mask_row.data.ptr = mask->data.ptr + y*mask->step; uchar* mask_row = mask.ptr<uchar>(y);
cvCartToPolar( &dX_min_row, &dY_max_row, 0, &orient_row, 1 );
fastAtan2(dY_max_row, dX_min_row, orient_row, size.width, true);
// make orientation zero where the gradient is very small // make orientation zero where the gradient is very small
for( x = 0; x < size.width; x++ ) for( x = 0; x < size.width; x++ )
{ {
float dY = dY_max_row.data.fl[x]; float dY = dY_max_row[x];
float dX = dX_min_row.data.fl[x]; float dX = dX_min_row[x];
if( fabs(dX) < gradient_epsilon && fabs(dY) < gradient_epsilon ) if( std::abs(dX) < gradient_epsilon && std::abs(dY) < gradient_epsilon )
{ {
mask_row.data.ptr[x] = 0; mask_row[x] = (uchar)0;
orient_row.data.i[x] = 0; orient_row[x] = 0.f;
} }
else else
mask_row.data.ptr[x] = 1; mask_row[x] = (uchar)1;
} }
} }
cvErode( mhi, dX_min, 0, (aperture_size-1)/2); erode( mhi, dX_min, noArray(), Point(-1,-1), (aperture_size-1)/2, BORDER_REPLICATE );
cvDilate( mhi, dY_max, 0, (aperture_size-1)/2); dilate( mhi, dY_max, noArray(), Point(-1,-1), (aperture_size-1)/2, BORDER_REPLICATE );
// mask off pixels which have little motion difference in their neighborhood // mask off pixels which have little motion difference in their neighborhood
for( y = 0; y < size.height; y++ ) for( y = 0; y < size.height; y++ )
{ {
dX_min_row.data.ptr = dX_min->data.ptr + y*dX_min->step; const float* dX_min_row = dX_min.ptr<float>(y);
dY_max_row.data.ptr = dY_max->data.ptr + y*dY_max->step; const float* dY_max_row = dY_max.ptr<float>(y);
mask_row.data.ptr = mask->data.ptr + y*mask->step; float* orient_row = orient.ptr<float>(y);
orient_row.data.ptr = orient->data.ptr + y*orient->step; uchar* mask_row = mask.ptr<uchar>(y);
for( x = 0; x < size.width; x++ ) for( x = 0; x < size.width; x++ )
{ {
float d0 = dY_max_row.data.fl[x] - dX_min_row.data.fl[x]; float d0 = dY_max_row[x] - dX_min_row[x];
if( mask_row.data.ptr[x] == 0 || d0 < min_delta || max_delta < d0 ) if( mask_row[x] == 0 || d0 < min_delta || max_delta < d0 )
{ {
mask_row.data.ptr[x] = 0; mask_row[x] = (uchar)0;
orient_row.data.i[x] = 0; orient_row[x] = 0.f;
} }
} }
} }
} }
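
Continuing the sketch above, the rewritten cv::calcMotionGradient can be driven like this; the delta bounds are placeholders in MHI time units (the function swaps them if given out of order).

#include <opencv2/opencv.hpp>

// Derive a per-pixel orientation map and validity mask from an MHI.
void motionOrientation(const cv::Mat& mhi, cv::Mat& mask, cv::Mat& orient)
{
    const double MIN_TIME_DELTA = 0.05;
    const double MAX_TIME_DELTA = 0.5;
    cv::calcMotionGradient(mhi, mask, orient,
                           MIN_TIME_DELTA, MAX_TIME_DELTA, 3 /* aperture */);
}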
CV_IMPL double
cvCalcGlobalOrientation( const void* orientation, const void* maskimg, const void* mhiimg,
double curr_mhi_timestamp, double mhi_duration )
{
int hist_size = 12;
cv::Ptr<CvHistogram> hist;
CvMat mhistub, *mhi = cvGetMat(mhiimg, &mhistub);
CvMat maskstub, *mask = cvGetMat(maskimg, &maskstub);
CvMat orientstub, *orient = cvGetMat(orientation, &orientstub);
void* _orient;
float _ranges[] = { 0, 360 };
float* ranges = _ranges;
int base_orient;
float shift_orient = 0, shift_weight = 0;
float a, b, fbase_orient;
float delbound;
CvMat mhi_row, mask_row, orient_row;
int x, y, mhi_rows, mhi_cols;
if( !CV_IS_MASK_ARR( mask ))
CV_Error( CV_StsBadMask, "" );
if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( orient->type ) != CV_32FC1 )
CV_Error( CV_StsUnsupportedFormat,
"MHI and orientation must be single-channel floating-point images" );
if( !CV_ARE_SIZES_EQ( mhi, mask ) || !CV_ARE_SIZES_EQ( orient, mhi ))
CV_Error( CV_StsUnmatchedSizes, "" );
if( mhi_duration <= 0 )
CV_Error( CV_StsOutOfRange, "MHI duration must be positive" );
if( orient->data.ptr == mhi->data.ptr )
CV_Error( CV_StsInplaceNotSupported, "orientation image must be different from MHI" );
// calculate histogram of different orientation values
hist = cvCreateHist( 1, &hist_size, CV_HIST_ARRAY, &ranges );
_orient = orient;
cvCalcArrHist( &_orient, hist, 0, mask );
// find the maximum index (the dominant orientation)
cvGetMinMaxHistValue( hist, 0, 0, 0, &base_orient );
fbase_orient = base_orient*360.f/hist_size;
// override timestamp with the maximum value in MHI
cvMinMaxLoc( mhi, 0, &curr_mhi_timestamp, 0, 0, mask );
// find the shift relative to the dominant orientation as weighted sum of relative angles
a = (float)(254. / 255. / mhi_duration);
b = (float)(1. - curr_mhi_timestamp * a);
delbound = (float)(curr_mhi_timestamp - mhi_duration);
mhi_rows = mhi->rows;
mhi_cols = mhi->cols;
if( CV_IS_MAT_CONT( mhi->type & mask->type & orient->type ))
{
mhi_cols *= mhi_rows;
mhi_rows = 1;
}
cvGetRow( mhi, &mhi_row, 0 );
cvGetRow( mask, &mask_row, 0 );
cvGetRow( orient, &orient_row, 0 );
/*
a = 254/(255*dt)
b = 1 - t*a = 1 - 254*t/(255*dur) =
(255*dt - 254*t)/(255*dt) =
(dt - (t - dt)*254)/(255*dt);
--------------------------------------------------------
ax + b = 254*x/(255*dt) + (dt - (t - dt)*254)/(255*dt) =
(254*x + dt - (t - dt)*254)/(255*dt) =
((x - (t - dt))*254 + dt)/(255*dt) =
(((x - (t - dt))/dt)*254 + 1)/255 = (((x - low_time)/dt)*254 + 1)/255
*/
for( y = 0; y < mhi_rows; y++ )
{
mhi_row.data.ptr = mhi->data.ptr + mhi->step*y;
mask_row.data.ptr = mask->data.ptr + mask->step*y;
orient_row.data.ptr = orient->data.ptr + orient->step*y;
for( x = 0; x < mhi_cols; x++ )
if( mask_row.data.ptr[x] != 0 && mhi_row.data.fl[x] > delbound )
{
/*
orient in 0..360, base_orient in 0..360
-> (rel_angle = orient - base_orient) in -360..360.
rel_angle is translated to -180..180
*/
float weight = mhi_row.data.fl[x] * a + b;
float rel_angle = orient_row.data.fl[x] - fbase_orient;
rel_angle += (rel_angle < -180 ? 360 : 0);
rel_angle += (rel_angle > 180 ? -360 : 0);
if( fabs(rel_angle) < 45 )
{
shift_orient += weight * rel_angle;
shift_weight += weight;
}
}
}
// add the dominant orientation and the relative shift
if( shift_weight == 0 )
shift_weight = 0.01f;
fbase_orient += shift_orient / shift_weight;
fbase_orient -= (fbase_orient < 360 ? 0 : 360);
fbase_orient += (fbase_orient >= 0 ? 0 : 360);
return fbase_orient;
}
CV_IMPL CvSeq*
cvSegmentMotion( const CvArr* mhiimg, CvArr* segmask, CvMemStorage* storage,
double timestamp, double seg_thresh )
{
CvSeq* components = 0;
cv::Ptr<CvMat> mask8u;
CvMat mhistub, *mhi = cvGetMat(mhiimg, &mhistub);
CvMat maskstub, *mask = cvGetMat(segmask, &maskstub);
Cv32suf v, comp_idx;
int stub_val, ts;
int x, y;
if( !storage )
CV_Error( CV_StsNullPtr, "NULL memory storage" );
mhi = cvGetMat( mhi, &mhistub );
mask = cvGetMat( mask, &maskstub );
if( CV_MAT_TYPE( mhi->type ) != CV_32FC1 || CV_MAT_TYPE( mask->type ) != CV_32FC1 )
CV_Error( CV_BadDepth, "Both MHI and the destination mask" );
if( !CV_ARE_SIZES_EQ( mhi, mask ))
CV_Error( CV_StsUnmatchedSizes, "" );
mask8u = cvCreateMat( mhi->rows + 2, mhi->cols + 2, CV_8UC1 );
cvZero( mask8u );
cvZero( mask );
components = cvCreateSeq( CV_SEQ_KIND_GENERIC, sizeof(CvSeq),
sizeof(CvConnectedComp), storage );
v.f = (float)timestamp; ts = v.i;
v.f = FLT_MAX*0.1f; stub_val = v.i;
comp_idx.f = 1;
for( y = 0; y < mhi->rows; y++ )
{
int* mhi_row = (int*)(mhi->data.ptr + y*mhi->step);
for( x = 0; x < mhi->cols; x++ )
{
if( mhi_row[x] == 0 )
mhi_row[x] = stub_val;
}
}
for( y = 0; y < mhi->rows; y++ )
{
int* mhi_row = (int*)(mhi->data.ptr + y*mhi->step);
uchar* mask8u_row = mask8u->data.ptr + (y+1)*mask8u->step + 1;
for( x = 0; x < mhi->cols; x++ )
{
if( mhi_row[x] == ts && mask8u_row[x] == 0 )
{
CvConnectedComp comp;
int x1, y1;
CvScalar _seg_thresh = cvRealScalar(seg_thresh);
CvPoint seed = cvPoint(x,y);
cvFloodFill( mhi, seed, cvRealScalar(0), _seg_thresh, _seg_thresh,
&comp, CV_FLOODFILL_MASK_ONLY + 2*256 + 4, mask8u );
for( y1 = 0; y1 < comp.rect.height; y1++ )
{
int* mask_row1 = (int*)(mask->data.ptr +
(comp.rect.y + y1)*mask->step) + comp.rect.x;
uchar* mask8u_row1 = mask8u->data.ptr +
(comp.rect.y + y1+1)*mask8u->step + comp.rect.x+1;
for( x1 = 0; x1 < comp.rect.width; x1++ )
{
if( mask8u_row1[x1] > 1 )
{
mask8u_row1[x1] = 1;
mask_row1[x1] = comp_idx.i;
}
}
}
comp_idx.f++;
cvSeqPush( components, &comp );
}
}
}
for( y = 0; y < mhi->rows; y++ )
{
int* mhi_row = (int*)(mhi->data.ptr + y*mhi->step);
for( x = 0; x < mhi->cols; x++ )
{
if( mhi_row[x] == stub_val )
mhi_row[x] = 0;
}
}
return components;
}
void cv::updateMotionHistory( InputArray _silhouette, InputOutputArray _mhi,
double timestamp, double duration )
{
Mat silhouette = _silhouette.getMat();
CvMat c_silhouette = silhouette, c_mhi = _mhi.getMat();
cvUpdateMotionHistory( &c_silhouette, &c_mhi, timestamp, duration );
}
void cv::calcMotionGradient( InputArray _mhi, OutputArray _mask,
OutputArray _orientation,
double delta1, double delta2,
int aperture_size )
{
Mat mhi = _mhi.getMat();
_mask.create(mhi.size(), CV_8U);
_orientation.create(mhi.size(), CV_32F);
CvMat c_mhi = mhi, c_mask = _mask.getMat(), c_orientation = _orientation.getMat();
cvCalcMotionGradient(&c_mhi, &c_mask, &c_orientation, delta1, delta2, aperture_size);
}
double cv::calcGlobalOrientation( InputArray _orientation, InputArray _mask, double cv::calcGlobalOrientation( InputArray _orientation, InputArray _mask,
InputArray _mhi, double timestamp, InputArray _mhi, double /*timestamp*/,
double duration ) double duration )
{ {
Mat orientation = _orientation.getMat(), mask = _mask.getMat(), mhi = _mhi.getMat(); Mat orient = _orientation.getMat(), mask = _mask.getMat(), mhi = _mhi.getMat();
CvMat c_orientation = orientation, c_mask = mask, c_mhi = mhi; Size size = mhi.size();
return cvCalcGlobalOrientation(&c_orientation, &c_mask, &c_mhi, timestamp, duration);
CV_Assert( mask.type() == CV_8U && orient.type() == CV_32F && mhi.type() == CV_32F );
CV_Assert( mask.size() == size && orient.size() == size );
CV_Assert( duration > 0 );
int histSize = 12;
float _ranges[] = { 0.f, 360.f };
const float* ranges = _ranges;
Mat hist;
calcHist(&orient, 1, 0, mask, hist, 1, &histSize, &ranges);
// find the maximum index (the dominant orientation)
Point baseOrientPt;
minMaxLoc(hist, 0, 0, 0, &baseOrientPt);
float fbaseOrient = (baseOrientPt.x + baseOrientPt.y)*360.f/histSize;
// override timestamp with the maximum value in MHI
double timestamp = 0;
minMaxLoc( mhi, 0, &timestamp, 0, 0, mask );
// find the shift relative to the dominant orientation as weighted sum of relative angles
float a = (float)(254. / 255. / duration);
float b = (float)(1. - timestamp * a);
float delbound = (float)(timestamp - duration);
if( mhi.isContinuous() && mask.isContinuous() && orient.isContinuous() )
{
size.width *= size.height;
size.height = 1;
}
/*
a = 254/(255*dt)
b = 1 - t*a = 1 - 254*t/(255*dur) =
(255*dt - 254*t)/(255*dt) =
(dt - (t - dt)*254)/(255*dt);
--------------------------------------------------------
ax + b = 254*x/(255*dt) + (dt - (t - dt)*254)/(255*dt) =
(254*x + dt - (t - dt)*254)/(255*dt) =
((x - (t - dt))*254 + dt)/(255*dt) =
(((x - (t - dt))/dt)*254 + 1)/255 = (((x - low_time)/dt)*254 + 1)/255
*/
float shiftOrient = 0, shiftWeight = 0;
for( int y = 0; y < size.height; y++ )
{
const float* mhiptr = mhi.ptr<float>(y);
const float* oriptr = orient.ptr<float>(y);
const uchar* maskptr = mask.ptr<uchar>(y);
for( int x = 0; x < size.width; x++ )
{
if( maskptr[x] != 0 && mhiptr[x] > delbound )
{
/*
orient in 0..360, base_orient in 0..360
-> (rel_angle = orient - base_orient) in -360..360.
rel_angle is translated to -180..180
*/
float weight = mhiptr[x] * a + b;
float relAngle = oriptr[x] - fbaseOrient;
relAngle += (relAngle < -180 ? 360 : 0);
relAngle += (relAngle > 180 ? -360 : 0);
if( fabs(relAngle) < 45 )
{
shiftOrient += weight * relAngle;
shiftWeight += weight;
}
}
}
}
// add the dominant orientation and the relative shift
if( shiftWeight == 0 )
shiftWeight = 0.01f;
fbaseOrient += shiftOrient / shiftWeight;
fbaseOrient -= (fbaseOrient < 360 ? 0 : 360);
fbaseOrient += (fbaseOrient >= 0 ? 0 : 360);
return fbaseOrient;
} }
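
A minimal sketch of the new code path; note that the rewritten function overrides the timestamp argument with the maximum value inside the masked MHI, so the angle is driven by the MHI itself. The ROI would typically come from segmentMotion (next function).

#include <opencv2/opencv.hpp>

// Overall motion direction of one region, in degrees [0, 360).
double componentAngle(const cv::Mat& mhi, const cv::Mat& mask,
                      const cv::Mat& orient, const cv::Rect& roi,
                      double timestamp, double duration)
{
    return cv::calcGlobalOrientation(orient(roi), mask(roi), mhi(roi),
                                     timestamp, duration);
}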
void cv::segmentMotion(InputArray _mhi, OutputArray _segmask, void cv::segmentMotion(InputArray _mhi, OutputArray _segmask,
std::vector<Rect>& boundingRects, std::vector<Rect>& boundingRects,
double timestamp, double segThresh) double timestamp, double segThresh)
{ {
Mat mhi = _mhi.getMat(); Mat mhi = _mhi.getMat();
_segmask.create(mhi.size(), CV_32F); _segmask.create(mhi.size(), CV_32F);
CvMat c_mhi = mhi, c_segmask = _segmask.getMat(); Mat segmask = _segmask.getMat();
Ptr<CvMemStorage> storage = cvCreateMemStorage(); segmask = Scalar::all(0);
Seq<CvConnectedComp> comps = cvSegmentMotion(&c_mhi, &c_segmask, storage, timestamp, segThresh);
Seq<CvConnectedComp>::const_iterator it(comps); CV_Assert( mhi.type() == CV_32F );
size_t i, ncomps = comps.size(); CV_Assert( segThresh >= 0 );
boundingRects.resize(ncomps);
for( i = 0; i < ncomps; i++, ++it) Mat mask = Mat::zeros( mhi.rows + 2, mhi.cols + 2, CV_8UC1 );
boundingRects[i] = (*it).rect;
int x, y;
// protect zero mhi pixels from floodfill.
for( y = 0; y < mhi.rows; y++ )
{
const float* mhiptr = mhi.ptr<float>(y);
uchar* maskptr = mask.ptr<uchar>(y+1) + 1;
for( x = 0; x < mhi.cols; x++ )
{
if( mhiptr[x] == 0 )
maskptr[x] = 1;
}
}
float ts = (float)timestamp;
float comp_idx = 1.f;
for( y = 0; y < mhi.rows; y++ )
{
float* mhiptr = mhi.ptr<float>(y);
uchar* maskptr = mask.ptr<uchar>(y+1) + 1;
for( x = 0; x < mhi.cols; x++ )
{
if( mhiptr[x] == ts && maskptr[x] == 0 )
{
Rect cc;
floodFill( mhi, mask, Point(x,y), Scalar::all(0),
&cc, Scalar::all(segThresh), Scalar::all(segThresh),
FLOODFILL_MASK_ONLY + 2*256 + 4 );
for( int y1 = 0; y1 < cc.height; y1++ )
{
float* segmaskptr = segmask.ptr<float>(cc.y + y1) + cc.x;
uchar* maskptr1 = mask.ptr<uchar>(cc.y + y1 + 1) + cc.x + 1;
for( int x1 = 0; x1 < cc.width; x1++ )
{
if( maskptr1[x1] > 1 )
{
maskptr1[x1] = 1;
segmaskptr[x1] = comp_idx;
}
}
}
comp_idx += 1.f;
boundingRects.push_back(cc);
}
}
}
} }
/* End of file. */ /* End of file. */
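
A minimal sketch of the new vector-based interface that replaces the CvSeq/CvMemStorage plumbing; the segmentation threshold is a placeholder, and timestamp should match the most recent value written into the MHI.

#include <opencv2/opencv.hpp>
#include <vector>

// Split an MHI into recently-moving components and report their bounding boxes.
std::vector<cv::Rect> motionComponents(const cv::Mat& mhi, double timestamp)
{
    cv::Mat segmask;  // CV_32F label image, one index per component
    std::vector<cv::Rect> rects;
    cv::segmentMotion(mhi, segmask, rects, timestamp, 0.25 /* segThresh */);
    return rects;
}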

View File

@ -644,18 +644,3 @@ void cv::calcOpticalFlowFarneback( InputArray _prev0, InputArray _next0,
prevFlow = flow; prevFlow = flow;
} }
} }
CV_IMPL void cvCalcOpticalFlowFarneback(
const CvArr* _prev, const CvArr* _next,
CvArr* _flow, double pyr_scale, int levels,
int winsize, int iterations, int poly_n,
double poly_sigma, int flags )
{
cv::Mat prev = cv::cvarrToMat(_prev), next = cv::cvarrToMat(_next);
cv::Mat flow = cv::cvarrToMat(_flow);
CV_Assert( flow.size() == prev.size() && flow.type() == CV_32FC2 );
cv::calcOpticalFlowFarneback( prev, next, flow, pyr_scale, levels,
winsize, iterations, poly_n, poly_sigma, flags );
}
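
With the wrapper gone, callers go straight to the C++ interface; a minimal sketch using commonly cited parameter values (these are conventional choices, not mandated by this commit). Inputs are assumed to be single-channel 8-bit frames.

#include <opencv2/opencv.hpp>

// Dense optical flow between two frames; 'flow' becomes CV_32FC2.
void denseFlow(const cv::Mat& prev, const cv::Mat& next, cv::Mat& flow)
{
    cv::calcOpticalFlowFarneback(prev, next, flow,
                                 0.5,   // pyr_scale
                                 3,     // levels
                                 15,    // winsize
                                 3,     // iterations
                                 5,     // poly_n
                                 1.2,   // poly_sigma
                                 0);    // flags
}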

View File

@ -41,7 +41,6 @@
//M*/ //M*/
#include "precomp.hpp" #include "precomp.hpp"
#include "simpleflow.hpp"
// //
// 2D dense optical flow algorithm from the following paper: // 2D dense optical flow algorithm from the following paper:
@ -54,6 +53,39 @@
namespace cv namespace cv
{ {
static const uchar MASK_TRUE_VALUE = (uchar)255;
inline static float dist(const Vec3b& p1, const Vec3b& p2) {
return (float)((p1[0] - p2[0]) * (p1[0] - p2[0]) +
(p1[1] - p2[1]) * (p1[1] - p2[1]) +
(p1[2] - p2[2]) * (p1[2] - p2[2]));
}
inline static float dist(const Vec2f& p1, const Vec2f& p2) {
return (p1[0] - p2[0]) * (p1[0] - p2[0]) +
(p1[1] - p2[1]) * (p1[1] - p2[1]);
}
inline static float dist(const Point2f& p1, const Point2f& p2) {
return (p1.x - p2.x) * (p1.x - p2.x) +
(p1.y - p2.y) * (p1.y - p2.y);
}
inline static float dist(float x1, float y1, float x2, float y2) {
return (x1 - x2) * (x1 - x2) +
(y1 - y2) * (y1 - y2);
}
inline static int dist(int x1, int y1, int x2, int y2) {
return (x1 - x2) * (x1 - x2) +
(y1 - y2) * (y1 - y2);
}
template<class T>
inline static T min(T t1, T t2, T t3) {
return (t1 <= t2 && t1 <= t3) ? t1 : min(t2, t3);
}
static void removeOcclusions(const Mat& flow, static void removeOcclusions(const Mat& flow,
const Mat& flow_inv, const Mat& flow_inv,
float occ_thr, float occ_thr,

View File

@ -46,50 +46,9 @@
namespace cv namespace cv
{ {
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BackgroundSubtractorMOG, "BackgroundSubtractor.MOG",
obj.info()->addParam(obj, "history", obj.history);
obj.info()->addParam(obj, "nmixtures", obj.nmixtures);
obj.info()->addParam(obj, "backgroundRatio", obj.backgroundRatio);
obj.info()->addParam(obj, "noiseSigma", obj.noiseSigma));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BackgroundSubtractorMOG2, "BackgroundSubtractor.MOG2",
obj.info()->addParam(obj, "history", obj.history);
obj.info()->addParam(obj, "nmixtures", obj.nmixtures);
obj.info()->addParam(obj, "varThreshold", obj.varThreshold);
obj.info()->addParam(obj, "detectShadows", obj.bShadowDetection));
///////////////////////////////////////////////////////////////////////////////////////////////////////////
CV_INIT_ALGORITHM(BackgroundSubtractorGMG, "BackgroundSubtractor.GMG",
obj.info()->addParam(obj, "maxFeatures", obj.maxFeatures,false,0,0,
"Maximum number of features to store in histogram. Harsh enforcement of sparsity constraint.");
obj.info()->addParam(obj, "learningRate", obj.learningRate,false,0,0,
"Adaptation rate of histogram. Close to 1, slow adaptation. Close to 0, fast adaptation, features forgotten quickly.");
obj.info()->addParam(obj, "initializationFrames", obj.numInitializationFrames,false,0,0,
"Number of frames to use to initialize histograms of pixels.");
obj.info()->addParam(obj, "quantizationLevels", obj.quantizationLevels,false,0,0,
"Number of discrete colors to be used in histograms. Up-front quantization.");
obj.info()->addParam(obj, "backgroundPrior", obj.backgroundPrior,false,0,0,
"Prior probability that each individual pixel is a background pixel.");
obj.info()->addParam(obj, "smoothingRadius", obj.smoothingRadius,false,0,0,
"Radius of smoothing kernel to filter noise from FG mask image.");
obj.info()->addParam(obj, "decisionThreshold", obj.decisionThreshold,false,0,0,
"Threshold for FG decision rule. Pixel is FG if posterior probability exceeds threshold.");
obj.info()->addParam(obj, "updateBackgroundModel", obj.updateBackgroundModel,false,0,0,
"Perform background model update."));
bool initModule_video(void) bool initModule_video(void)
{ {
bool all = true; return true;
all &= !BackgroundSubtractorMOG_info_auto.name().empty();
all &= !BackgroundSubtractorMOG2_info_auto.name().empty();
all &= !BackgroundSubtractorGMG_info_auto.name().empty();
return all;
} }
} }

View File

@ -37,8 +37,7 @@ void CV_BackgroundSubtractorTest::run(int)
int width = 2 + ((unsigned int)rng)%98; //!< Mat will be 2 to 99 in width and height int width = 2 + ((unsigned int)rng)%98;
int height = 2 + ((unsigned int)rng)%98; int height = 2 + ((unsigned int)rng)%98;
Ptr<BackgroundSubtractorGMG> fgbg = Ptr<BackgroundSubtractorGMG> fgbg = createBackgroundSubtractorGMG();
Algorithm::create<BackgroundSubtractorGMG>("BackgroundSubtractor.GMG");
Mat fgmask; Mat fgmask;
if (fgbg.empty()) if (fgbg.empty())
@ -47,19 +46,13 @@ void CV_BackgroundSubtractorTest::run(int)
/** /**
* Set a few parameters * Set a few parameters
*/ */
fgbg->set("smoothingRadius",7); fgbg->setSmoothingRadius(7);
fgbg->set("decisionThreshold",0.7); fgbg->setDecisionThreshold(0.7);
fgbg->set("initializationFrames",120); fgbg->setNumFrames(120);
/** /**
* Generate bounds for the values in the matrix for each type * Generate bounds for the values in the matrix for each type
*/ */
uchar maxuc = 0, minuc = 0;
char maxc = 0, minc = 0;
unsigned int maxui = 0, minui = 0;
int maxi=0, mini = 0;
long int maxli = 0, minli = 0;
float maxf = 0, minf = 0;
double maxd = 0, mind = 0; double maxd = 0, mind = 0;
/** /**
@ -69,34 +62,34 @@ void CV_BackgroundSubtractorTest::run(int)
if (type == CV_8U) if (type == CV_8U)
{ {
uchar half = UCHAR_MAX/2; uchar half = UCHAR_MAX/2;
maxuc = (unsigned char)rng.uniform(half+32, UCHAR_MAX); maxd = (unsigned char)rng.uniform(half+32, UCHAR_MAX);
minuc = (unsigned char)rng.uniform(0, half-32); mind = (unsigned char)rng.uniform(0, half-32);
} }
else if (type == CV_8S) else if (type == CV_8S)
{ {
maxc = (char)rng.uniform(32, CHAR_MAX); maxd = (char)rng.uniform(32, CHAR_MAX);
minc = (char)rng.uniform(CHAR_MIN, -32); mind = (char)rng.uniform(CHAR_MIN, -32);
} }
else if (type == CV_16U) else if (type == CV_16U)
{ {
ushort half = USHRT_MAX/2; ushort half = USHRT_MAX/2;
maxui = (unsigned int)rng.uniform(half+32, USHRT_MAX); maxd = (unsigned int)rng.uniform(half+32, USHRT_MAX);
minui = (unsigned int)rng.uniform(0, half-32); mind = (unsigned int)rng.uniform(0, half-32);
} }
else if (type == CV_16S) else if (type == CV_16S)
{ {
maxi = rng.uniform(32, SHRT_MAX); maxd = rng.uniform(32, SHRT_MAX);
mini = rng.uniform(SHRT_MIN, -32); mind = rng.uniform(SHRT_MIN, -32);
} }
else if (type == CV_32S) else if (type == CV_32S)
{ {
maxli = rng.uniform(32, INT_MAX); maxd = rng.uniform(32, INT_MAX);
minli = rng.uniform(INT_MIN, -32); mind = rng.uniform(INT_MIN, -32);
} }
else if (type == CV_32F) else if (type == CV_32F)
{ {
maxf = rng.uniform(32.0f, FLT_MAX); maxd = rng.uniform(32.0f, FLT_MAX);
minf = rng.uniform(-FLT_MAX, -32.0f); mind = rng.uniform(-FLT_MAX, -32.0f);
} }
else if (type == CV_64F) else if (type == CV_64F)
{ {
@ -104,60 +97,22 @@ void CV_BackgroundSubtractorTest::run(int)
mind = rng.uniform(-DBL_MAX, -32.0); mind = rng.uniform(-DBL_MAX, -32.0);
} }
fgbg->setMinVal(mind);
fgbg->setMaxVal(maxd);
Mat simImage = Mat::zeros(height, width, channelsAndType); Mat simImage = Mat::zeros(height, width, channelsAndType);
const unsigned int numLearningFrames = 120; int numLearningFrames = 120;
for (unsigned int i = 0; i < numLearningFrames; ++i) for (int i = 0; i < numLearningFrames; ++i)
{ {
/** /**
* Generate simulated "image" for any type. Values always confined to upper half of range. rng.fill(simImage, RNG::UNIFORM, (mind + maxd)*0.5, maxd);
*/ */
if (type == CV_8U) rng.fill(simImage, RNG::UNIFORM, (mind + maxd)*0.5, maxd);
{
rng.fill(simImage,RNG::UNIFORM,(unsigned char)(minuc/2+maxuc/2),maxuc);
if (i == 0)
fgbg->initialize(simImage.size(),minuc,maxuc);
}
else if (type == CV_8S)
{
rng.fill(simImage,RNG::UNIFORM,(char)(minc/2+maxc/2),maxc);
if (i==0)
fgbg->initialize(simImage.size(),minc,maxc);
}
else if (type == CV_16U)
{
rng.fill(simImage,RNG::UNIFORM,(unsigned int)(minui/2+maxui/2),maxui);
if (i==0)
fgbg->initialize(simImage.size(),minui,maxui);
}
else if (type == CV_16S)
{
rng.fill(simImage,RNG::UNIFORM,(int)(mini/2+maxi/2),maxi);
if (i==0)
fgbg->initialize(simImage.size(),mini,maxi);
}
else if (type == CV_32F)
{
rng.fill(simImage,RNG::UNIFORM,(float)(minf/2.0+maxf/2.0),maxf);
if (i==0)
fgbg->initialize(simImage.size(),minf,maxf);
}
else if (type == CV_32S)
{
rng.fill(simImage,RNG::UNIFORM,(long int)(minli/2+maxli/2),maxli);
if (i==0)
fgbg->initialize(simImage.size(),minli,maxli);
}
else if (type == CV_64F)
{
rng.fill(simImage,RNG::UNIFORM,(double)(mind/2.0+maxd/2.0),maxd);
if (i==0)
fgbg->initialize(simImage.size(),mind,maxd);
}
/** /**
* Feed simulated images into background subtractor * Feed simulated images into background subtractor
*/ */
(*fgbg)(simImage,fgmask); fgbg->apply(simImage,fgmask);
Mat fullbg = Mat::zeros(simImage.rows, simImage.cols, CV_8U); Mat fullbg = Mat::zeros(simImage.rows, simImage.cols, CV_8U);
//! fgmask should be entirely background during training //! fgmask should be entirely background during training
@ -166,22 +121,9 @@ void CV_BackgroundSubtractorTest::run(int)
ts->set_failed_test_info( code ); ts->set_failed_test_info( code );
} }
//! generate last image, distinct from training images //! generate last image, distinct from training images
if (type == CV_8U) rng.fill(simImage, RNG::UNIFORM, mind, maxd);
rng.fill(simImage,RNG::UNIFORM,minuc,minuc);
else if (type == CV_8S)
rng.fill(simImage,RNG::UNIFORM,minc,minc);
else if (type == CV_16U)
rng.fill(simImage,RNG::UNIFORM,minui,minui);
else if (type == CV_16S)
rng.fill(simImage,RNG::UNIFORM,mini,mini);
else if (type == CV_32F)
rng.fill(simImage,RNG::UNIFORM,minf,minf);
else if (type == CV_32S)
rng.fill(simImage,RNG::UNIFORM,minli,minli);
else if (type == CV_64F)
rng.fill(simImage,RNG::UNIFORM,mind,mind);
(*fgbg)(simImage,fgmask); fgbg->apply(simImage,fgmask);
//! now fgmask should be entirely foreground //! now fgmask should be entirely foreground
Mat fullfg = 255*Mat::ones(simImage.rows, simImage.cols, CV_8U); Mat fullfg = 255*Mat::ones(simImage.rows, simImage.cols, CV_8U);
code = cvtest::cmpEps2( ts, fgmask, fullfg, 255, false, "The final foreground mask" ); code = cvtest::cmpEps2( ts, fgmask, fullfg, 255, false, "The final foreground mask" );

View File

@ -153,7 +153,7 @@ bool CV_RigidTransform_Test::testImage()
Mat aff_est = estimateRigidTransform(img, rotated, true); Mat aff_est = estimateRigidTransform(img, rotated, true);
const double thres = 0.03; const double thres = 0.033;
if (norm(aff_est, aff, NORM_INF) > thres) if (norm(aff_est, aff, NORM_INF) > thres)
{ {
ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY); ts->set_failed_test_info(cvtest::TS::FAIL_BAD_ACCURACY);

View File

@ -32,16 +32,13 @@ int main(int argc, char** argv)
setUseOptimized(true); setUseOptimized(true);
setNumThreads(8); setNumThreads(8);
Ptr<BackgroundSubtractorGMG> fgbg = Algorithm::create<BackgroundSubtractorGMG>("BackgroundSubtractor.GMG"); Ptr<BackgroundSubtractor> fgbg = createBackgroundSubtractorGMG(20, 0.7);
if (fgbg.empty()) if (fgbg.empty())
{ {
std::cerr << "Failed to create BackgroundSubtractor.GMG Algorithm." << std::endl; std::cerr << "Failed to create BackgroundSubtractor.GMG Algorithm." << std::endl;
return -1; return -1;
} }
fgbg->set("initializationFrames", 20);
fgbg->set("decisionThreshold", 0.7);
VideoCapture cap; VideoCapture cap;
if (argc > 1) if (argc > 1)
cap.open(argv[1]); cap.open(argv[1]);
@ -65,9 +62,9 @@ int main(int argc, char** argv)
if (frame.empty()) if (frame.empty())
break; break;
(*fgbg)(frame, fgmask); fgbg->apply(frame, fgmask);
frame.copyTo(segm); frame.convertTo(segm, CV_8U, 0.5);
add(frame, Scalar(100, 100, 0), segm, fgmask); add(frame, Scalar(100, 100, 0), segm, fgmask);
imshow("FG Segmentation", segm); imshow("FG Segmentation", segm);

View File

@ -51,7 +51,7 @@ int main(int argc, const char** argv)
namedWindow("foreground image", CV_WINDOW_NORMAL); namedWindow("foreground image", CV_WINDOW_NORMAL);
namedWindow("mean background image", CV_WINDOW_NORMAL); namedWindow("mean background image", CV_WINDOW_NORMAL);
BackgroundSubtractorMOG2 bg_model;//(100, 3, 0.3, 5); Ptr<BackgroundSubtractor> bg_model = createBackgroundSubtractorMOG2();
Mat img, fgmask, fgimg; Mat img, fgmask, fgimg;
@ -68,13 +68,13 @@ int main(int argc, const char** argv)
fgimg.create(img.size(), img.type()); fgimg.create(img.size(), img.type());
//update the model //update the model
bg_model(img, fgmask, update_bg_model ? -1 : 0); bg_model->apply(img, fgmask, update_bg_model ? -1 : 0);
fgimg = Scalar::all(0); fgimg = Scalar::all(0);
img.copyTo(fgimg, fgmask); img.copyTo(fgimg, fgmask);
Mat bgimg; Mat bgimg;
bg_model.getBackgroundImage(bgimg); bg_model->getBackgroundImage(bgimg);
imshow("image", img); imshow("image", img);
imshow("foreground mask", fgmask); imshow("foreground mask", fgmask);

View File

@ -87,15 +87,15 @@ int main(int argc, char** argv)
namedWindow("video", 1); namedWindow("video", 1);
namedWindow("segmented", 1); namedWindow("segmented", 1);
BackgroundSubtractorMOG bgsubtractor; Ptr<BackgroundSubtractorMOG> bgsubtractor=createBackgroundSubtractorMOG();
bgsubtractor.set("noiseSigma", 10); bgsubtractor->setNoiseSigma(10);
for(;;) for(;;)
{ {
cap >> tmp_frame; cap >> tmp_frame;
if( !tmp_frame.data ) if( !tmp_frame.data )
break; break;
bgsubtractor(tmp_frame, bgmask, update_bg_model ? -1 : 0); bgsubtractor->apply(tmp_frame, bgmask, update_bg_model ? -1 : 0);
//CvMat _bgmask = bgmask; //CvMat _bgmask = bgmask;
//cvSegmentFGMask(&_bgmask); //cvSegmentFGMask(&_bgmask);
refineSegments(tmp_frame, bgmask, out_frame); refineSegments(tmp_frame, bgmask, out_frame);

View File

@ -1316,10 +1316,10 @@ TEST(MOG)
cv::Mat frame; cv::Mat frame;
cap >> frame; cap >> frame;
cv::BackgroundSubtractorMOG mog; cv::Ptr<cv::BackgroundSubtractor> mog = cv::createBackgroundSubtractorMOG();
cv::Mat foreground; cv::Mat foreground;
mog(frame, foreground, 0.01); mog->apply(frame, foreground, 0.01);
while (!TestSystem::instance().stop()) while (!TestSystem::instance().stop())
{ {
@ -1327,7 +1327,7 @@ TEST(MOG)
TestSystem::instance().cpuOn(); TestSystem::instance().cpuOn();
mog(frame, foreground, 0.01); mog->apply(frame, foreground, 0.01);
TestSystem::instance().cpuOff(); TestSystem::instance().cpuOff();
} }
@ -1367,12 +1367,12 @@ TEST(MOG2)
cv::Mat frame; cv::Mat frame;
cap >> frame; cap >> frame;
cv::BackgroundSubtractorMOG2 mog2; cv::Ptr<cv::BackgroundSubtractor> mog2 = cv::createBackgroundSubtractorMOG2();
cv::Mat foreground; cv::Mat foreground;
cv::Mat background; cv::Mat background;
mog2(frame, foreground); mog2->apply(frame, foreground);
mog2.getBackgroundImage(background); mog2->getBackgroundImage(background);
while (!TestSystem::instance().stop()) while (!TestSystem::instance().stop())
{ {
@ -1380,8 +1380,8 @@ TEST(MOG2)
TestSystem::instance().cpuOn(); TestSystem::instance().cpuOn();
mog2(frame, foreground); mog2->apply(frame, foreground);
mog2.getBackgroundImage(background); mog2->getBackgroundImage(background);
TestSystem::instance().cpuOff(); TestSystem::instance().cpuOff();
} }