Merge pull request #13029 from alalek:move_videostab_contrib

commit 9b978afaa2
Author: Alexander Alekhin
Date: 2018-11-08 19:03:14 +03:00 (committed by GitHub)
35 changed files with 0 additions and 6834 deletions

modules/videostab/CMakeLists.txt

@@ -1,12 +0,0 @@
set(the_description "Video stabilization")
if(HAVE_CUDA)
ocv_warnings_disable(CMAKE_CXX_FLAGS -Wundef -Wmissing-declarations -Wshadow -Wunused-parameter)
endif()
if(DEFINED WINRT AND NOT DEFINED ENABLE_WINRT_MODE_NATIVE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /ZW")
endif()
ocv_define_module(videostab opencv_imgproc opencv_features2d opencv_video opencv_photo opencv_calib3d
OPTIONAL opencv_cudawarping opencv_cudaoptflow opencv_videoio WRAP python)

modules/videostab/include/opencv2/videostab.hpp

@@ -1,81 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_HPP
#define OPENCV_VIDEOSTAB_HPP
/**
@defgroup videostab Video Stabilization
The video stabilization module contains a set of functions and classes that can be used to solve the
problem of video stabilization. Several methods are implemented, most of which are described in
the papers @cite OF06 and @cite G11 . However, there are some extensions and deviations from the original
paper methods.
### References
1. "Full-Frame Video Stabilization with Motion Inpainting"
Yasuyuki Matsushita, Eyal Ofek, Weina Ge, Xiaoou Tang, Senior Member, and Heung-Yeung Shum
2. "Auto-Directed Video Stabilization with Robust L1 Optimal Camera Paths"
Matthias Grundmann, Vivek Kwatra, Irfan Essa
@{
@defgroup videostab_motion Global Motion Estimation
The video stabilization module contains a set of functions and classes for global motion estimation
between point clouds or between images. In the latter case, features are extracted and matched
internally. For convenience, the motion estimation functions are wrapped into classes.
Both the functions and the classes are available.
@defgroup videostab_marching Fast Marching Method
The Fast Marching Method @cite Telea04 is used in one of the video stabilization routines to do motion and
color inpainting. The method is implemented in a flexible way and is made public for other users.
@}
*/
#include "opencv2/videostab/stabilizer.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
#endif
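
For orientation, a minimal usage sketch of the pipeline this umbrella header exposes. It assumes the OnePassStabilizer class declared in stabilizer.hpp (included above but not reproduced in this excerpt), a hypothetical "input.avi", and an OpenCV build with the optional videoio module; default motion estimation settings are used.

#include <iostream>
#include "opencv2/videostab.hpp"

int main()
{
    using namespace cv;
    using namespace cv::videostab;

    // The stabilizer is itself an IFrameSource: configure it, then pull frames from it.
    Ptr<OnePassStabilizer> stabilizer = makePtr<OnePassStabilizer>();
    stabilizer->setFrameSource(makePtr<VideoFileSource>("input.avi"));      // hypothetical path
    stabilizer->setMotionFilter(makePtr<GaussianMotionFilter>(15, -1.f));   // smooth over +/-15 frames

    for (Mat frame = stabilizer->nextFrame(); !frame.empty(); frame = stabilizer->nextFrame())
        std::cout << "stabilized frame " << frame.cols << "x" << frame.rows << std::endl;
    return 0;
}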

modules/videostab/include/opencv2/videostab/deblurring.hpp

@@ -1,116 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_DEBLURRING_HPP
#define OPENCV_VIDEOSTAB_DEBLURRING_HPP
#include <vector>
#include "opencv2/core.hpp"
namespace cv
{
namespace videostab
{
//! @addtogroup videostab
//! @{
CV_EXPORTS float calcBlurriness(const Mat &frame);
class CV_EXPORTS DeblurerBase
{
public:
DeblurerBase() : radius_(0), frames_(0), motions_(0), blurrinessRates_(0) {}
virtual ~DeblurerBase() {}
virtual void setRadius(int val) { radius_ = val; }
virtual int radius() const { return radius_; }
virtual void deblur(int idx, Mat &frame) = 0;
// data from stabilizer
virtual void setFrames(const std::vector<Mat> &val) { frames_ = &val; }
virtual const std::vector<Mat>& frames() const { return *frames_; }
virtual void setMotions(const std::vector<Mat> &val) { motions_ = &val; }
virtual const std::vector<Mat>& motions() const { return *motions_; }
virtual void setBlurrinessRates(const std::vector<float> &val) { blurrinessRates_ = &val; }
virtual const std::vector<float>& blurrinessRates() const { return *blurrinessRates_; }
protected:
int radius_;
const std::vector<Mat> *frames_;
const std::vector<Mat> *motions_;
const std::vector<float> *blurrinessRates_;
};
class CV_EXPORTS NullDeblurer : public DeblurerBase
{
public:
virtual void deblur(int /*idx*/, Mat &/*frame*/) CV_OVERRIDE {}
};
class CV_EXPORTS WeightingDeblurer : public DeblurerBase
{
public:
WeightingDeblurer();
void setSensitivity(float val) { sensitivity_ = val; }
float sensitivity() const { return sensitivity_; }
virtual void deblur(int idx, Mat &frame) CV_OVERRIDE;
private:
float sensitivity_;
Mat_<float> bSum_, gSum_, rSum_, wSum_;
};
//! @}
} // namespace videostab
} // namespace cv
#endif
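
A sketch of driving WeightingDeblurer directly; in normal use the stabilizer supplies the frames, pair-wise motions, and blurriness rates itself. The radius and sensitivity values here are illustrative assumptions, and the motions vector is assumed to come from one of the global motion estimators declared further below.

#include <vector>
#include "opencv2/videostab/deblurring.hpp"

void deblurMiddleFrame(std::vector<cv::Mat> &frames, const std::vector<cv::Mat> &motions)
{
    using namespace cv::videostab;

    // Per-frame blurriness rates drive the weighting scheme.
    std::vector<float> rates;
    for (size_t i = 0; i < frames.size(); ++i)
        rates.push_back(calcBlurriness(frames[i]));

    WeightingDeblurer deblurer;
    deblurer.setRadius(3);             // use +/-3 neighbouring frames
    deblurer.setSensitivity(0.1f);     // illustrative value
    deblurer.setFrames(frames);
    deblurer.setMotions(motions);      // pair-wise motions, e.g. from a global motion estimator
    deblurer.setBlurrinessRates(rates);

    int idx = static_cast<int>(frames.size()) / 2;
    deblurer.deblur(idx, frames[idx]); // sharpens the chosen frame in place
}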

modules/videostab/include/opencv2/videostab/fast_marching.hpp

@@ -1,121 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_FAST_MARCHING_HPP
#define OPENCV_VIDEOSTAB_FAST_MARCHING_HPP
#include <cmath>
#include <queue>
#include <algorithm>
#include "opencv2/core.hpp"
namespace cv
{
namespace videostab
{
//! @addtogroup videostab_marching
//! @{
/** @brief Describes the Fast Marching Method implementation.
See http://iwi.eldoc.ub.rug.nl/FILES/root/2004/JGraphToolsTelea/2004JGraphToolsTelea.pdf
*/
class CV_EXPORTS FastMarchingMethod
{
public:
FastMarchingMethod() : inf_(1e6f), size_(0) {}
/** @brief Template method that runs the Fast Marching Method.
    @param mask Image mask. A zero value indicates that the pixel must be inpainted, 255 indicates
    that the pixel value is known; other values are not acceptable.
@param inpaint Inpainting functor that overloads void operator ()(int x, int y).
@return Inpainting functor.
*/
template <typename Inpaint>
Inpaint run(const Mat &mask, Inpaint inpaint);
/**
    @return Distance map that is built while the method runs.
*/
Mat distanceMap() const { return dist_; }
private:
enum { INSIDE = 0, BAND = 1, KNOWN = 255 };
struct DXY
{
float dist;
int x, y;
DXY() : dist(0), x(0), y(0) {}
DXY(float _dist, int _x, int _y) : dist(_dist), x(_x), y(_y) {}
bool operator <(const DXY &dxy) const { return dist < dxy.dist; }
};
float solve(int x1, int y1, int x2, int y2) const;
int& indexOf(const DXY &dxy) { return index_(dxy.y, dxy.x); }
void heapUp(int idx);
void heapDown(int idx);
void heapAdd(const DXY &dxy);
void heapRemoveMin();
float inf_;
cv::Mat_<uchar> flag_; // flag map
cv::Mat_<float> dist_; // distance map
cv::Mat_<int> index_; // index of point in the narrow band
std::vector<DXY> narrowBand_; // narrow band heap
int size_; // narrow band size
};
//! @}
} // namespace videostab
} // namespace cv
#include "fast_marching_inl.hpp"
#endif
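
A small sketch of the template interface: the functor passed to run() is invoked for every pixel as the front reaches it, in order of increasing distance from the known region, and is returned by value. The mask geometry below is an arbitrary example.

#include <vector>
#include "opencv2/imgproc.hpp"
#include "opencv2/videostab/fast_marching.hpp"

struct CollectOrder
{
    std::vector<cv::Point> order;                        // pixels in increasing-distance order
    void operator()(int x, int y) { order.push_back(cv::Point(x, y)); }
};

int main()
{
    cv::Mat mask(64, 64, CV_8U, cv::Scalar(255));                       // 255 = known
    cv::rectangle(mask, cv::Rect(20, 20, 16, 16), cv::Scalar(0), -1);   // 0 = to be inpainted

    cv::videostab::FastMarchingMethod fmm;
    CollectOrder result = fmm.run(mask, CollectOrder());
    cv::Mat dist = fmm.distanceMap();                                   // CV_32F distance map
    return result.order.empty() ? 1 : 0;
}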

modules/videostab/include/opencv2/videostab/fast_marching_inl.hpp

@@ -1,165 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP
#define OPENCV_VIDEOSTAB_FAST_MARCHING_INL_HPP
#include "opencv2/videostab/fast_marching.hpp"
namespace cv
{
namespace videostab
{
template <typename Inpaint>
Inpaint FastMarchingMethod::run(const cv::Mat &mask, Inpaint inpaint)
{
using namespace cv;
CV_Assert(mask.type() == CV_8U);
static const int lut[4][2] = {{-1,0}, {0,-1}, {1,0}, {0,1}};
mask.copyTo(flag_);
flag_.create(mask.size());
dist_.create(mask.size());
index_.create(mask.size());
narrowBand_.clear();
size_ = 0;
// init
for (int y = 0; y < flag_.rows; ++y)
{
for (int x = 0; x < flag_.cols; ++x)
{
if (flag_(y,x) == KNOWN)
dist_(y,x) = 0.f;
else
{
int n = 0;
int nunknown = 0;
for (int i = 0; i < 4; ++i)
{
int xn = x + lut[i][0];
int yn = y + lut[i][1];
if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows)
{
n++;
if (flag_(yn,xn) != KNOWN)
nunknown++;
}
}
if (n>0 && nunknown == n)
{
dist_(y,x) = inf_;
flag_(y,x) = INSIDE;
}
else
{
dist_(y,x) = 0.f;
flag_(y,x) = BAND;
inpaint(x, y);
narrowBand_.push_back(DXY(0.f,x,y));
index_(y,x) = size_++;
}
}
}
}
// make heap
for (int i = size_/2-1; i >= 0; --i)
heapDown(i);
// main cycle
while (size_ > 0)
{
int x = narrowBand_[0].x;
int y = narrowBand_[0].y;
heapRemoveMin();
flag_(y,x) = KNOWN;
for (int n = 0; n < 4; ++n)
{
int xn = x + lut[n][0];
int yn = y + lut[n][1];
if (xn >= 0 && xn < flag_.cols && yn >= 0 && yn < flag_.rows && flag_(yn,xn) != KNOWN)
{
dist_(yn,xn) = std::min(std::min(solve(xn-1, yn, xn, yn-1), solve(xn+1, yn, xn, yn-1)),
std::min(solve(xn-1, yn, xn, yn+1), solve(xn+1, yn, xn, yn+1)));
if (flag_(yn,xn) == INSIDE)
{
flag_(yn,xn) = BAND;
inpaint(xn, yn);
heapAdd(DXY(dist_(yn,xn),xn,yn));
}
else
{
int i = index_(yn,xn);
if (dist_(yn,xn) < narrowBand_[i].dist)
{
narrowBand_[i].dist = dist_(yn,xn);
heapUp(i);
}
// works better if it's commented out
/*else if (dist(yn,xn) > narrowBand[i].dist)
{
narrowBand[i].dist = dist(yn,xn);
heapDown(i);
}*/
}
}
}
}
return inpaint;
}
} // namespace videostab
} // namespace cv
#endif

modules/videostab/include/opencv2/videostab/frame_source.hpp

@@ -1,94 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP
#define OPENCV_VIDEOSTAB_FRAME_SOURCE_HPP
#include <vector>
#include "opencv2/core.hpp"
namespace cv
{
namespace videostab
{
//! @addtogroup videostab
//! @{
class CV_EXPORTS IFrameSource
{
public:
virtual ~IFrameSource() {}
virtual void reset() = 0;
virtual Mat nextFrame() = 0;
};
class CV_EXPORTS NullFrameSource : public IFrameSource
{
public:
virtual void reset() CV_OVERRIDE {}
virtual Mat nextFrame() CV_OVERRIDE { return Mat(); }
};
class CV_EXPORTS VideoFileSource : public IFrameSource
{
public:
VideoFileSource(const String &path, bool volatileFrame = false);
virtual void reset() CV_OVERRIDE;
virtual Mat nextFrame() CV_OVERRIDE;
int width();
int height();
int count();
double fps();
private:
Ptr<IFrameSource> impl;
};
//! @}
} // namespace videostab
} // namespace cv
#endif
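
A minimal sketch of pulling frames from a video file, assuming a hypothetical "input.avi" and an OpenCV build with the optional videoio module, which backs VideoFileSource:

#include <iostream>
#include "opencv2/videostab/frame_source.hpp"

int main()
{
    cv::videostab::VideoFileSource source("input.avi");   // hypothetical path
    std::cout << source.width() << "x" << source.height()
              << " @ " << source.fps() << " fps, "
              << source.count() << " frames" << std::endl;

    int read = 0;
    for (cv::Mat frame = source.nextFrame(); !frame.empty(); frame = source.nextFrame())
        ++read;
    std::cout << "decoded " << read << " frames" << std::endl;
    return 0;
}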

modules/videostab/include/opencv2/videostab/global_motion.hpp

@@ -1,300 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP
#define OPENCV_VIDEOSTAB_GLOBAL_MOTION_HPP
#include <vector>
#include <fstream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/opencv_modules.hpp"
#include "opencv2/videostab/optical_flow.hpp"
#include "opencv2/videostab/motion_core.hpp"
#include "opencv2/videostab/outlier_rejection.hpp"
#ifdef HAVE_OPENCV_CUDAIMGPROC
# include "opencv2/cudaimgproc.hpp"
#endif
namespace cv
{
namespace videostab
{
//! @addtogroup videostab_motion
//! @{
/** @brief Estimates best global motion between two 2D point clouds in the least-squares sense.
@note Works in-place and changes input point arrays.
@param points0 Source set of 2D points (32F).
@param points1 Destination set of 2D points (32F).
@param model Motion model (up to MM_AFFINE).
@param rmse Final root-mean-square error.
@return 3x3 2D transformation matrix (32F).
*/
CV_EXPORTS Mat estimateGlobalMotionLeastSquares(
InputOutputArray points0, InputOutputArray points1, int model = MM_AFFINE,
float *rmse = 0);
/** @brief Estimates best global motion between two 2D point clouds robustly (using RANSAC method).
@param points0 Source set of 2D points (32F).
@param points1 Destination set of 2D points (32F).
@param model Motion model. See cv::videostab::MotionModel.
@param params RANSAC method parameters. See videostab::RansacParams.
@param rmse Final root-mean-square error.
@param ninliers Final number of inliers.
*/
CV_EXPORTS Mat estimateGlobalMotionRansac(
InputArray points0, InputArray points1, int model = MM_AFFINE,
const RansacParams &params = RansacParams::default2dMotion(MM_AFFINE),
float *rmse = 0, int *ninliers = 0);
/** @brief Base class for all global motion estimation methods.
*/
class CV_EXPORTS MotionEstimatorBase
{
public:
virtual ~MotionEstimatorBase() {}
/** @brief Sets motion model.
@param val Motion model. See cv::videostab::MotionModel.
*/
virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
/**
@return Motion model. See cv::videostab::MotionModel.
*/
virtual MotionModel motionModel() const { return motionModel_; }
/** @brief Estimates global motion between two 2D point clouds.
@param points0 Source set of 2D points (32F).
@param points1 Destination set of 2D points (32F).
@param ok Indicates whether motion was estimated successfully.
@return 3x3 2D transformation matrix (32F).
*/
virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) = 0;
protected:
MotionEstimatorBase(MotionModel model) { setMotionModel(model); }
private:
MotionModel motionModel_;
};
/** @brief Describes a robust RANSAC-based global 2D motion estimation method which minimizes L2 error.
*/
class CV_EXPORTS MotionEstimatorRansacL2 : public MotionEstimatorBase
{
public:
MotionEstimatorRansacL2(MotionModel model = MM_AFFINE);
void setRansacParams(const RansacParams &val) { ransacParams_ = val; }
RansacParams ransacParams() const { return ransacParams_; }
void setMinInlierRatio(float val) { minInlierRatio_ = val; }
float minInlierRatio() const { return minInlierRatio_; }
virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) CV_OVERRIDE;
private:
RansacParams ransacParams_;
float minInlierRatio_;
};
/** @brief Describes a global 2D motion estimation method which minimizes L1 error.
@note To be able to use this method you must build OpenCV with CLP library support. :
*/
class CV_EXPORTS MotionEstimatorL1 : public MotionEstimatorBase
{
public:
MotionEstimatorL1(MotionModel model = MM_AFFINE);
virtual Mat estimate(InputArray points0, InputArray points1, bool *ok = 0) CV_OVERRIDE;
private:
std::vector<double> obj_, collb_, colub_;
std::vector<double> elems_, rowlb_, rowub_;
std::vector<int> rows_, cols_;
void set(int row, int col, double coef)
{
rows_.push_back(row);
cols_.push_back(col);
elems_.push_back(coef);
}
};
/** @brief Base class for global 2D motion estimation methods which take frames as input.
*/
class CV_EXPORTS ImageMotionEstimatorBase
{
public:
virtual ~ImageMotionEstimatorBase() {}
virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
virtual MotionModel motionModel() const { return motionModel_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) = 0;
protected:
ImageMotionEstimatorBase(MotionModel model) { setMotionModel(model); }
private:
MotionModel motionModel_;
};
class CV_EXPORTS FromFileMotionReader : public ImageMotionEstimatorBase
{
public:
FromFileMotionReader(const String &path);
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE;
private:
std::ifstream file_;
};
class CV_EXPORTS ToFileMotionWriter : public ImageMotionEstimatorBase
{
public:
ToFileMotionWriter(const String &path, Ptr<ImageMotionEstimatorBase> estimator);
virtual void setMotionModel(MotionModel val) CV_OVERRIDE { motionEstimator_->setMotionModel(val); }
virtual MotionModel motionModel() const CV_OVERRIDE { return motionEstimator_->motionModel(); }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE;
private:
std::ofstream file_;
Ptr<ImageMotionEstimatorBase> motionEstimator_;
};
/** @brief Describes a global 2D motion estimation method which uses keypoint detection and optical flow for
matching.
*/
class CV_EXPORTS KeypointBasedMotionEstimator : public ImageMotionEstimatorBase
{
public:
KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator);
virtual void setMotionModel(MotionModel val) CV_OVERRIDE { motionEstimator_->setMotionModel(val); }
virtual MotionModel motionModel() const CV_OVERRIDE { return motionEstimator_->motionModel(); }
void setDetector(Ptr<FeatureDetector> val) { detector_ = val; }
Ptr<FeatureDetector> detector() const { return detector_; }
void setOpticalFlowEstimator(Ptr<ISparseOptFlowEstimator> val) { optFlowEstimator_ = val; }
Ptr<ISparseOptFlowEstimator> opticalFlowEstimator() const { return optFlowEstimator_; }
void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE;
Mat estimate(InputArray frame0, InputArray frame1, bool *ok = 0);
private:
Ptr<MotionEstimatorBase> motionEstimator_;
Ptr<FeatureDetector> detector_;
Ptr<ISparseOptFlowEstimator> optFlowEstimator_;
Ptr<IOutlierRejector> outlierRejector_;
std::vector<uchar> status_;
std::vector<KeyPoint> keypointsPrev_;
std::vector<Point2f> pointsPrev_, points_;
std::vector<Point2f> pointsPrevGood_, pointsGood_;
};
#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
class CV_EXPORTS KeypointBasedMotionEstimatorGpu : public ImageMotionEstimatorBase
{
public:
KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> estimator);
virtual void setMotionModel(MotionModel val) CV_OVERRIDE { motionEstimator_->setMotionModel(val); }
virtual MotionModel motionModel() const CV_OVERRIDE { return motionEstimator_->motionModel(); }
void setOutlierRejector(Ptr<IOutlierRejector> val) { outlierRejector_ = val; }
Ptr<IOutlierRejector> outlierRejector() const { return outlierRejector_; }
virtual Mat estimate(const Mat &frame0, const Mat &frame1, bool *ok = 0) CV_OVERRIDE;
Mat estimate(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, bool *ok = 0);
private:
Ptr<MotionEstimatorBase> motionEstimator_;
Ptr<cuda::CornersDetector> detector_;
SparsePyrLkOptFlowEstimatorGpu optFlowEstimator_;
Ptr<IOutlierRejector> outlierRejector_;
cuda::GpuMat frame0_, grayFrame0_, frame1_;
cuda::GpuMat pointsPrev_, points_;
cuda::GpuMat status_;
Mat hostPointsPrev_, hostPoints_;
std::vector<Point2f> hostPointsPrevTmp_, hostPointsTmp_;
std::vector<uchar> rejectionStatus_;
};
#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
/** @brief Computes motion between two frames assuming that all the intermediate motions are known.
@param from Source frame index.
@param to Destination frame index.
@param motions Pair-wise motions. motions[i] denotes the motion from frame i to frame i+1.
@return Motion from the Source frame to the Destination frame.
*/
CV_EXPORTS Mat getMotion(int from, int to, const std::vector<Mat> &motions);
//! @}
} // namespace videostab
} // namespace cv
#endif
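
A sketch of the keypoint-based estimator on a sequence of frames, composing the pair-wise results with getMotion(). The fall-back to an identity matrix on estimation failure is an assumption of this example, not library behaviour.

#include <vector>
#include "opencv2/videostab/global_motion.hpp"

cv::Mat cumulativeMotion(const std::vector<cv::Mat> &frames)
{
    using namespace cv;
    using namespace cv::videostab;

    // RANSAC-based affine estimator, wrapped so it works directly on images.
    KeypointBasedMotionEstimator estimator(makePtr<MotionEstimatorRansacL2>(MM_AFFINE));

    std::vector<Mat> motions;                                // motions[i]: frame i -> frame i+1
    for (size_t i = 0; i + 1 < frames.size(); ++i)
    {
        bool ok = false;
        Mat M = estimator.estimate(frames[i], frames[i + 1], &ok);
        motions.push_back(ok ? M : Mat::eye(3, 3, CV_32F));  // assumed fall-back: identity
    }
    // 3x3 motion from the first frame to the last one.
    return getMotion(0, static_cast<int>(motions.size()), motions);
}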

modules/videostab/include/opencv2/videostab/inpainting.hpp

@@ -1,212 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_INPAINTING_HPP
#define OPENCV_VIDEOSTAB_INPAINTING_HPP
#include <vector>
#include "opencv2/core.hpp"
#include "opencv2/videostab/optical_flow.hpp"
#include "opencv2/videostab/fast_marching.hpp"
#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/photo.hpp"
namespace cv
{
namespace videostab
{
//! @addtogroup videostab
//! @{
class CV_EXPORTS InpainterBase
{
public:
InpainterBase()
: radius_(0), motionModel_(MM_UNKNOWN), frames_(0), motions_(0),
stabilizedFrames_(0), stabilizationMotions_(0) {}
virtual ~InpainterBase() {}
virtual void setRadius(int val) { radius_ = val; }
virtual int radius() const { return radius_; }
virtual void setMotionModel(MotionModel val) { motionModel_ = val; }
virtual MotionModel motionModel() const { return motionModel_; }
virtual void inpaint(int idx, Mat &frame, Mat &mask) = 0;
// data from stabilizer
virtual void setFrames(const std::vector<Mat> &val) { frames_ = &val; }
virtual const std::vector<Mat>& frames() const { return *frames_; }
virtual void setMotions(const std::vector<Mat> &val) { motions_ = &val; }
virtual const std::vector<Mat>& motions() const { return *motions_; }
virtual void setStabilizedFrames(const std::vector<Mat> &val) { stabilizedFrames_ = &val; }
virtual const std::vector<Mat>& stabilizedFrames() const { return *stabilizedFrames_; }
virtual void setStabilizationMotions(const std::vector<Mat> &val) { stabilizationMotions_ = &val; }
virtual const std::vector<Mat>& stabilizationMotions() const { return *stabilizationMotions_; }
protected:
int radius_;
MotionModel motionModel_;
const std::vector<Mat> *frames_;
const std::vector<Mat> *motions_;
const std::vector<Mat> *stabilizedFrames_;
const std::vector<Mat> *stabilizationMotions_;
};
class CV_EXPORTS NullInpainter : public InpainterBase
{
public:
virtual void inpaint(int /*idx*/, Mat &/*frame*/, Mat &/*mask*/) CV_OVERRIDE {}
};
class CV_EXPORTS InpaintingPipeline : public InpainterBase
{
public:
void pushBack(Ptr<InpainterBase> inpainter) { inpainters_.push_back(inpainter); }
bool empty() const { return inpainters_.empty(); }
virtual void setRadius(int val) CV_OVERRIDE;
virtual void setMotionModel(MotionModel val) CV_OVERRIDE;
virtual void setFrames(const std::vector<Mat> &val) CV_OVERRIDE;
virtual void setMotions(const std::vector<Mat> &val) CV_OVERRIDE;
virtual void setStabilizedFrames(const std::vector<Mat> &val) CV_OVERRIDE;
virtual void setStabilizationMotions(const std::vector<Mat> &val) CV_OVERRIDE;
virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE;
private:
std::vector<Ptr<InpainterBase> > inpainters_;
};
class CV_EXPORTS ConsistentMosaicInpainter : public InpainterBase
{
public:
ConsistentMosaicInpainter();
void setStdevThresh(float val) { stdevThresh_ = val; }
float stdevThresh() const { return stdevThresh_; }
virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE;
private:
float stdevThresh_;
};
class CV_EXPORTS MotionInpainter : public InpainterBase
{
public:
MotionInpainter();
void setOptFlowEstimator(Ptr<IDenseOptFlowEstimator> val) { optFlowEstimator_ = val; }
Ptr<IDenseOptFlowEstimator> optFlowEstimator() const { return optFlowEstimator_; }
void setFlowErrorThreshold(float val) { flowErrorThreshold_ = val; }
float flowErrorThreshold() const { return flowErrorThreshold_; }
void setDistThreshold(float val) { distThresh_ = val; }
float distThresh() const { return distThresh_; }
void setBorderMode(int val) { borderMode_ = val; }
int borderMode() const { return borderMode_; }
virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE;
private:
FastMarchingMethod fmm_;
Ptr<IDenseOptFlowEstimator> optFlowEstimator_;
float flowErrorThreshold_;
float distThresh_;
int borderMode_;
Mat frame1_, transformedFrame1_;
Mat_<uchar> grayFrame_, transformedGrayFrame1_;
Mat_<uchar> mask1_, transformedMask1_;
Mat_<float> flowX_, flowY_, flowErrors_;
Mat_<uchar> flowMask_;
};
class CV_EXPORTS ColorAverageInpainter : public InpainterBase
{
public:
virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE;
private:
FastMarchingMethod fmm_;
};
class CV_EXPORTS ColorInpainter : public InpainterBase
{
public:
ColorInpainter(int method = INPAINT_TELEA, double radius = 2.);
virtual void inpaint(int idx, Mat &frame, Mat &mask) CV_OVERRIDE;
private:
int method_;
double radius_;
Mat invMask_;
};
inline ColorInpainter::ColorInpainter(int _method, double _radius)
: method_(_method), radius_(_radius) {}
CV_EXPORTS void calcFlowMask(
const Mat &flowX, const Mat &flowY, const Mat &errors, float maxError,
const Mat &mask0, const Mat &mask1, Mat &flowMask);
CV_EXPORTS void completeFrameAccordingToFlow(
const Mat &flowMask, const Mat &flowX, const Mat &flowY, const Mat &frame1, const Mat &mask1,
float distThresh, Mat& frame0, Mat &mask0);
//! @}
} // namespace videostab
} // namespace cv
#endif
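
A sketch of composing inpainters the way the stabilizer consumes them: each inpainter in the pipeline refines whatever mask pixels the previous ones left unfilled. MotionInpainter is omitted here because its default optical-flow estimator in this version is the CUDA one; the radius value is an illustrative assumption.

#include "opencv2/videostab/inpainting.hpp"

cv::Ptr<cv::videostab::InpainterBase> makeInpainter()
{
    using namespace cv;
    using namespace cv::videostab;

    Ptr<InpaintingPipeline> pipeline = makePtr<InpaintingPipeline>();
    pipeline->pushBack(makePtr<ConsistentMosaicInpainter>());   // fill from neighbouring stabilized frames
    pipeline->pushBack(makePtr<ColorAverageInpainter>());       // fast-marching colour average as a fallback
    pipeline->setRadius(2);                                     // look +/-2 frames around the current one
    return pipeline;
}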

modules/videostab/include/opencv2/videostab/log.hpp

@@ -1,80 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_LOG_HPP
#define OPENCV_VIDEOSTAB_LOG_HPP
#include "opencv2/core.hpp"
namespace cv
{
namespace videostab
{
//! @addtogroup videostab
//! @{
class CV_EXPORTS ILog
{
public:
virtual ~ILog() {}
virtual void print(const char *format, ...) = 0;
};
class CV_EXPORTS NullLog : public ILog
{
public:
virtual void print(const char * /*format*/, ...) CV_OVERRIDE {}
};
class CV_EXPORTS LogToStdout : public ILog
{
public:
virtual void print(const char *format, ...) CV_OVERRIDE;
};
//! @}
} // namespace videostab
} // namespace cv
#endif
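
A sketch of a custom logger: ILog::print is variadic, so a printf-style forwarding implementation is the natural fit. The setLog call shown in the trailing comment is assumed from the stabilizer interface, which is not reproduced in this excerpt.

#include <cstdarg>
#include <cstdio>
#include "opencv2/videostab/log.hpp"

class PrefixedLog : public cv::videostab::ILog
{
public:
    virtual void print(const char *format, ...) CV_OVERRIDE
    {
        std::printf("[videostab] ");
        va_list args;
        va_start(args, format);
        std::vprintf(format, args);   // forward the message unchanged
        va_end(args);
    }
};

// Usage (assumed stabilizer API): stabilizer->setLog(cv::makePtr<PrefixedLog>());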

modules/videostab/include/opencv2/videostab/motion_core.hpp

@@ -1,129 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_MOTION_CORE_HPP
#define OPENCV_VIDEOSTAB_MOTION_CORE_HPP
#include <cmath>
#include "opencv2/core.hpp"
namespace cv
{
namespace videostab
{
//! @addtogroup videostab_motion
//! @{
/** @brief Describes motion model between two point clouds.
*/
enum MotionModel
{
MM_TRANSLATION = 0,
MM_TRANSLATION_AND_SCALE = 1,
MM_ROTATION = 2,
MM_RIGID = 3,
MM_SIMILARITY = 4,
MM_AFFINE = 5,
MM_HOMOGRAPHY = 6,
MM_UNKNOWN = 7
};
/** @brief Describes RANSAC method parameters.
*/
struct CV_EXPORTS RansacParams
{
int size; //!< subset size
float thresh; //!< max error to classify as inlier
float eps; //!< max outliers ratio
float prob; //!< probability of success
RansacParams() : size(0), thresh(0), eps(0), prob(0) {}
/** @brief Constructor
@param size Subset size.
@param thresh Maximum re-projection error value to classify as inlier.
@param eps Maximum ratio of incorrect correspondences.
@param prob Required success probability.
*/
RansacParams(int size, float thresh, float eps, float prob);
/**
    @return Number of iterations that will be performed by the RANSAC method.
*/
int niters() const
{
return static_cast<int>(
std::ceil(std::log(1 - prob) / std::log(1 - std::pow(1 - eps, size))));
}
/**
@param model Motion model. See cv::videostab::MotionModel.
@return Default RANSAC method parameters for the given motion model.
*/
static RansacParams default2dMotion(MotionModel model)
{
CV_Assert(model < MM_UNKNOWN);
if (model == MM_TRANSLATION)
return RansacParams(1, 0.5f, 0.5f, 0.99f);
if (model == MM_TRANSLATION_AND_SCALE)
return RansacParams(2, 0.5f, 0.5f, 0.99f);
if (model == MM_ROTATION)
return RansacParams(1, 0.5f, 0.5f, 0.99f);
if (model == MM_RIGID)
return RansacParams(2, 0.5f, 0.5f, 0.99f);
if (model == MM_SIMILARITY)
return RansacParams(2, 0.5f, 0.5f, 0.99f);
if (model == MM_AFFINE)
return RansacParams(3, 0.5f, 0.5f, 0.99f);
return RansacParams(4, 0.5f, 0.5f, 0.99f);
}
};
inline RansacParams::RansacParams(int _size, float _thresh, float _eps, float _prob)
: size(_size), thresh(_thresh), eps(_eps), prob(_prob) {}
//! @}
} // namespace videostab
} // namespace cv
#endif
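
A worked sketch of the iteration-count formula with the default affine parameters: size = 3, eps = 0.5, prob = 0.99 give ceil(log(1 - 0.99) / log(1 - 0.5^3)) = ceil(-4.605 / -0.134) = 35 iterations.

#include <iostream>
#include "opencv2/videostab/motion_core.hpp"

int main()
{
    using namespace cv::videostab;

    RansacParams p = RansacParams::default2dMotion(MM_AFFINE);  // size=3, thresh=0.5, eps=0.5, prob=0.99
    // niters = ceil(log(1 - 0.99) / log(1 - (1 - 0.5)^3)) = 35
    std::cout << "subset size: " << p.size << ", iterations: " << p.niters() << std::endl;
    return 0;
}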

modules/videostab/include/opencv2/videostab/motion_stabilizing.hpp

@@ -1,174 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP
#define OPENCV_VIDEOSTAB_MOTION_STABILIZING_HPP
#include <vector>
#include <utility>
#include "opencv2/core.hpp"
#include "opencv2/videostab/global_motion.hpp"
namespace cv
{
namespace videostab
{
//! @addtogroup videostab_motion
//! @{
class CV_EXPORTS IMotionStabilizer
{
public:
virtual ~IMotionStabilizer() {}
    //! assumes that [0, size-1) is within or equal to [range.first, range.second)
virtual void stabilize(
int size, const std::vector<Mat> &motions, std::pair<int,int> range,
Mat *stabilizationMotions) = 0;
};
class CV_EXPORTS MotionStabilizationPipeline : public IMotionStabilizer
{
public:
void pushBack(Ptr<IMotionStabilizer> stabilizer) { stabilizers_.push_back(stabilizer); }
bool empty() const { return stabilizers_.empty(); }
virtual void stabilize(
int size, const std::vector<Mat> &motions, std::pair<int,int> range,
Mat *stabilizationMotions) CV_OVERRIDE;
private:
std::vector<Ptr<IMotionStabilizer> > stabilizers_;
};
class CV_EXPORTS MotionFilterBase : public IMotionStabilizer
{
public:
virtual ~MotionFilterBase() {}
virtual Mat stabilize(
int idx, const std::vector<Mat> &motions, std::pair<int,int> range) = 0;
virtual void stabilize(
int size, const std::vector<Mat> &motions, std::pair<int,int> range,
Mat *stabilizationMotions) CV_OVERRIDE;
};
class CV_EXPORTS GaussianMotionFilter : public MotionFilterBase
{
public:
GaussianMotionFilter(int radius = 15, float stdev = -1.f);
void setParams(int radius, float stdev = -1.f);
int radius() const { return radius_; }
float stdev() const { return stdev_; }
virtual Mat stabilize(
int idx, const std::vector<Mat> &motions, std::pair<int,int> range) CV_OVERRIDE;
private:
int radius_;
float stdev_;
std::vector<float> weight_;
};
inline GaussianMotionFilter::GaussianMotionFilter(int _radius, float _stdev) { setParams(_radius, _stdev); }
class CV_EXPORTS LpMotionStabilizer : public IMotionStabilizer
{
public:
LpMotionStabilizer(MotionModel model = MM_SIMILARITY);
void setMotionModel(MotionModel val) { model_ = val; }
MotionModel motionModel() const { return model_; }
void setFrameSize(Size val) { frameSize_ = val; }
Size frameSize() const { return frameSize_; }
void setTrimRatio(float val) { trimRatio_ = val; }
float trimRatio() const { return trimRatio_; }
void setWeight1(float val) { w1_ = val; }
float weight1() const { return w1_; }
void setWeight2(float val) { w2_ = val; }
float weight2() const { return w2_; }
void setWeight3(float val) { w3_ = val; }
float weight3() const { return w3_; }
void setWeight4(float val) { w4_ = val; }
float weight4() const { return w4_; }
virtual void stabilize(
int size, const std::vector<Mat> &motions, std::pair<int,int> range,
Mat *stabilizationMotions) CV_OVERRIDE;
private:
MotionModel model_;
Size frameSize_;
float trimRatio_;
float w1_, w2_, w3_, w4_;
std::vector<double> obj_, collb_, colub_;
std::vector<int> rows_, cols_;
std::vector<double> elems_, rowlb_, rowub_;
void set(int row, int col, double coef)
{
rows_.push_back(row);
cols_.push_back(col);
elems_.push_back(coef);
}
};
CV_EXPORTS Mat ensureInclusionConstraint(const Mat &M, Size size, float trimRatio);
CV_EXPORTS float estimateOptimalTrimRatio(const Mat &M, Size size);
//! @}
} // namespace videostab
} // namespace cv
#endif
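
A sketch of smoothing a single frame's motion with the Gaussian filter and then limiting the correction so the warped frame still covers the output; the 10% trim ratio is an illustrative assumption.

#include <vector>
#include "opencv2/videostab/motion_stabilizing.hpp"

cv::Mat smoothMotionAt(int idx, const std::vector<cv::Mat> &motions, cv::Size frameSize)
{
    using namespace cv;
    using namespace cv::videostab;

    GaussianMotionFilter filter(15, -1.f);                            // radius 15, stdev derived from radius
    std::pair<int, int> range(0, static_cast<int>(motions.size()));   // frame indices covered by the motion chain
    Mat S = filter.stabilize(idx, motions, range);                    // 3x3 stabilizing transform for frame idx
    return ensureInclusionConstraint(S, frameSize, 0.1f);             // allow up to 10% trimming
}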

modules/videostab/include/opencv2/videostab/optical_flow.hpp

@@ -1,150 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP
#define OPENCV_VIDEOSTAB_OPTICAL_FLOW_HPP
#include "opencv2/core.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_CUDAOPTFLOW
#include "opencv2/cudaoptflow.hpp"
#endif
namespace cv
{
namespace videostab
{
//! @addtogroup videostab
//! @{
class CV_EXPORTS ISparseOptFlowEstimator
{
public:
virtual ~ISparseOptFlowEstimator() {}
virtual void run(
InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
OutputArray status, OutputArray errors) = 0;
};
class CV_EXPORTS IDenseOptFlowEstimator
{
public:
virtual ~IDenseOptFlowEstimator() {}
virtual void run(
InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
OutputArray errors) = 0;
};
class CV_EXPORTS PyrLkOptFlowEstimatorBase
{
public:
PyrLkOptFlowEstimatorBase() { setWinSize(Size(21, 21)); setMaxLevel(3); }
virtual void setWinSize(Size val) { winSize_ = val; }
virtual Size winSize() const { return winSize_; }
virtual void setMaxLevel(int val) { maxLevel_ = val; }
virtual int maxLevel() const { return maxLevel_; }
virtual ~PyrLkOptFlowEstimatorBase() {}
protected:
Size winSize_;
int maxLevel_;
};
class CV_EXPORTS SparsePyrLkOptFlowEstimator
: public PyrLkOptFlowEstimatorBase, public ISparseOptFlowEstimator
{
public:
virtual void run(
InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
OutputArray status, OutputArray errors) CV_OVERRIDE;
};
#ifdef HAVE_OPENCV_CUDAOPTFLOW
class CV_EXPORTS SparsePyrLkOptFlowEstimatorGpu
: public PyrLkOptFlowEstimatorBase, public ISparseOptFlowEstimator
{
public:
SparsePyrLkOptFlowEstimatorGpu();
virtual void run(
InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
OutputArray status, OutputArray errors) CV_OVERRIDE;
void run(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1,
cuda::GpuMat &status, cuda::GpuMat &errors);
void run(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0, cuda::GpuMat &points1,
cuda::GpuMat &status);
private:
Ptr<cuda::SparsePyrLKOpticalFlow> optFlowEstimator_;
cuda::GpuMat frame0_, frame1_, points0_, points1_, status_, errors_;
};
class CV_EXPORTS DensePyrLkOptFlowEstimatorGpu
: public PyrLkOptFlowEstimatorBase, public IDenseOptFlowEstimator
{
public:
DensePyrLkOptFlowEstimatorGpu();
virtual void run(
InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
OutputArray errors) CV_OVERRIDE;
private:
Ptr<cuda::DensePyrLKOpticalFlow> optFlowEstimator_;
cuda::GpuMat frame0_, frame1_, flowX_, flowY_, errors_;
};
#endif
//! @}
} // namespace videostab
} // namespace cv
#endif
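
A sketch of the CPU sparse estimator, which the keypoint-based motion estimator uses by default; the frames are assumed to be 8-bit images and points0 to come from a feature detector.

#include <vector>
#include "opencv2/videostab/optical_flow.hpp"

void trackPoints(const cv::Mat &frame0, const cv::Mat &frame1,
                 const std::vector<cv::Point2f> &points0,
                 std::vector<cv::Point2f> &points1,
                 std::vector<uchar> &status)
{
    using namespace cv;
    using namespace cv::videostab;

    SparsePyrLkOptFlowEstimator flow;
    flow.setWinSize(Size(21, 21));   // defaults shown explicitly
    flow.setMaxLevel(3);

    flow.run(frame0, frame1, points0, points1, status, noArray());   // per-point errors not needed
    // points1[i] is valid only where status[i] != 0
}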

modules/videostab/include/opencv2/videostab/outlier_rejection.hpp

@@ -1,101 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_OUTLIER_REJECTION_HPP
#define OPENCV_VIDEOSTAB_OUTLIER_REJECTION_HPP
#include <vector>
#include "opencv2/core.hpp"
#include "opencv2/videostab/motion_core.hpp"
namespace cv
{
namespace videostab
{
//! @addtogroup videostab
//! @{
class CV_EXPORTS IOutlierRejector
{
public:
virtual ~IOutlierRejector() {}
virtual void process(
Size frameSize, InputArray points0, InputArray points1, OutputArray mask) = 0;
};
class CV_EXPORTS NullOutlierRejector : public IOutlierRejector
{
public:
virtual void process(
Size frameSize, InputArray points0, InputArray points1, OutputArray mask) CV_OVERRIDE;
};
class CV_EXPORTS TranslationBasedLocalOutlierRejector : public IOutlierRejector
{
public:
TranslationBasedLocalOutlierRejector();
void setCellSize(Size val) { cellSize_ = val; }
Size cellSize() const { return cellSize_; }
void setRansacParams(RansacParams val) { ransacParams_ = val; }
RansacParams ransacParams() const { return ransacParams_; }
virtual void process(
Size frameSize, InputArray points0, InputArray points1, OutputArray mask) CV_OVERRIDE;
private:
Size cellSize_;
RansacParams ransacParams_;
typedef std::vector<int> Cell;
std::vector<Cell> grid_;
};
//! @}
} // namespace videostab
} // namespace cv
#endif
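
For orientation, a minimal usage sketch of the grid-based rejector declared above; the cell size and the translation RANSAC parameters are illustrative choices, not values mandated by the module.

#include "opencv2/videostab/outlier_rejection.hpp"
#include <vector>

// Sketch: mark tracked point pairs as inliers/outliers with per-cell RANSAC.
static std::vector<uchar> rejectOutliersSketch(cv::Size frameSize,
                                               const std::vector<cv::Point2f> &pts0,
                                               const std::vector<cv::Point2f> &pts1)
{
    cv::videostab::TranslationBasedLocalOutlierRejector rejector;
    rejector.setCellSize(cv::Size(50, 50));  // local RANSAC runs per 50x50 cell (illustrative)
    rejector.setRansacParams(
        cv::videostab::RansacParams::default2dMotion(cv::videostab::MM_TRANSLATION));
    std::vector<uchar> mask;                 // mask[i] != 0 marks pts0[i] <-> pts1[i] as an inlier
    rejector.process(frameSize, pts0, pts1, mask);
    return mask;
}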


@ -1,72 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_RING_BUFFER_HPP
#define OPENCV_VIDEOSTAB_RING_BUFFER_HPP
#include <vector>
#include "opencv2/imgproc.hpp"
namespace cv
{
namespace videostab
{
//! @addtogroup videostab
//! @{
template <typename T> inline T& at(int idx, std::vector<T> &items)
{
return items[cv::borderInterpolate(idx, static_cast<int>(items.size()), cv::BORDER_WRAP)];
}
template <typename T> inline const T& at(int idx, const std::vector<T> &items)
{
return items[cv::borderInterpolate(idx, static_cast<int>(items.size()), cv::BORDER_WRAP)];
}
//! @}
} // namespace videostab
} // namespace cv
#endif
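
A tiny standalone illustration of the wrap-around indexing provided by at(); the vector contents are arbitrary.

#include "opencv2/videostab/ring_buffer.hpp"
#include <iostream>
#include <vector>

int main()
{
    std::vector<int> history;
    history.push_back(10); history.push_back(20); history.push_back(30);
    // Out-of-range indices wrap via BORDER_WRAP, so the vector behaves like a ring buffer.
    std::cout << cv::videostab::at(-1, history) << std::endl;  // prints 30
    std::cout << cv::videostab::at(3, history)  << std::endl;  // prints 10
    return 0;
}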


@ -1,200 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_STABILIZER_HPP
#define OPENCV_VIDEOSTAB_STABILIZER_HPP
#include <vector>
#include <ctime>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/motion_stabilizing.hpp"
#include "opencv2/videostab/frame_source.hpp"
#include "opencv2/videostab/log.hpp"
#include "opencv2/videostab/inpainting.hpp"
#include "opencv2/videostab/deblurring.hpp"
#include "opencv2/videostab/wobble_suppression.hpp"
namespace cv
{
namespace videostab
{
//! @addtogroup videostab
//! @{
class CV_EXPORTS StabilizerBase
{
public:
virtual ~StabilizerBase() {}
void setLog(Ptr<ILog> ilog) { log_ = ilog; }
Ptr<ILog> log() const { return log_; }
void setRadius(int val) { radius_ = val; }
int radius() const { return radius_; }
void setFrameSource(Ptr<IFrameSource> val) { frameSource_ = val; }
Ptr<IFrameSource> frameSource() const { return frameSource_; }
void setMotionEstimator(Ptr<ImageMotionEstimatorBase> val) { motionEstimator_ = val; }
Ptr<ImageMotionEstimatorBase> motionEstimator() const { return motionEstimator_; }
void setDeblurer(Ptr<DeblurerBase> val) { deblurer_ = val; }
Ptr<DeblurerBase> deblurrer() const { return deblurer_; }
void setTrimRatio(float val) { trimRatio_ = val; }
float trimRatio() const { return trimRatio_; }
void setCorrectionForInclusion(bool val) { doCorrectionForInclusion_ = val; }
bool doCorrectionForInclusion() const { return doCorrectionForInclusion_; }
void setBorderMode(int val) { borderMode_ = val; }
int borderMode() const { return borderMode_; }
void setInpainter(Ptr<InpainterBase> val) { inpainter_ = val; }
Ptr<InpainterBase> inpainter() const { return inpainter_; }
protected:
StabilizerBase();
void reset();
Mat nextStabilizedFrame();
bool doOneIteration();
virtual void setUp(const Mat &firstFrame);
virtual Mat estimateMotion() = 0;
virtual Mat estimateStabilizationMotion() = 0;
void stabilizeFrame();
virtual Mat postProcessFrame(const Mat &frame);
void logProcessingTime();
Ptr<ILog> log_;
Ptr<IFrameSource> frameSource_;
Ptr<ImageMotionEstimatorBase> motionEstimator_;
Ptr<DeblurerBase> deblurer_;
Ptr<InpainterBase> inpainter_;
int radius_;
float trimRatio_;
bool doCorrectionForInclusion_;
int borderMode_;
Size frameSize_;
Mat frameMask_;
int curPos_;
int curStabilizedPos_;
bool doDeblurring_;
Mat preProcessedFrame_;
bool doInpainting_;
Mat inpaintingMask_;
Mat finalFrame_;
std::vector<Mat> frames_;
std::vector<Mat> motions_; // motions_[i] is the motion from the i-th to the (i+1)-th frame
std::vector<float> blurrinessRates_;
std::vector<Mat> stabilizedFrames_;
std::vector<Mat> stabilizedMasks_;
std::vector<Mat> stabilizationMotions_;
clock_t processingStartTime_;
};
class CV_EXPORTS OnePassStabilizer : public StabilizerBase, public IFrameSource
{
public:
OnePassStabilizer();
void setMotionFilter(Ptr<MotionFilterBase> val) { motionFilter_ = val; }
Ptr<MotionFilterBase> motionFilter() const { return motionFilter_; }
virtual void reset() CV_OVERRIDE;
virtual Mat nextFrame() CV_OVERRIDE { return nextStabilizedFrame(); }
protected:
virtual void setUp(const Mat &firstFrame) CV_OVERRIDE;
virtual Mat estimateMotion() CV_OVERRIDE;
virtual Mat estimateStabilizationMotion() CV_OVERRIDE;
virtual Mat postProcessFrame(const Mat &frame) CV_OVERRIDE;
Ptr<MotionFilterBase> motionFilter_;
};
class CV_EXPORTS TwoPassStabilizer : public StabilizerBase, public IFrameSource
{
public:
TwoPassStabilizer();
void setMotionStabilizer(Ptr<IMotionStabilizer> val) { motionStabilizer_ = val; }
Ptr<IMotionStabilizer> motionStabilizer() const { return motionStabilizer_; }
void setWobbleSuppressor(Ptr<WobbleSuppressorBase> val) { wobbleSuppressor_ = val; }
Ptr<WobbleSuppressorBase> wobbleSuppressor() const { return wobbleSuppressor_; }
void setEstimateTrimRatio(bool val) { mustEstTrimRatio_ = val; }
bool mustEstimateTrimaRatio() const { return mustEstTrimRatio_; }
virtual void reset() CV_OVERRIDE;
virtual Mat nextFrame() CV_OVERRIDE;
protected:
void runPrePassIfNecessary();
virtual void setUp(const Mat &firstFrame) CV_OVERRIDE;
virtual Mat estimateMotion() CV_OVERRIDE;
virtual Mat estimateStabilizationMotion() CV_OVERRIDE;
virtual Mat postProcessFrame(const Mat &frame) CV_OVERRIDE;
Ptr<IMotionStabilizer> motionStabilizer_;
Ptr<WobbleSuppressorBase> wobbleSuppressor_;
bool mustEstTrimRatio_;
int frameCount_;
bool isPrePassDone_;
bool doWobbleSuppression_;
std::vector<Mat> motions2_;
Mat suppressedFrame_;
};
//! @}
} // namespace videostab
} // namespace cv
#endif
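
As a usage sketch (assuming OpenCV is built with video I/O and highgui), the one-pass stabilizer declared above can be driven as an IFrameSource; the file name, smoothing radius and motion model below are placeholders.

#include "opencv2/videostab/stabilizer.hpp"
#include "opencv2/highgui.hpp"

int main()
{
    using namespace cv;
    using namespace cv::videostab;
    Ptr<OnePassStabilizer> stabilizer = makePtr<OnePassStabilizer>();
    stabilizer->setFrameSource(makePtr<VideoFileSource>("input.avi"));   // placeholder path
    stabilizer->setMotionEstimator(makePtr<KeypointBasedMotionEstimator>(
        makePtr<MotionEstimatorRansacL2>(MM_TRANSLATION)));
    stabilizer->setRadius(15);                                           // +/-15 frame smoothing window
    for (Mat frame = stabilizer->nextFrame(); !frame.empty(); frame = stabilizer->nextFrame())
    {
        imshow("stabilized", frame);
        if (waitKey(10) == 27) break;                                    // ESC quits
    }
    return 0;
}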


@ -1,140 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef OPENCV_VIDEOSTAB_WOBBLE_SUPPRESSION_HPP
#define OPENCV_VIDEOSTAB_WOBBLE_SUPPRESSION_HPP
#include <vector>
#include "opencv2/core.hpp"
#include "opencv2/core/cuda.hpp"
#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/log.hpp"
namespace cv
{
namespace videostab
{
//! @addtogroup videostab
//! @{
class CV_EXPORTS WobbleSuppressorBase
{
public:
WobbleSuppressorBase();
virtual ~WobbleSuppressorBase() {}
void setMotionEstimator(Ptr<ImageMotionEstimatorBase> val) { motionEstimator_ = val; }
Ptr<ImageMotionEstimatorBase> motionEstimator() const { return motionEstimator_; }
virtual void suppress(int idx, const Mat &frame, Mat &result) = 0;
// data from stabilizer
virtual void setFrameCount(int val) { frameCount_ = val; }
virtual int frameCount() const { return frameCount_; }
virtual void setMotions(const std::vector<Mat> &val) { motions_ = &val; }
virtual const std::vector<Mat>& motions() const { return *motions_; }
virtual void setMotions2(const std::vector<Mat> &val) { motions2_ = &val; }
virtual const std::vector<Mat>& motions2() const { return *motions2_; }
virtual void setStabilizationMotions(const std::vector<Mat> &val) { stabilizationMotions_ = &val; }
virtual const std::vector<Mat>& stabilizationMotions() const { return *stabilizationMotions_; }
protected:
Ptr<ImageMotionEstimatorBase> motionEstimator_;
int frameCount_;
const std::vector<Mat> *motions_;
const std::vector<Mat> *motions2_;
const std::vector<Mat> *stabilizationMotions_;
};
class CV_EXPORTS NullWobbleSuppressor : public WobbleSuppressorBase
{
public:
virtual void suppress(int idx, const Mat &frame, Mat &result) CV_OVERRIDE;
};
class CV_EXPORTS MoreAccurateMotionWobbleSuppressorBase : public WobbleSuppressorBase
{
public:
virtual void setPeriod(int val) { period_ = val; }
virtual int period() const { return period_; }
protected:
MoreAccurateMotionWobbleSuppressorBase() { setPeriod(30); }
int period_;
};
class CV_EXPORTS MoreAccurateMotionWobbleSuppressor : public MoreAccurateMotionWobbleSuppressorBase
{
public:
virtual void suppress(int idx, const Mat &frame, Mat &result) CV_OVERRIDE;
private:
Mat_<float> mapx_, mapy_;
};
#if defined(HAVE_OPENCV_CUDAWARPING)
class CV_EXPORTS MoreAccurateMotionWobbleSuppressorGpu : public MoreAccurateMotionWobbleSuppressorBase
{
public:
void suppress(int idx, const cuda::GpuMat &frame, cuda::GpuMat &result);
virtual void suppress(int idx, const Mat &frame, Mat &result) CV_OVERRIDE;
private:
cuda::GpuMat frameDevice_, resultDevice_;
cuda::GpuMat mapx_, mapy_;
};
#endif
//! @}
} // namespace videostab
} // namespace cv
#endif
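
A sketch of how a wobble suppressor is typically attached to the two-pass stabilizer; the homography motion model and the period of 30 frames are illustrative.

#include "opencv2/videostab/stabilizer.hpp"

static cv::Ptr<cv::videostab::TwoPassStabilizer> makeWobbleSuppressedStabilizer()
{
    using namespace cv;
    using namespace cv::videostab;
    Ptr<TwoPassStabilizer> stabilizer = makePtr<TwoPassStabilizer>();
    Ptr<MoreAccurateMotionWobbleSuppressor> ws = makePtr<MoreAccurateMotionWobbleSuppressor>();
    ws->setMotionEstimator(makePtr<KeypointBasedMotionEstimator>(
        makePtr<MotionEstimatorRansacL2>(MM_HOMOGRAPHY)));   // per-frame homographies for wobble removal
    ws->setPeriod(30);                                       // distance between "key" frames, in frames
    stabilizer->setWobbleSuppressor(ws);
    return stabilizer;
}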


@ -1,64 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_VIDEOSTAB_CLP_HPP__
#define __OPENCV_VIDEOSTAB_CLP_HPP__
#ifdef HAVE_CLP
# define COIN_BIG_INDEX 0
# define DEBUG_COIN 0
# define PRESOLVE_DEBUG 0
# define PRESOLVE_CONSISTENCY 0
# include "ClpSimplex.hpp"
# include "ClpPresolve.hpp"
# include "ClpPrimalColumnSteepest.hpp"
# include "ClpDualRowSteepest.hpp"
# define INF 1e10
#endif
// Clp replaces min and max with ?: globally, so std::min and std::max cannot be used
// when HAVE_CLP is defined. The macros are undefined here for the HAVE_CLP == 0 case as well.
#undef min
#undef max
#endif


@ -1,117 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#if !defined CUDA_DISABLER
#include <thrust/device_ptr.h>
#include <thrust/remove.h>
#include <thrust/functional.h>
#include "opencv2/core/cuda/common.hpp"
namespace cv { namespace cuda { namespace device { namespace globmotion {
__constant__ float cml[9];
__constant__ float cmr[9];
int compactPoints(int N, float *points0, float *points1, const uchar *mask)
{
thrust::device_ptr<float2> dpoints0((float2*)points0);
thrust::device_ptr<float2> dpoints1((float2*)points1);
thrust::device_ptr<const uchar> dmask(mask);
return (int)(thrust::remove_if(thrust::make_zip_iterator(thrust::make_tuple(dpoints0, dpoints1)),
thrust::make_zip_iterator(thrust::make_tuple(dpoints0 + N, dpoints1 + N)),
dmask, thrust::not1(thrust::identity<uchar>()))
- thrust::make_zip_iterator(make_tuple(dpoints0, dpoints1)));
}
__global__ void calcWobbleSuppressionMapsKernel(
const int left, const int idx, const int right, const int width, const int height,
PtrStepf mapx, PtrStepf mapy)
{
const int x = blockDim.x * blockIdx.x + threadIdx.x;
const int y = blockDim.y * blockIdx.y + threadIdx.y;
if (x < width && y < height)
{
float xl = cml[0]*x + cml[1]*y + cml[2];
float yl = cml[3]*x + cml[4]*y + cml[5];
float izl = 1.f / (cml[6]*x + cml[7]*y + cml[8]);
xl *= izl;
yl *= izl;
float xr = cmr[0]*x + cmr[1]*y + cmr[2];
float yr = cmr[3]*x + cmr[4]*y + cmr[5];
float izr = 1.f / (cmr[6]*x + cmr[7]*y + cmr[8]);
xr *= izr;
yr *= izr;
float wl = idx - left;
float wr = right - idx;
mapx(y,x) = (wr * xl + wl * xr) / (wl + wr);
mapy(y,x) = (wr * yl + wl * yr) / (wl + wr);
}
}
void calcWobbleSuppressionMaps(
int left, int idx, int right, int width, int height,
const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy)
{
cudaSafeCall(cudaMemcpyToSymbol(cml, ml, 9*sizeof(float)));
cudaSafeCall(cudaMemcpyToSymbol(cmr, mr, 9*sizeof(float)));
dim3 threads(32, 8);
dim3 grid(divUp(width, threads.x), divUp(height, threads.y));
calcWobbleSuppressionMapsKernel<<<grid, threads>>>(
left, idx, right, width, height, mapx, mapy);
cudaSafeCall(cudaGetLastError());
cudaSafeCall(cudaDeviceSynchronize());
}
}}}}
#endif /* CUDA_DISABLER */
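
For readers without CUDA at hand, a host-side sketch of the per-pixel computation done by calcWobbleSuppressionMapsKernel: the pixel is mapped through the row-major 3x3 matrices ml and mr, and the two projections are blended according to the frame's distance to the left and right key frames. The function name is illustrative.

static void wobbleMapAtPixel(const float ml[9], const float mr[9],
                             int left, int idx, int right, int x, int y,
                             float &mapX, float &mapY)
{
    float xl = ml[0]*x + ml[1]*y + ml[2];
    float yl = ml[3]*x + ml[4]*y + ml[5];
    float izl = 1.f / (ml[6]*x + ml[7]*y + ml[8]);
    xl *= izl; yl *= izl;                         // projective mapping through the "left" matrix

    float xr = mr[0]*x + mr[1]*y + mr[2];
    float yr = mr[3]*x + mr[4]*y + mr[5];
    float izr = 1.f / (mr[6]*x + mr[7]*y + mr[8]);
    xr *= izr; yr *= izr;                         // projective mapping through the "right" matrix

    float wl = (float)(idx - left);               // weight of the right projection
    float wr = (float)(right - idx);              // weight of the left projection
    mapX = (wr * xl + wl * xr) / (wl + wr);
    mapY = (wr * yl + wl * yr) / (wl + wr);
}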


@ -1,141 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/videostab/deblurring.hpp"
#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
namespace cv
{
namespace videostab
{
float calcBlurriness(const Mat &frame)
{
CV_INSTRUMENT_REGION();
Mat Gx, Gy;
Sobel(frame, Gx, CV_32F, 1, 0);
Sobel(frame, Gy, CV_32F, 0, 1);
double normGx = norm(Gx);
double normGy = norm(Gy);
double sumSq = normGx*normGx + normGy*normGy;
return static_cast<float>(1. / (sumSq / frame.size().area() + 1e-6));
}
WeightingDeblurer::WeightingDeblurer()
{
setSensitivity(0.1f);
}
void WeightingDeblurer::deblur(int idx, Mat &frame)
{
CV_INSTRUMENT_REGION();
CV_Assert(frame.type() == CV_8UC3);
bSum_.create(frame.size());
gSum_.create(frame.size());
rSum_.create(frame.size());
wSum_.create(frame.size());
for (int y = 0; y < frame.rows; ++y)
{
for (int x = 0; x < frame.cols; ++x)
{
Point3_<uchar> p = frame.at<Point3_<uchar> >(y,x);
bSum_(y,x) = p.x;
gSum_(y,x) = p.y;
rSum_(y,x) = p.z;
wSum_(y,x) = 1.f;
}
}
for (int k = idx - radius_; k <= idx + radius_; ++k)
{
const Mat &neighbor = at(k, *frames_);
float bRatio = at(idx, *blurrinessRates_) / at(k, *blurrinessRates_);
Mat_<float> M = getMotion(idx, k, *motions_);
if (bRatio > 1.f)
{
for (int y = 0; y < frame.rows; ++y)
{
for (int x = 0; x < frame.cols; ++x)
{
int x1 = cvRound(M(0,0)*x + M(0,1)*y + M(0,2));
int y1 = cvRound(M(1,0)*x + M(1,1)*y + M(1,2));
if (x1 >= 0 && x1 < neighbor.cols && y1 >= 0 && y1 < neighbor.rows)
{
const Point3_<uchar> &p = frame.at<Point3_<uchar> >(y,x);
const Point3_<uchar> &p1 = neighbor.at<Point3_<uchar> >(y1,x1);
float w = bRatio * sensitivity_ /
(sensitivity_ + std::abs(intensity(p1) - intensity(p)));
bSum_(y,x) += w * p1.x;
gSum_(y,x) += w * p1.y;
rSum_(y,x) += w * p1.z;
wSum_(y,x) += w;
}
}
}
}
}
for (int y = 0; y < frame.rows; ++y)
{
for (int x = 0; x < frame.cols; ++x)
{
float wSumInv = 1.f / wSum_(y,x);
frame.at<Point3_<uchar> >(y,x) = Point3_<uchar>(
static_cast<uchar>(bSum_(y,x)*wSumInv),
static_cast<uchar>(gSum_(y,x)*wSumInv),
static_cast<uchar>(rSum_(y,x)*wSumInv));
}
}
}
} // namespace videostab
} // namespace cv
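
A small sketch using the sharpness measure defined above to compare two frames; the file names are placeholders.

#include "opencv2/videostab/deblurring.hpp"
#include "opencv2/imgcodecs.hpp"
#include <cstdio>

int main()
{
    cv::Mat a = cv::imread("frame0.png");   // placeholder paths
    cv::Mat b = cv::imread("frame1.png");
    if (a.empty() || b.empty()) return 1;
    // calcBlurriness is roughly the inverse of the mean squared gradient,
    // so larger values indicate blurrier frames.
    float ba = cv::videostab::calcBlurriness(a);
    float bb = cv::videostab::calcBlurriness(b);
    std::printf("blurriness %g vs %g -> %s looks sharper\n",
                ba, bb, ba < bb ? "frame0" : "frame1");
    return 0;
}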


@ -1,141 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/videostab/fast_marching.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
namespace cv
{
namespace videostab
{
float FastMarchingMethod::solve(int x1, int y1, int x2, int y2) const
{
float sol = inf_;
if (y1 >=0 && y1 < flag_.rows && x1 >= 0 && x1 < flag_.cols && flag_(y1,x1) == KNOWN)
{
float t1 = dist_(y1,x1);
if (y2 >=0 && y2 < flag_.rows && x2 >= 0 && x2 < flag_.cols && flag_(y2,x2) == KNOWN)
{
float t2 = dist_(y2,x2);
float r = std::sqrt(2 - sqr(t1 - t2));
float s = (t1 + t2 - r) / 2;
if (s >= t1 && s >= t2)
sol = s;
else
{
s += r;
if (s >= t1 && s >= t2)
sol = s;
}
}
else
sol = 1 + t1;
}
else if (y2 >=0 && y2 < flag_.rows && x2 >= 0 && x2 < flag_.cols && flag_(y2,x2) == KNOWN)
sol = 1 + dist_(y2,x2);
return sol;
}
void FastMarchingMethod::heapUp(int idx)
{
int p = (idx-1)/2;
while (idx > 0 && narrowBand_[idx] < narrowBand_[p])
{
std::swap(indexOf(narrowBand_[p]), indexOf(narrowBand_[idx]));
std::swap(narrowBand_[p], narrowBand_[idx]);
idx = p;
p = (idx-1)/2;
}
}
void FastMarchingMethod::heapDown(int idx)
{
int l, r, smallest;
for(;;)
{
l = 2*idx+1;
r = 2*idx+2;
smallest = idx;
if (l < size_ && narrowBand_[l] < narrowBand_[smallest]) smallest = l;
if (r < size_ && narrowBand_[r] < narrowBand_[smallest]) smallest = r;
if (smallest == idx)
break;
else
{
std::swap(indexOf(narrowBand_[idx]), indexOf(narrowBand_[smallest]));
std::swap(narrowBand_[idx], narrowBand_[smallest]);
idx = smallest;
}
}
}
void FastMarchingMethod::heapAdd(const DXY &dxy)
{
if (static_cast<int>(narrowBand_.size()) < size_ + 1)
narrowBand_.resize(size_*2 + 1);
narrowBand_[size_] = dxy;
indexOf(dxy) = size_++;
heapUp(size_-1);
}
void FastMarchingMethod::heapRemoveMin()
{
if (size_ > 0)
{
size_--;
std::swap(indexOf(narrowBand_[0]), indexOf(narrowBand_[size_]));
std::swap(narrowBand_[0], narrowBand_[size_]);
heapDown(0);
}
}
} // namespace videostab
} // namespace cv
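
To make the solve() update above easier to follow, here is a standalone restatement of the same Eikonal step: given the known arrival times t1 and t2 of two axis-aligned neighbours, the candidate time s solves (s - t1)^2 + (s - t2)^2 = 1, and a root is accepted only if it is not smaller than either neighbour. The function name and the inf default are illustrative.

#include <cmath>

static float eikonalUpdate(float t1, float t2, float inf = 1e10f)
{
    float d = t1 - t2;
    if (d * d > 2.f)
        return inf;                           // roots would be complex; no consistent 2D update
    float r = std::sqrt(2.f - d * d);
    float s = (t1 + t2 - r) / 2.f;            // smaller root of (s - t1)^2 + (s - t2)^2 = 1
    if (s >= t1 && s >= t2)
        return s;
    s += r;                                   // otherwise try the larger root
    return (s >= t1 && s >= t2) ? s : inf;
}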


@ -1,120 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/videostab/frame_source.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
#include "opencv2/opencv_modules.hpp"
#ifdef HAVE_OPENCV_VIDEOIO
# include "opencv2/videoio.hpp"
#endif
namespace cv
{
namespace videostab
{
namespace {
class VideoFileSourceImpl : public IFrameSource
{
public:
VideoFileSourceImpl(const String &path, bool volatileFrame)
: path_(path), volatileFrame_(volatileFrame) { reset(); }
virtual void reset() CV_OVERRIDE
{
#ifdef HAVE_OPENCV_VIDEOIO
vc.release();
vc.open(path_);
if (!vc.isOpened())
CV_Error(0, "can't open file: " + path_);
#else
CV_Error(Error::StsNotImplemented, "OpenCV has been compiled without video I/O support");
#endif
}
virtual Mat nextFrame() CV_OVERRIDE
{
Mat frame;
#ifdef HAVE_OPENCV_VIDEOIO
vc >> frame;
#endif
return volatileFrame_ ? frame : frame.clone();
}
#ifdef HAVE_OPENCV_VIDEOIO
int width() {return static_cast<int>(vc.get(CAP_PROP_FRAME_WIDTH));}
int height() {return static_cast<int>(vc.get(CAP_PROP_FRAME_HEIGHT));}
int count() {return static_cast<int>(vc.get(CAP_PROP_FRAME_COUNT));}
double fps() {return vc.get(CAP_PROP_FPS);}
#else
int width() {return 0;}
int height() {return 0;}
int count() {return 0;}
double fps() {return 0;}
#endif
private:
String path_;
bool volatileFrame_;
#ifdef HAVE_OPENCV_VIDEOIO
VideoCapture vc;
#endif
};
}//namespace
VideoFileSource::VideoFileSource(const String &path, bool volatileFrame)
: impl(new VideoFileSourceImpl(path, volatileFrame)) {}
void VideoFileSource::reset() { impl->reset(); }
Mat VideoFileSource::nextFrame() { return impl->nextFrame(); }
int VideoFileSource::width() { return ((VideoFileSourceImpl*)impl.get())->width(); }
int VideoFileSource::height() { return ((VideoFileSourceImpl*)impl.get())->height(); }
int VideoFileSource::count() { return ((VideoFileSourceImpl*)impl.get())->count(); }
double VideoFileSource::fps() { return ((VideoFileSourceImpl*)impl.get())->fps(); }
} // namespace videostab
} // namespace cv
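
A sketch of the wrapper above (it assumes OpenCV was built with the videoio module, otherwise reset() raises an error); the path is a placeholder.

#include "opencv2/videostab/frame_source.hpp"
#include <cstdio>

int main()
{
    cv::videostab::VideoFileSource source("input.avi");   // placeholder path
    std::printf("%dx%d, %d frames at %.2f fps\n",
                source.width(), source.height(), source.count(), source.fps());
    int decoded = 0;
    for (cv::Mat f = source.nextFrame(); !f.empty(); f = source.nextFrame())
        ++decoded;
    std::printf("decoded %d frames\n", decoded);
    return 0;
}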


@ -1,882 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
#include "opencv2/videostab/outlier_rejection.hpp"
#include "opencv2/opencv_modules.hpp"
#include "clp.hpp"
#include "opencv2/core/private.cuda.hpp"
#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
namespace cv { namespace cuda {
static void compactPoints(GpuMat&, GpuMat&, const GpuMat&) { throw_no_cuda(); }
}}
#else
namespace cv { namespace cuda { namespace device { namespace globmotion {
int compactPoints(int N, float *points0, float *points1, const uchar *mask);
}}}}
namespace cv { namespace cuda {
static void compactPoints(GpuMat &points0, GpuMat &points1, const GpuMat &mask)
{
CV_Assert(points0.rows == 1 && points1.rows == 1 && mask.rows == 1);
CV_Assert(points0.type() == CV_32FC2 && points1.type() == CV_32FC2 && mask.type() == CV_8U);
CV_Assert(points0.cols == mask.cols && points1.cols == mask.cols);
int npoints = points0.cols;
int remaining = cv::cuda::device::globmotion::compactPoints(
npoints, (float*)points0.data, (float*)points1.data, mask.data);
points0 = points0.colRange(0, remaining);
points1 = points1.colRange(0, remaining);
}
}}
#endif
#endif
namespace cv
{
namespace videostab
{
// performs isotropic normalization: centers the points and scales them so the mean distance to the origin is sqrt(2)
static Mat normalizePoints(int npoints, Point2f *points)
{
float cx = 0.f, cy = 0.f;
for (int i = 0; i < npoints; ++i)
{
cx += points[i].x;
cy += points[i].y;
}
cx /= npoints;
cy /= npoints;
float d = 0.f;
for (int i = 0; i < npoints; ++i)
{
points[i].x -= cx;
points[i].y -= cy;
d += std::sqrt(sqr(points[i].x) + sqr(points[i].y));
}
d /= npoints;
float s = std::sqrt(2.f) / d;
for (int i = 0; i < npoints; ++i)
{
points[i].x *= s;
points[i].y *= s;
}
Mat_<float> T = Mat::eye(3, 3, CV_32F);
T(0,0) = T(1,1) = s;
T(0,2) = -cx*s;
T(1,2) = -cy*s;
return T;
}
static Mat estimateGlobMotionLeastSquaresTranslation(
int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
Mat_<float> M = Mat::eye(3, 3, CV_32F);
for (int i = 0; i < npoints; ++i)
{
M(0,2) += points1[i].x - points0[i].x;
M(1,2) += points1[i].y - points0[i].y;
}
M(0,2) /= npoints;
M(1,2) /= npoints;
if (rmse)
{
*rmse = 0;
for (int i = 0; i < npoints; ++i)
*rmse += sqr(points1[i].x - points0[i].x - M(0,2)) +
sqr(points1[i].y - points0[i].y - M(1,2));
*rmse = std::sqrt(*rmse / npoints);
}
return M;
}
static Mat estimateGlobMotionLeastSquaresTranslationAndScale(
int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
Mat_<float> T0 = normalizePoints(npoints, points0);
Mat_<float> T1 = normalizePoints(npoints, points1);
Mat_<float> A(2*npoints, 3), b(2*npoints, 1);
float *a0, *a1;
Point2f p0, p1;
for (int i = 0; i < npoints; ++i)
{
a0 = A[2*i];
a1 = A[2*i+1];
p0 = points0[i];
p1 = points1[i];
a0[0] = p0.x; a0[1] = 1; a0[2] = 0;
a1[0] = p0.y; a1[1] = 0; a1[2] = 1;
b(2*i,0) = p1.x;
b(2*i+1,0) = p1.y;
}
Mat_<float> sol;
solve(A, b, sol, DECOMP_NORMAL | DECOMP_LU);
if (rmse)
*rmse = static_cast<float>(norm(A*sol, b, NORM_L2) / std::sqrt(static_cast<double>(npoints)));
Mat_<float> M = Mat::eye(3, 3, CV_32F);
M(0,0) = M(1,1) = sol(0,0);
M(0,2) = sol(1,0);
M(1,2) = sol(2,0);
return T1.inv() * M * T0;
}
static Mat estimateGlobMotionLeastSquaresRotation(
int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
Point2f p0, p1;
float A(0), B(0);
for(int i=0; i<npoints; ++i)
{
p0 = points0[i];
p1 = points1[i];
A += p0.x*p1.x + p0.y*p1.y;
B += p0.x*p1.y - p1.x*p0.y;
}
// A*sin(alpha) + B*cos(alpha) = 0
float C = std::sqrt(A*A + B*B);
Mat_<float> M = Mat::eye(3, 3, CV_32F);
if ( C != 0 )
{
float sinAlpha = - B / C;
float cosAlpha = A / C;
M(0,0) = cosAlpha;
M(1,1) = M(0,0);
M(0,1) = sinAlpha;
M(1,0) = - M(0,1);
}
if (rmse)
{
*rmse = 0;
for (int i = 0; i < npoints; ++i)
{
p0 = points0[i];
p1 = points1[i];
*rmse += sqr(p1.x - M(0,0)*p0.x - M(0,1)*p0.y) +
sqr(p1.y - M(1,0)*p0.x - M(1,1)*p0.y);
}
*rmse = std::sqrt(*rmse / npoints);
}
return M;
}
static Mat estimateGlobMotionLeastSquaresRigid(
int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
Point2f mean0(0.f, 0.f);
Point2f mean1(0.f, 0.f);
for (int i = 0; i < npoints; ++i)
{
mean0 += points0[i];
mean1 += points1[i];
}
mean0 *= 1.f / npoints;
mean1 *= 1.f / npoints;
Mat_<float> A = Mat::zeros(2, 2, CV_32F);
Point2f pt0, pt1;
for (int i = 0; i < npoints; ++i)
{
pt0 = points0[i] - mean0;
pt1 = points1[i] - mean1;
A(0,0) += pt1.x * pt0.x;
A(0,1) += pt1.x * pt0.y;
A(1,0) += pt1.y * pt0.x;
A(1,1) += pt1.y * pt0.y;
}
Mat_<float> M = Mat::eye(3, 3, CV_32F);
SVD svd(A);
Mat_<float> R = svd.u * svd.vt;
Mat tmp(M(Rect(0,0,2,2)));
R.copyTo(tmp);
M(0,2) = mean1.x - R(0,0)*mean0.x - R(0,1)*mean0.y;
M(1,2) = mean1.y - R(1,0)*mean0.x - R(1,1)*mean0.y;
if (rmse)
{
*rmse = 0;
for (int i = 0; i < npoints; ++i)
{
pt0 = points0[i];
pt1 = points1[i];
*rmse += sqr(pt1.x - M(0,0)*pt0.x - M(0,1)*pt0.y - M(0,2)) +
sqr(pt1.y - M(1,0)*pt0.x - M(1,1)*pt0.y - M(1,2));
}
*rmse = std::sqrt(*rmse / npoints);
}
return M;
}
static Mat estimateGlobMotionLeastSquaresSimilarity(
int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
Mat_<float> T0 = normalizePoints(npoints, points0);
Mat_<float> T1 = normalizePoints(npoints, points1);
Mat_<float> A(2*npoints, 4), b(2*npoints, 1);
float *a0, *a1;
Point2f p0, p1;
for (int i = 0; i < npoints; ++i)
{
a0 = A[2*i];
a1 = A[2*i+1];
p0 = points0[i];
p1 = points1[i];
a0[0] = p0.x; a0[1] = p0.y; a0[2] = 1; a0[3] = 0;
a1[0] = p0.y; a1[1] = -p0.x; a1[2] = 0; a1[3] = 1;
b(2*i,0) = p1.x;
b(2*i+1,0) = p1.y;
}
Mat_<float> sol;
solve(A, b, sol, DECOMP_NORMAL | DECOMP_LU);
if (rmse)
*rmse = static_cast<float>(norm(A*sol, b, NORM_L2) / std::sqrt(static_cast<double>(npoints)));
Mat_<float> M = Mat::eye(3, 3, CV_32F);
M(0,0) = M(1,1) = sol(0,0);
M(0,1) = sol(1,0);
M(1,0) = -sol(1,0);
M(0,2) = sol(2,0);
M(1,2) = sol(3,0);
return T1.inv() * M * T0;
}
static Mat estimateGlobMotionLeastSquaresAffine(
int npoints, Point2f *points0, Point2f *points1, float *rmse)
{
Mat_<float> T0 = normalizePoints(npoints, points0);
Mat_<float> T1 = normalizePoints(npoints, points1);
Mat_<float> A(2*npoints, 6), b(2*npoints, 1);
float *a0, *a1;
Point2f p0, p1;
for (int i = 0; i < npoints; ++i)
{
a0 = A[2*i];
a1 = A[2*i+1];
p0 = points0[i];
p1 = points1[i];
a0[0] = p0.x; a0[1] = p0.y; a0[2] = 1; a0[3] = a0[4] = a0[5] = 0;
a1[0] = a1[1] = a1[2] = 0; a1[3] = p0.x; a1[4] = p0.y; a1[5] = 1;
b(2*i,0) = p1.x;
b(2*i+1,0) = p1.y;
}
Mat_<float> sol;
solve(A, b, sol, DECOMP_NORMAL | DECOMP_LU);
if (rmse)
*rmse = static_cast<float>(norm(A*sol, b, NORM_L2) / std::sqrt(static_cast<double>(npoints)));
Mat_<float> M = Mat::eye(3, 3, CV_32F);
for (int i = 0, k = 0; i < 2; ++i)
for (int j = 0; j < 3; ++j, ++k)
M(i,j) = sol(k,0);
return T1.inv() * M * T0;
}
Mat estimateGlobalMotionLeastSquares(
InputOutputArray points0, InputOutputArray points1, int model, float *rmse)
{
CV_INSTRUMENT_REGION();
CV_Assert(model <= MM_AFFINE);
CV_Assert(points0.type() == points1.type());
const int npoints = points0.getMat().checkVector(2);
CV_Assert(points1.getMat().checkVector(2) == npoints);
typedef Mat (*Impl)(int, Point2f*, Point2f*, float*);
static Impl impls[] = { estimateGlobMotionLeastSquaresTranslation,
estimateGlobMotionLeastSquaresTranslationAndScale,
estimateGlobMotionLeastSquaresRotation,
estimateGlobMotionLeastSquaresRigid,
estimateGlobMotionLeastSquaresSimilarity,
estimateGlobMotionLeastSquaresAffine };
Point2f *points0_ = points0.getMat().ptr<Point2f>();
Point2f *points1_ = points1.getMat().ptr<Point2f>();
return impls[model](npoints, points0_, points1_, rmse);
}
Mat estimateGlobalMotionRansac(
InputArray points0, InputArray points1, int model, const RansacParams &params,
float *rmse, int *ninliers)
{
CV_INSTRUMENT_REGION();
CV_Assert(model <= MM_AFFINE);
CV_Assert(points0.type() == points1.type());
const int npoints = points0.getMat().checkVector(2);
CV_Assert(points1.getMat().checkVector(2) == npoints);
if (npoints < params.size)
return Mat::eye(3, 3, CV_32F);
const Point2f *points0_ = points0.getMat().ptr<Point2f>();
const Point2f *points1_ = points1.getMat().ptr<Point2f>();
const int niters = params.niters();
// current hypothesis
std::vector<int> indices(params.size);
std::vector<Point2f> subset0(params.size);
std::vector<Point2f> subset1(params.size);
// best hypothesis
std::vector<int> bestIndices(params.size);
Mat_<float> bestM;
int ninliersMax = -1;
RNG rng(0);
Point2f p0, p1;
float x, y;
for (int iter = 0; iter < niters; ++iter)
{
for (int i = 0; i < params.size; ++i)
{
bool ok = false;
while (!ok)
{
ok = true;
indices[i] = static_cast<unsigned>(rng) % npoints;
for (int j = 0; j < i; ++j)
if (indices[i] == indices[j])
{ ok = false; break; }
}
}
for (int i = 0; i < params.size; ++i)
{
subset0[i] = points0_[indices[i]];
subset1[i] = points1_[indices[i]];
}
Mat_<float> M = estimateGlobalMotionLeastSquares(subset0, subset1, model, 0);
int numinliers = 0;
for (int i = 0; i < npoints; ++i)
{
p0 = points0_[i];
p1 = points1_[i];
x = M(0,0)*p0.x + M(0,1)*p0.y + M(0,2);
y = M(1,0)*p0.x + M(1,1)*p0.y + M(1,2);
if (sqr(x - p1.x) + sqr(y - p1.y) < params.thresh * params.thresh)
numinliers++;
}
if (numinliers >= ninliersMax)
{
bestM = M;
ninliersMax = numinliers;
bestIndices.swap(indices);
}
}
if (ninliersMax < params.size)
{
// compute RMSE
for (int i = 0; i < params.size; ++i)
{
subset0[i] = points0_[bestIndices[i]];
subset1[i] = points1_[bestIndices[i]];
}
bestM = estimateGlobalMotionLeastSquares(subset0, subset1, model, rmse);
}
else
{
subset0.resize(ninliersMax);
subset1.resize(ninliersMax);
for (int i = 0, j = 0; i < npoints && j < ninliersMax ; ++i)
{
p0 = points0_[i];
p1 = points1_[i];
x = bestM(0,0)*p0.x + bestM(0,1)*p0.y + bestM(0,2);
y = bestM(1,0)*p0.x + bestM(1,1)*p0.y + bestM(1,2);
if (sqr(x - p1.x) + sqr(y - p1.y) < params.thresh * params.thresh)
{
subset0[j] = p0;
subset1[j] = p1;
j++;
}
}
bestM = estimateGlobalMotionLeastSquares(subset0, subset1, model, rmse);
}
if (ninliers)
*ninliers = ninliersMax;
return bestM;
}
MotionEstimatorRansacL2::MotionEstimatorRansacL2(MotionModel model)
: MotionEstimatorBase(model)
{
setRansacParams(RansacParams::default2dMotion(model));
setMinInlierRatio(0.1f);
}
Mat MotionEstimatorRansacL2::estimate(InputArray points0, InputArray points1, bool *ok)
{
CV_Assert(points0.type() == points1.type());
const int npoints = points0.getMat().checkVector(2);
CV_Assert(points1.getMat().checkVector(2) == npoints);
// find motion
int ninliers = 0;
Mat_<float> M;
if (motionModel() != MM_HOMOGRAPHY)
M = estimateGlobalMotionRansac(
points0, points1, motionModel(), ransacParams_, 0, &ninliers);
else
{
std::vector<uchar> mask;
M = findHomography(points0, points1, mask, LMEDS);
for (int i = 0; i < npoints; ++i)
if (mask[i]) ninliers++;
}
// check if we're confident enough in estimated motion
if (ok) *ok = true;
if (static_cast<float>(ninliers) / npoints < minInlierRatio_)
{
M = Mat::eye(3, 3, CV_32F);
if (ok) *ok = false;
}
return M;
}
MotionEstimatorL1::MotionEstimatorL1(MotionModel model)
: MotionEstimatorBase(model)
{
}
// TODO: would estimating all motions as a single LP problem be faster?
Mat MotionEstimatorL1::estimate(InputArray points0, InputArray points1, bool *ok)
{
CV_Assert(points0.type() == points1.type());
const int npoints = points0.getMat().checkVector(2);
CV_Assert(points1.getMat().checkVector(2) == npoints);
#ifndef HAVE_CLP
CV_UNUSED(ok);
CV_Error(Error::StsError, "The library is built without Clp support");
#else
CV_Assert(motionModel() <= MM_AFFINE && motionModel() != MM_RIGID);
if(npoints <= 0)
return Mat::eye(3, 3, CV_32F);
// prepare LP problem
const Point2f *points0_ = points0.getMat().ptr<Point2f>();
const Point2f *points1_ = points1.getMat().ptr<Point2f>();
int ncols = 6 + 2*npoints;
int nrows = 4*npoints;
if (motionModel() == MM_SIMILARITY)
nrows += 2;
else if (motionModel() == MM_TRANSLATION_AND_SCALE)
nrows += 3;
else if (motionModel() == MM_TRANSLATION)
nrows += 4;
rows_.clear();
cols_.clear();
elems_.clear();
obj_.assign(ncols, 0);
collb_.assign(ncols, -INF);
colub_.assign(ncols, INF);
int c = 6;
for (int i = 0; i < npoints; ++i, c += 2)
{
obj_[c] = 1;
collb_[c] = 0;
obj_[c+1] = 1;
collb_[c+1] = 0;
}
elems_.clear();
rowlb_.assign(nrows, -INF);
rowub_.assign(nrows, INF);
int r = 0;
Point2f p0, p1;
for (int i = 0; i < npoints; ++i, r += 4)
{
p0 = points0_[i];
p1 = points1_[i];
set(r, 0, p0.x); set(r, 1, p0.y); set(r, 2, 1); set(r, 6+2*i, -1);
rowub_[r] = p1.x;
set(r+1, 3, p0.x); set(r+1, 4, p0.y); set(r+1, 5, 1); set(r+1, 6+2*i+1, -1);
rowub_[r+1] = p1.y;
set(r+2, 0, p0.x); set(r+2, 1, p0.y); set(r+2, 2, 1); set(r+2, 6+2*i, 1);
rowlb_[r+2] = p1.x;
set(r+3, 3, p0.x); set(r+3, 4, p0.y); set(r+3, 5, 1); set(r+3, 6+2*i+1, 1);
rowlb_[r+3] = p1.y;
}
if (motionModel() == MM_SIMILARITY)
{
set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
set(r+1, 1, 1); set(r+1, 3, 1); rowlb_[r+1] = rowub_[r+1] = 0;
}
else if (motionModel() == MM_TRANSLATION_AND_SCALE)
{
set(r, 0, 1); set(r, 4, -1); rowlb_[r] = rowub_[r] = 0;
set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
}
else if (motionModel() == MM_TRANSLATION)
{
set(r, 0, 1); rowlb_[r] = rowub_[r] = 1;
set(r+1, 1, 1); rowlb_[r+1] = rowub_[r+1] = 0;
set(r+2, 3, 1); rowlb_[r+2] = rowub_[r+2] = 0;
set(r+3, 4, 1); rowlb_[r+3] = rowub_[r+3] = 1;
}
// solve
CoinPackedMatrix A(true, &rows_[0], &cols_[0], &elems_[0], elems_.size());
A.setDimensions(nrows, ncols);
ClpSimplex model(false);
model.loadProblem(A, &collb_[0], &colub_[0], &obj_[0], &rowlb_[0], &rowub_[0]);
ClpDualRowSteepest dualSteep(1);
model.setDualRowPivotAlgorithm(dualSteep);
model.scaling(1);
model.dual();
// extract motion
const double *sol = model.getColSolution();
Mat_<float> M = Mat::eye(3, 3, CV_32F);
M(0,0) = sol[0];
M(0,1) = sol[1];
M(0,2) = sol[2];
M(1,0) = sol[3];
M(1,1) = sol[4];
M(1,2) = sol[5];
if (ok) *ok = true;
return M;
#endif
}
FromFileMotionReader::FromFileMotionReader(const String &path)
: ImageMotionEstimatorBase(MM_UNKNOWN)
{
file_.open(path.c_str());
CV_Assert(file_.is_open());
}
Mat FromFileMotionReader::estimate(const Mat &/*frame0*/, const Mat &/*frame1*/, bool *ok)
{
Mat_<float> M(3, 3);
bool ok_;
file_ >> M(0,0) >> M(0,1) >> M(0,2)
>> M(1,0) >> M(1,1) >> M(1,2)
>> M(2,0) >> M(2,1) >> M(2,2) >> ok_;
if (ok) *ok = ok_;
return M;
}
ToFileMotionWriter::ToFileMotionWriter(const String &path, Ptr<ImageMotionEstimatorBase> estimator)
: ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
file_.open(path.c_str());
CV_Assert(file_.is_open());
}
Mat ToFileMotionWriter::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
bool ok_;
Mat_<float> M = motionEstimator_->estimate(frame0, frame1, &ok_);
file_ << M(0,0) << " " << M(0,1) << " " << M(0,2) << " "
<< M(1,0) << " " << M(1,1) << " " << M(1,2) << " "
<< M(2,0) << " " << M(2,1) << " " << M(2,2) << " " << ok_ << std::endl;
if (ok) *ok = ok_;
return M;
}
KeypointBasedMotionEstimator::KeypointBasedMotionEstimator(Ptr<MotionEstimatorBase> estimator)
: ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
setDetector(GFTTDetector::create());
setOpticalFlowEstimator(makePtr<SparsePyrLkOptFlowEstimator>());
setOutlierRejector(makePtr<NullOutlierRejector>());
}
Mat KeypointBasedMotionEstimator::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
InputArray image0 = frame0;
InputArray image1 = frame1;
return estimate(image0, image1, ok);
}
Mat KeypointBasedMotionEstimator::estimate(InputArray frame0, InputArray frame1, bool *ok)
{
// find keypoints
detector_->detect(frame0, keypointsPrev_);
if (keypointsPrev_.empty())
return Mat::eye(3, 3, CV_32F);
// extract points from keypoints
pointsPrev_.resize(keypointsPrev_.size());
for (size_t i = 0; i < keypointsPrev_.size(); ++i)
pointsPrev_[i] = keypointsPrev_[i].pt;
// find correspondences
optFlowEstimator_->run(frame0, frame1, pointsPrev_, points_, status_, noArray());
// leave good correspondences only
pointsPrevGood_.clear(); pointsPrevGood_.reserve(points_.size());
pointsGood_.clear(); pointsGood_.reserve(points_.size());
for (size_t i = 0; i < points_.size(); ++i)
{
if (status_[i])
{
pointsPrevGood_.push_back(pointsPrev_[i]);
pointsGood_.push_back(points_[i]);
}
}
// perform outlier rejection
IOutlierRejector *outlRejector = outlierRejector_.get();
if (!dynamic_cast<NullOutlierRejector*>(outlRejector))
{
pointsPrev_.swap(pointsPrevGood_);
points_.swap(pointsGood_);
outlierRejector_->process(frame0.size(), pointsPrev_, points_, status_);
pointsPrevGood_.clear();
pointsPrevGood_.reserve(points_.size());
pointsGood_.clear();
pointsGood_.reserve(points_.size());
for (size_t i = 0; i < points_.size(); ++i)
{
if (status_[i])
{
pointsPrevGood_.push_back(pointsPrev_[i]);
pointsGood_.push_back(points_[i]);
}
}
}
// estimate motion
return motionEstimator_->estimate(pointsPrevGood_, pointsGood_, ok);
}
#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
KeypointBasedMotionEstimatorGpu::KeypointBasedMotionEstimatorGpu(Ptr<MotionEstimatorBase> estimator)
: ImageMotionEstimatorBase(estimator->motionModel()), motionEstimator_(estimator)
{
detector_ = cuda::createGoodFeaturesToTrackDetector(CV_8UC1);
CV_Assert(cuda::getCudaEnabledDeviceCount() > 0);
setOutlierRejector(makePtr<NullOutlierRejector>());
}
Mat KeypointBasedMotionEstimatorGpu::estimate(const Mat &frame0, const Mat &frame1, bool *ok)
{
frame0_.upload(frame0);
frame1_.upload(frame1);
return estimate(frame0_, frame1_, ok);
}
Mat KeypointBasedMotionEstimatorGpu::estimate(const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, bool *ok)
{
// convert frame to gray if it's color
cuda::GpuMat grayFrame0;
if (frame0.channels() == 1)
grayFrame0 = frame0;
else
{
cuda::cvtColor(frame0, grayFrame0_, COLOR_BGR2GRAY);
grayFrame0 = grayFrame0_;
}
// find keypoints
detector_->detect(grayFrame0, pointsPrev_);
// find correspondences
optFlowEstimator_.run(frame0, frame1, pointsPrev_, points_, status_);
// leave good correspondences only
cuda::compactPoints(pointsPrev_, points_, status_);
pointsPrev_.download(hostPointsPrev_);
points_.download(hostPoints_);
// perform outlier rejection
IOutlierRejector *rejector = outlierRejector_.get();
if (!dynamic_cast<NullOutlierRejector*>(rejector))
{
outlierRejector_->process(frame0.size(), hostPointsPrev_, hostPoints_, rejectionStatus_);
hostPointsPrevTmp_.clear();
hostPointsPrevTmp_.reserve(hostPoints_.cols);
hostPointsTmp_.clear();
hostPointsTmp_.reserve(hostPoints_.cols);
for (int i = 0; i < hostPoints_.cols; ++i)
{
if (rejectionStatus_[i])
{
hostPointsPrevTmp_.push_back(hostPointsPrev_.at<Point2f>(0,i));
hostPointsTmp_.push_back(hostPoints_.at<Point2f>(0,i));
}
}
hostPointsPrev_ = Mat(1, (int)hostPointsPrevTmp_.size(), CV_32FC2, &hostPointsPrevTmp_[0]);
hostPoints_ = Mat(1, (int)hostPointsTmp_.size(), CV_32FC2, &hostPointsTmp_[0]);
}
// estimate motion
return motionEstimator_->estimate(hostPointsPrev_, hostPoints_, ok);
}
#endif // defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
Mat getMotion(int from, int to, const std::vector<Mat> &motions)
{
CV_INSTRUMENT_REGION();
Mat M = Mat::eye(3, 3, CV_32F);
if (to > from)
{
for (int i = from; i < to; ++i)
M = at(i, motions) * M;
}
else if (from > to)
{
for (int i = to; i < from; ++i)
M = at(i, motions) * M;
M = M.inv();
}
return M;
}
} // namespace videostab
} // namespace cv
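
Finally, a sketch tying the pieces of this file together: a keypoint-based estimator with RANSAC fitting and grid-based outlier rejection, run on two placeholder frames.

#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/outlier_rejection.hpp"
#include "opencv2/imgcodecs.hpp"
#include <iostream>

int main()
{
    using namespace cv;
    using namespace cv::videostab;
    Mat frame0 = imread("frame0.png", IMREAD_GRAYSCALE);    // placeholder paths
    Mat frame1 = imread("frame1.png", IMREAD_GRAYSCALE);
    if (frame0.empty() || frame1.empty()) return 1;

    KeypointBasedMotionEstimator estimator(makePtr<MotionEstimatorRansacL2>(MM_AFFINE));
    estimator.setOutlierRejector(makePtr<TranslationBasedLocalOutlierRejector>());

    bool ok = false;
    Mat M = estimator.estimate(frame0, frame1, &ok);        // 3x3 CV_32F, identity on failure
    std::cout << "ok = " << ok << "\nM =\n" << M << std::endl;
    return 0;
}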


@ -1,560 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <queue>
#include "opencv2/videostab/inpainting.hpp"
#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/fast_marching.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
#include "opencv2/opencv_modules.hpp"
namespace cv
{
namespace videostab
{
void InpaintingPipeline::setRadius(int val)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->setRadius(val);
InpainterBase::setRadius(val);
}
void InpaintingPipeline::setFrames(const std::vector<Mat> &val)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->setFrames(val);
InpainterBase::setFrames(val);
}
void InpaintingPipeline::setMotionModel(MotionModel val)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->setMotionModel(val);
InpainterBase::setMotionModel(val);
}
void InpaintingPipeline::setMotions(const std::vector<Mat> &val)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->setMotions(val);
InpainterBase::setMotions(val);
}
void InpaintingPipeline::setStabilizedFrames(const std::vector<Mat> &val)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->setStabilizedFrames(val);
InpainterBase::setStabilizedFrames(val);
}
void InpaintingPipeline::setStabilizationMotions(const std::vector<Mat> &val)
{
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->setStabilizationMotions(val);
InpainterBase::setStabilizationMotions(val);
}
void InpaintingPipeline::inpaint(int idx, Mat &frame, Mat &mask)
{
CV_INSTRUMENT_REGION();
for (size_t i = 0; i < inpainters_.size(); ++i)
inpainters_[i]->inpaint(idx, frame, mask);
}
struct Pixel3
{
float intens;
Point3_<uchar> color;
bool operator <(const Pixel3 &other) const { return intens < other.intens; }
};
ConsistentMosaicInpainter::ConsistentMosaicInpainter()
{
setStdevThresh(20.f);
}
void ConsistentMosaicInpainter::inpaint(int idx, Mat &frame, Mat &mask)
{
CV_INSTRUMENT_REGION();
CV_Assert(frame.type() == CV_8UC3);
CV_Assert(mask.size() == frame.size() && mask.type() == CV_8U);
Mat invS = at(idx, *stabilizationMotions_).inv();
std::vector<Mat_<float> > vmotions(2*radius_ + 1);
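// the loop below collects, for every neighbour offset i in [-radius_, radius_], the motion that maps
// a pixel of the stabilized frame idx back to the original frame idx (invS) and then into frame idx + i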
for (int i = -radius_; i <= radius_; ++i)
vmotions[radius_ + i] = getMotion(idx, idx + i, *motions_) * invS;
int n;
float mean, var;
std::vector<Pixel3> pixels(2*radius_ + 1);
Mat_<Point3_<uchar> > frame_(frame);
Mat_<uchar> mask_(mask);
for (int y = 0; y < mask.rows; ++y)
{
for (int x = 0; x < mask.cols; ++x)
{
if (!mask_(y, x))
{
n = 0;
mean = 0;
var = 0;
for (int i = -radius_; i <= radius_; ++i)
{
const Mat_<Point3_<uchar> > &framei = at(idx + i, *frames_);
const Mat_<float> &Mi = vmotions[radius_ + i];
int xi = cvRound(Mi(0,0)*x + Mi(0,1)*y + Mi(0,2));
int yi = cvRound(Mi(1,0)*x + Mi(1,1)*y + Mi(1,2));
if (xi >= 0 && xi < framei.cols && yi >= 0 && yi < framei.rows)
{
pixels[n].color = framei(yi, xi);
mean += pixels[n].intens = intensity(pixels[n].color);
n++;
}
}
if (n > 0)
{
mean /= n;
for (int i = 0; i < n; ++i)
var += sqr(pixels[i].intens - mean);
var /= std::max(n - 1, 1);
if (var < stdevThresh_ * stdevThresh_)
{
std::sort(pixels.begin(), pixels.begin() + n);
int nh = (n-1)/2;
int c1 = pixels[nh].color.x;
int c2 = pixels[nh].color.y;
int c3 = pixels[nh].color.z;
if (n % 2 == 0) // even sample count: the median is the average of the two middle values
{
c1 = (c1 + pixels[nh+1].color.x) / 2;
c2 = (c2 + pixels[nh+1].color.y) / 2;
c3 = (c3 + pixels[nh+1].color.z) / 2;
}
frame_(y, x) = Point3_<uchar>(
static_cast<uchar>(c1),
static_cast<uchar>(c2),
static_cast<uchar>(c3));
mask_(y, x) = 255;
}
}
}
}
}
}
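// Measures how well motion M aligns frame0 with frame1: the sum of absolute intensity differences
// over all masked pixels of frame0 that land inside frame1 after applying M (lower is better).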
static float alignementError(
const Mat &M, const Mat &frame0, const Mat &mask0, const Mat &frame1)
{
CV_Assert(frame0.type() == CV_8UC3 && frame1.type() == CV_8UC3);
CV_Assert(mask0.type() == CV_8U && mask0.size() == frame0.size());
CV_Assert(frame0.size() == frame1.size());
CV_Assert(M.size() == Size(3,3) && M.type() == CV_32F);
Mat_<uchar> mask0_(mask0);
Mat_<float> M_(M);
float err = 0;
for (int y0 = 0; y0 < frame0.rows; ++y0)
{
for (int x0 = 0; x0 < frame0.cols; ++x0)
{
if (mask0_(y0,x0))
{
int x1 = cvRound(M_(0,0)*x0 + M_(0,1)*y0 + M_(0,2));
int y1 = cvRound(M_(1,0)*x0 + M_(1,1)*y0 + M_(1,2));
if (y1 >= 0 && y1 < frame1.rows && x1 >= 0 && x1 < frame1.cols)
err += std::abs(intensity(frame1.at<Point3_<uchar> >(y1,x1)) -
intensity(frame0.at<Point3_<uchar> >(y0,x0)));
}
}
}
return err;
}
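// Fills the optical flow at a masked pixel (x, y) by extrapolating the flow of known neighbours within
// radius 'rad'. Each neighbour contributes its flow corrected by the local flow gradients (dudx, dudy,
// dvdx, dvdy) and is weighted by the inverse of color difference times spatial distance, so similar and
// nearby pixels dominate the estimate.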
class MotionInpaintBody
{
public:
void operator ()(int x, int y)
{
float uEst = 0.f, vEst = 0.f, wSum = 0.f;
for (int dy = -rad; dy <= rad; ++dy)
{
for (int dx = -rad; dx <= rad; ++dx)
{
int qx0 = x + dx;
int qy0 = y + dy;
if (qy0 >= 0 && qy0 < mask0.rows && qx0 >= 0 && qx0 < mask0.cols && mask0(qy0,qx0))
{
int qx1 = cvRound(qx0 + flowX(qy0,qx0));
int qy1 = cvRound(qy0 + flowY(qy0,qx0));
int px1 = qx1 - dx;
int py1 = qy1 - dy;
if (qx1 >= 0 && qx1 < mask1.cols && qy1 >= 0 && qy1 < mask1.rows && mask1(qy1,qx1) &&
px1 >= 0 && px1 < mask1.cols && py1 >= 0 && py1 < mask1.rows && mask1(py1,px1))
{
float dudx = 0.f, dvdx = 0.f, dudy = 0.f, dvdy = 0.f;
if (qx0 > 0 && mask0(qy0,qx0-1))
{
if (qx0+1 < mask0.cols && mask0(qy0,qx0+1))
{
dudx = (flowX(qy0,qx0+1) - flowX(qy0,qx0-1)) * 0.5f;
dvdx = (flowY(qy0,qx0+1) - flowY(qy0,qx0-1)) * 0.5f;
}
else
{
dudx = flowX(qy0,qx0) - flowX(qy0,qx0-1);
dvdx = flowY(qy0,qx0) - flowY(qy0,qx0-1);
}
}
else if (qx0+1 < mask0.cols && mask0(qy0,qx0+1))
{
dudx = flowX(qy0,qx0+1) - flowX(qy0,qx0);
dvdx = flowY(qy0,qx0+1) - flowY(qy0,qx0);
}
if (qy0 > 0 && mask0(qy0-1,qx0))
{
if (qy0+1 < mask0.rows && mask0(qy0+1,qx0))
{
dudy = (flowX(qy0+1,qx0) - flowX(qy0-1,qx0)) * 0.5f;
dvdy = (flowY(qy0+1,qx0) - flowY(qy0-1,qx0)) * 0.5f;
}
else
{
dudy = flowX(qy0,qx0) - flowX(qy0-1,qx0);
dvdy = flowY(qy0,qx0) - flowY(qy0-1,qx0);
}
}
else if (qy0+1 < mask0.rows && mask0(qy0+1,qx0))
{
dudy = flowX(qy0+1,qx0) - flowX(qy0,qx0);
dvdy = flowY(qy0+1,qx0) - flowY(qy0,qx0);
}
Point3_<uchar> cp = frame1(py1,px1), cq = frame1(qy1,qx1);
float distColor = sqr(static_cast<float>(cp.x-cq.x))
+ sqr(static_cast<float>(cp.y-cq.y))
+ sqr(static_cast<float>(cp.z-cq.z));
float w = 1.f / (std::sqrt(distColor * (dx*dx + dy*dy)) + eps);
uEst += w * (flowX(qy0,qx0) - dudx*dx - dudy*dy);
vEst += w * (flowY(qy0,qx0) - dvdx*dx - dvdy*dy);
wSum += w;
}
}
}
}
if (wSum > 0.f)
{
flowX(y,x) = uEst / wSum;
flowY(y,x) = vEst / wSum;
mask0(y,x) = 255;
}
}
Mat_<Point3_<uchar> > frame1;
Mat_<uchar> mask0, mask1;
Mat_<float> flowX, flowY;
float eps;
int rad;
};
#ifdef _MSC_VER
#pragma warning(disable: 4702) // unreachable code
#endif
MotionInpainter::MotionInpainter()
{
#ifdef HAVE_OPENCV_CUDAOPTFLOW
setOptFlowEstimator(makePtr<DensePyrLkOptFlowEstimatorGpu>());
setFlowErrorThreshold(1e-4f);
setDistThreshold(5.f);
setBorderMode(BORDER_REPLICATE);
#else
CV_Error(Error::StsNotImplemented, "Current implementation of MotionInpainter requires CUDA");
#endif
}
void MotionInpainter::inpaint(int idx, Mat &frame, Mat &mask)
{
CV_INSTRUMENT_REGION();
std::priority_queue<std::pair<float,int> > neighbors;
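// neighbour frames are pushed as (-alignment error, frame index): std::priority_queue is a max-heap,
// so the best-aligned neighbour is popped and used for inpainting first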
std::vector<Mat> vmotions(2*radius_ + 1);
for (int i = -radius_; i <= radius_; ++i)
{
Mat motion0to1 = getMotion(idx, idx + i, *motions_) * at(idx, *stabilizationMotions_).inv();
vmotions[radius_ + i] = motion0to1;
if (i != 0)
{
float err = alignementError(motion0to1, frame, mask, at(idx + i, *frames_));
neighbors.push(std::make_pair(-err, idx + i));
}
}
if (mask1_.size() != mask.size())
{
mask1_.create(mask.size());
mask1_.setTo(255);
}
cvtColor(frame, grayFrame_, COLOR_BGR2GRAY);
MotionInpaintBody body;
body.rad = 2;
body.eps = 1e-4f;
while (!neighbors.empty())
{
int neighbor = neighbors.top().second;
neighbors.pop();
Mat motion1to0 = vmotions[radius_ + neighbor - idx].inv();
// warp frame
frame1_ = at(neighbor, *frames_);
if (motionModel_ != MM_HOMOGRAPHY)
warpAffine(
frame1_, transformedFrame1_, motion1to0(Rect(0,0,3,2)), frame1_.size(),
INTER_LINEAR, borderMode_);
else
warpPerspective(
frame1_, transformedFrame1_, motion1to0, frame1_.size(), INTER_LINEAR,
borderMode_);
cvtColor(transformedFrame1_, transformedGrayFrame1_, COLOR_BGR2GRAY);
// warp mask
if (motionModel_ != MM_HOMOGRAPHY)
warpAffine(
mask1_, transformedMask1_, motion1to0(Rect(0,0,3,2)), mask1_.size(),
INTER_NEAREST);
else
warpPerspective(mask1_, transformedMask1_, motion1to0, mask1_.size(), INTER_NEAREST);
erode(transformedMask1_, transformedMask1_, Mat());
// update flow
optFlowEstimator_->run(grayFrame_, transformedGrayFrame1_, flowX_, flowY_, flowErrors_);
calcFlowMask(
flowX_, flowY_, flowErrors_, flowErrorThreshold_, mask, transformedMask1_,
flowMask_);
body.flowX = flowX_;
body.flowY = flowY_;
body.mask0 = flowMask_;
body.mask1 = transformedMask1_;
body.frame1 = transformedFrame1_;
fmm_.run(flowMask_, body);
completeFrameAccordingToFlow(
flowMask_, flowX_, flowY_, transformedFrame1_, transformedMask1_, distThresh_,
frame, mask);
}
}
class ColorAverageInpaintBody
{
public:
void operator ()(int x, int y)
{
float c1 = 0, c2 = 0, c3 = 0;
float wSum = 0;
static const int lut[8][2] = {{-1,-1}, {-1,0}, {-1,1}, {0,-1}, {0,1}, {1,-1}, {1,0}, {1,1}};
for (int i = 0; i < 8; ++i)
{
int qx = x + lut[i][0];
int qy = y + lut[i][1];
if (qy >= 0 && qy < mask.rows && qx >= 0 && qx < mask.cols && mask(qy,qx))
{
c1 += frame.at<uchar>(qy,3*qx);
c2 += frame.at<uchar>(qy,3*qx+1);
c3 += frame.at<uchar>(qy,3*qx+2);
wSum += 1;
}
}
float wSumInv = (std::fabs(wSum) > 0) ? (1.f / wSum) : 0; // if wSum is 0, c1-c3 will be 0 too
frame(y,x) = Point3_<uchar>(
static_cast<uchar>(c1*wSumInv),
static_cast<uchar>(c2*wSumInv),
static_cast<uchar>(c3*wSumInv));
mask(y,x) = 255;
}
cv::Mat_<uchar> mask;
cv::Mat_<cv::Point3_<uchar> > frame;
};
void ColorAverageInpainter::inpaint(int /*idx*/, Mat &frame, Mat &mask)
{
CV_INSTRUMENT_REGION();
ColorAverageInpaintBody body;
body.mask = mask;
body.frame = frame;
fmm_.run(mask, body);
}
void ColorInpainter::inpaint(int /*idx*/, Mat &frame, Mat &mask)
{
CV_INSTRUMENT_REGION();
bitwise_not(mask, invMask_);
cv::inpaint(frame, invMask_, frame, radius_, method_);
}
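// calcFlowMask marks the pixels whose flow error is below maxError and whose displaced position lands
// inside mask1; completeFrameAccordingToFlow then copies colors from frame1 along that flow into the
// still-missing pixels of frame0, provided the displacement is shorter than distThresh.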
void calcFlowMask(
const Mat &flowX, const Mat &flowY, const Mat &errors, float maxError,
const Mat &mask0, const Mat &mask1, Mat &flowMask)
{
CV_INSTRUMENT_REGION();
CV_Assert(flowX.type() == CV_32F && flowX.size() == mask0.size());
CV_Assert(flowY.type() == CV_32F && flowY.size() == mask0.size());
CV_Assert(errors.type() == CV_32F && errors.size() == mask0.size());
CV_Assert(mask0.type() == CV_8U);
CV_Assert(mask1.type() == CV_8U && mask1.size() == mask0.size());
Mat_<float> flowX_(flowX), flowY_(flowY), errors_(errors);
Mat_<uchar> mask0_(mask0), mask1_(mask1);
flowMask.create(mask0.size(), CV_8U);
flowMask.setTo(0);
Mat_<uchar> flowMask_(flowMask);
for (int y0 = 0; y0 < flowMask_.rows; ++y0)
{
for (int x0 = 0; x0 < flowMask_.cols; ++x0)
{
if (mask0_(y0,x0) && errors_(y0,x0) < maxError)
{
int x1 = cvRound(x0 + flowX_(y0,x0));
int y1 = cvRound(y0 + flowY_(y0,x0));
if (x1 >= 0 && x1 < mask1_.cols && y1 >= 0 && y1 < mask1_.rows && mask1_(y1,x1))
flowMask_(y0,x0) = 255;
}
}
}
}
void completeFrameAccordingToFlow(
const Mat &flowMask, const Mat &flowX, const Mat &flowY, const Mat &frame1, const Mat &mask1,
float distThresh, Mat &frame0, Mat &mask0)
{
CV_INSTRUMENT_REGION();
CV_Assert(flowMask.type() == CV_8U);
CV_Assert(flowX.type() == CV_32F && flowX.size() == flowMask.size());
CV_Assert(flowY.type() == CV_32F && flowY.size() == flowMask.size());
CV_Assert(frame1.type() == CV_8UC3 && frame1.size() == flowMask.size());
CV_Assert(mask1.type() == CV_8U && mask1.size() == flowMask.size());
CV_Assert(frame0.type() == CV_8UC3 && frame0.size() == flowMask.size());
CV_Assert(mask0.type() == CV_8U && mask0.size() == flowMask.size());
Mat_<uchar> flowMask_(flowMask), mask1_(mask1), mask0_(mask0);
Mat_<float> flowX_(flowX), flowY_(flowY);
for (int y0 = 0; y0 < frame0.rows; ++y0)
{
for (int x0 = 0; x0 < frame0.cols; ++x0)
{
if (!mask0_(y0,x0) && flowMask_(y0,x0))
{
int x1 = cvRound(x0 + flowX_(y0,x0));
int y1 = cvRound(y0 + flowY_(y0,x0));
if (x1 >= 0 && x1 < frame1.cols && y1 >= 0 && y1 < frame1.rows && mask1_(y1,x1)
&& sqr(flowX_(y0,x0)) + sqr(flowY_(y0,x0)) < sqr(distThresh))
{
frame0.at<Point3_<uchar> >(y0,x0) = frame1.at<Point3_<uchar> >(y1,x1);
mask0_(y0,x0) = 255;
}
}
}
}
}
} // namespace videostab
} // namespace cv

View File

@ -1,63 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include <cstdio>
#include <cstdarg>
#include "opencv2/videostab/log.hpp"
namespace cv
{
namespace videostab
{
void LogToStdout::print(const char *format, ...)
{
va_list args;
va_start(args, format);
vprintf(format, args);
fflush(stdout);
va_end(args);
}
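// usage sketch (illustrative): LogToStdout log; log.print("processed %d frames\n", frameCount);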
} // namespace videostab
} // namespace cv

View File

@ -1,718 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/videostab/motion_stabilizing.hpp"
#include "opencv2/videostab/global_motion.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
#include "clp.hpp"
namespace cv
{
namespace videostab
{
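// MotionStabilizationPipeline applies its stabilizers one after another: each pass composes its
// stabilization motions with the accumulated ones and rewrites the inter-frame motions so that the
// next stabilizer operates on the already-stabilized motion path.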
void MotionStabilizationPipeline::stabilize(
int size, const std::vector<Mat> &motions, std::pair<int,int> range, Mat *stabilizationMotions)
{
std::vector<Mat> updatedMotions(motions.size());
for (size_t i = 0; i < motions.size(); ++i)
updatedMotions[i] = motions[i].clone();
std::vector<Mat> stabilizationMotions_(size);
for (int i = 0; i < size; ++i)
stabilizationMotions[i] = Mat::eye(3, 3, CV_32F);
for (size_t i = 0; i < stabilizers_.size(); ++i)
{
stabilizers_[i]->stabilize(size, updatedMotions, range, &stabilizationMotions_[0]);
for (int k = 0; k < size; ++k)
stabilizationMotions[k] = stabilizationMotions_[k] * stabilizationMotions[k];
for (int j = 0; j + 1 < size; ++j)
{
Mat S0 = stabilizationMotions[j];
Mat S1 = stabilizationMotions[j+1];
at(j, updatedMotions) = S1 * at(j, updatedMotions) * S0.inv();
}
}
}
void MotionFilterBase::stabilize(
int size, const std::vector<Mat> &motions, std::pair<int,int> range, Mat *stabilizationMotions)
{
for (int i = 0; i < size; ++i)
stabilizationMotions[i] = stabilize(i, motions, range);
}
void GaussianMotionFilter::setParams(int _radius, float _stdev)
{
radius_ = _radius;
stdev_ = _stdev > 0.f ? _stdev : std::sqrt(static_cast<float>(_radius));
float sum = 0;
weight_.resize(2*radius_ + 1);
for (int i = -radius_; i <= radius_; ++i)
sum += weight_[radius_ + i] = std::exp(-i*i/(stdev_*stdev_));
for (int i = -radius_; i <= radius_; ++i)
weight_[radius_ + i] /= sum;
}
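// The stabilization motion for frame idx is the normalized Gaussian-weighted average of the motions
// getMotion(idx, i, motions) over the temporal window [idx - radius_, idx + radius_] clipped to range.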
Mat GaussianMotionFilter::stabilize(int idx, const std::vector<Mat> &motions, std::pair<int,int> range)
{
const Mat &cur = at(idx, motions);
Mat res = Mat::zeros(cur.size(), cur.type());
float sum = 0.f;
int iMin = std::max(idx - radius_, range.first);
int iMax = std::min(idx + radius_, range.second);
for (int i = iMin; i <= iMax; ++i)
{
res += weight_[radius_ + i - idx] * getMotion(idx, i, motions);
sum += weight_[radius_ + i - idx];
}
return sum > 0.f ? res / sum : Mat::eye(cur.size(), cur.type());
}
LpMotionStabilizer::LpMotionStabilizer(MotionModel model)
{
setMotionModel(model);
setFrameSize(Size(0,0));
setTrimRatio(0.1f);
setWeight1(1);
setWeight2(10);
setWeight3(100);
setWeight4(100);
}
#ifndef HAVE_CLP
void LpMotionStabilizer::stabilize(int, const std::vector<Mat>&, std::pair<int,int>, Mat*)
{
CV_Error(Error::StsError, "The library is built without Clp support");
}
#else
void LpMotionStabilizer::stabilize(
int size, const std::vector<Mat> &motions, std::pair<int,int> /*range*/, Mat *stabilizationMotions)
{
CV_Assert(model_ <= MM_AFFINE);
int N = size;
const std::vector<Mat> &M = motions;
Mat *S = stabilizationMotions;
double w = frameSize_.width, h = frameSize_.height;
double tw = w * trimRatio_, th = h * trimRatio_;
int ncols = 4*N + 6*(N-1) + 6*(N-2) + 6*(N-3);
int nrows = 8*N + 2*6*(N-1) + 2*6*(N-2) + 2*6*(N-3);
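// Variable layout used below: the first 4*N columns hold per-frame similarity parameters (a, b, tx, ty)
// with S[t] = [a b tx; -b a ty; 0 0 1]; the remaining columns are non-negative slack variables that
// bound the first, second and third differences of the stabilized path (weighted by w1_, w2_, w3_,
// with w4_ scaling the linear part relative to the translation part).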
rows_.clear();
cols_.clear();
elems_.clear();
obj_.assign(ncols, 0);
collb_.assign(ncols, -INF);
colub_.assign(ncols, INF);
int c = 4*N;
// for each slack variable e[t] (error bound)
for (int t = 0; t < N-1; ++t, c += 6)
{
// e[t](0,0)
obj_[c] = w4_*w1_;
collb_[c] = 0;
// e[t](0,1)
obj_[c+1] = w4_*w1_;
collb_[c+1] = 0;
// e[t](0,2)
obj_[c+2] = w1_;
collb_[c+2] = 0;
// e[t](1,0)
obj_[c+3] = w4_*w1_;
collb_[c+3] = 0;
// e[t](1,1)
obj_[c+4] = w4_*w1_;
collb_[c+4] = 0;
// e[t](1,2)
obj_[c+5] = w1_;
collb_[c+5] = 0;
}
for (int t = 0; t < N-2; ++t, c += 6)
{
// e[t](0,0)
obj_[c] = w4_*w2_;
collb_[c] = 0;
// e[t](0,1)
obj_[c+1] = w4_*w2_;
collb_[c+1] = 0;
// e[t](0,2)
obj_[c+2] = w2_;
collb_[c+2] = 0;
// e[t](1,0)
obj_[c+3] = w4_*w2_;
collb_[c+3] = 0;
// e[t](1,1)
obj_[c+4] = w4_*w2_;
collb_[c+4] = 0;
// e[t](1,2)
obj_[c+5] = w2_;
collb_[c+5] = 0;
}
for (int t = 0; t < N-3; ++t, c += 6)
{
// e[t](0,0)
obj_[c] = w4_*w3_;
collb_[c] = 0;
// e[t](0,1)
obj_[c+1] = w4_*w3_;
collb_[c+1] = 0;
// e[t](0,2)
obj_[c+2] = w3_;
collb_[c+2] = 0;
// e[t](1,0)
obj_[c+3] = w4_*w3_;
collb_[c+3] = 0;
// e[t](1,1)
obj_[c+4] = w4_*w3_;
collb_[c+4] = 0;
// e[t](1,2)
obj_[c+5] = w3_;
collb_[c+5] = 0;
}
elems_.clear();
rowlb_.assign(nrows, -INF);
rowub_.assign(nrows, INF);
int r = 0;
// frame corners
const Point2d pt[] = {Point2d(0,0), Point2d(w,0), Point2d(w,h), Point2d(0,h)};
// for each frame
for (int t = 0; t < N; ++t)
{
c = 4*t;
// for each frame corner
for (int i = 0; i < 4; ++i, r += 2)
{
set(r, c, pt[i].x); set(r, c+1, pt[i].y); set(r, c+2, 1);
set(r+1, c, pt[i].y); set(r+1, c+1, -pt[i].x); set(r+1, c+3, 1);
rowlb_[r] = pt[i].x-tw;
rowub_[r] = pt[i].x+tw;
rowlb_[r+1] = pt[i].y-th;
rowub_[r+1] = pt[i].y+th;
}
}
// for each S[t+1]M[t] - S[t] - e[t] <= 0 condition
for (int t = 0; t < N-1; ++t, r += 6)
{
Mat_<float> M0 = at(t,M);
c = 4*t;
set(r, c, -1);
set(r+1, c+1, -1);
set(r+2, c+2, -1);
set(r+3, c+1, 1);
set(r+4, c, -1);
set(r+5, c+3, -1);
c = 4*(t+1);
set(r, c, M0(0,0)); set(r, c+1, M0(1,0));
set(r+1, c, M0(0,1)); set(r+1, c+1, M0(1,1));
set(r+2, c, M0(0,2)); set(r+2, c+1, M0(1,2)); set(r+2, c+2, 1);
set(r+3, c, M0(1,0)); set(r+3, c+1, -M0(0,0));
set(r+4, c, M0(1,1)); set(r+4, c+1, -M0(0,1));
set(r+5, c, M0(1,2)); set(r+5, c+1, -M0(0,2)); set(r+5, c+3, 1);
c = 4*N + 6*t;
for (int i = 0; i < 6; ++i)
set(r+i, c+i, -1);
rowub_[r] = 0;
rowub_[r+1] = 0;
rowub_[r+2] = 0;
rowub_[r+3] = 0;
rowub_[r+4] = 0;
rowub_[r+5] = 0;
}
// for each 0 <= S[t+1]M[t] - S[t] + e[t] condition
for (int t = 0; t < N-1; ++t, r += 6)
{
Mat_<float> M0 = at(t,M);
c = 4*t;
set(r, c, -1);
set(r+1, c+1, -1);
set(r+2, c+2, -1);
set(r+3, c+1, 1);
set(r+4, c, -1);
set(r+5, c+3, -1);
c = 4*(t+1);
set(r, c, M0(0,0)); set(r, c+1, M0(1,0));
set(r+1, c, M0(0,1)); set(r+1, c+1, M0(1,1));
set(r+2, c, M0(0,2)); set(r+2, c+1, M0(1,2)); set(r+2, c+2, 1);
set(r+3, c, M0(1,0)); set(r+3, c+1, -M0(0,0));
set(r+4, c, M0(1,1)); set(r+4, c+1, -M0(0,1));
set(r+5, c, M0(1,2)); set(r+5, c+1, -M0(0,2)); set(r+5, c+3, 1);
c = 4*N + 6*t;
for (int i = 0; i < 6; ++i)
set(r+i, c+i, 1);
rowlb_[r] = 0;
rowlb_[r+1] = 0;
rowlb_[r+2] = 0;
rowlb_[r+3] = 0;
rowlb_[r+4] = 0;
rowlb_[r+5] = 0;
}
// for each S[t+2]M[t+1] - S[t+1]*(I+M[t]) + S[t] - e[t] <= 0 condition
for (int t = 0; t < N-2; ++t, r += 6)
{
Mat_<float> M0 = at(t,M), M1 = at(t+1,M);
c = 4*t;
set(r, c, 1);
set(r+1, c+1, 1);
set(r+2, c+2, 1);
set(r+3, c+1, -1);
set(r+4, c, 1);
set(r+5, c+3, 1);
c = 4*(t+1);
set(r, c, -M0(0,0)-1); set(r, c+1, -M0(1,0));
set(r+1, c, -M0(0,1)); set(r+1, c+1, -M0(1,1)-1);
set(r+2, c, -M0(0,2)); set(r+2, c+1, -M0(1,2)); set(r+2, c+2, -2);
set(r+3, c, -M0(1,0)); set(r+3, c+1, M0(0,0)+1);
set(r+4, c, -M0(1,1)-1); set(r+4, c+1, M0(0,1));
set(r+5, c, -M0(1,2)); set(r+5, c+1, M0(0,2)); set(r+5, c+3, -2);
c = 4*(t+2);
set(r, c, M1(0,0)); set(r, c+1, M1(1,0));
set(r+1, c, M1(0,1)); set(r+1, c+1, M1(1,1));
set(r+2, c, M1(0,2)); set(r+2, c+1, M1(1,2)); set(r+2, c+2, 1);
set(r+3, c, M1(1,0)); set(r+3, c+1, -M1(0,0));
set(r+4, c, M1(1,1)); set(r+4, c+1, -M1(0,1));
set(r+5, c, M1(1,2)); set(r+5, c+1, -M1(0,2)); set(r+5, c+3, 1);
c = 4*N + 6*(N-1) + 6*t;
for (int i = 0; i < 6; ++i)
set(r+i, c+i, -1);
rowub_[r] = 0;
rowub_[r+1] = 0;
rowub_[r+2] = 0;
rowub_[r+3] = 0;
rowub_[r+4] = 0;
rowub_[r+5] = 0;
}
// for each 0 <= S[t+2]M[t+1] - S[t+1]*(I+M[t]) + S[t] + e[t] condition
for (int t = 0; t < N-2; ++t, r += 6)
{
Mat_<float> M0 = at(t,M), M1 = at(t+1,M);
c = 4*t;
set(r, c, 1);
set(r+1, c+1, 1);
set(r+2, c+2, 1);
set(r+3, c+1, -1);
set(r+4, c, 1);
set(r+5, c+3, 1);
c = 4*(t+1);
set(r, c, -M0(0,0)-1); set(r, c+1, -M0(1,0));
set(r+1, c, -M0(0,1)); set(r+1, c+1, -M0(1,1)-1);
set(r+2, c, -M0(0,2)); set(r+2, c+1, -M0(1,2)); set(r+2, c+2, -2);
set(r+3, c, -M0(1,0)); set(r+3, c+1, M0(0,0)+1);
set(r+4, c, -M0(1,1)-1); set(r+4, c+1, M0(0,1));
set(r+5, c, -M0(1,2)); set(r+5, c+1, M0(0,2)); set(r+5, c+3, -2);
c = 4*(t+2);
set(r, c, M1(0,0)); set(r, c+1, M1(1,0));
set(r+1, c, M1(0,1)); set(r+1, c+1, M1(1,1));
set(r+2, c, M1(0,2)); set(r+2, c+1, M1(1,2)); set(r+2, c+2, 1);
set(r+3, c, M1(1,0)); set(r+3, c+1, -M1(0,0));
set(r+4, c, M1(1,1)); set(r+4, c+1, -M1(0,1));
set(r+5, c, M1(1,2)); set(r+5, c+1, -M1(0,2)); set(r+5, c+3, 1);
c = 4*N + 6*(N-1) + 6*t;
for (int i = 0; i < 6; ++i)
set(r+i, c+i, 1);
rowlb_[r] = 0;
rowlb_[r+1] = 0;
rowlb_[r+2] = 0;
rowlb_[r+3] = 0;
rowlb_[r+4] = 0;
rowlb_[r+5] = 0;
}
// for each S[t+3]M[t+2] - S[t+2]*(I+2M[t+1]) + S[t+1]*(2*I+M[t]) - S[t] - e[t] <= 0 condition
for (int t = 0; t < N-3; ++t, r += 6)
{
Mat_<float> M0 = at(t,M), M1 = at(t+1,M), M2 = at(t+2,M);
c = 4*t;
set(r, c, -1);
set(r+1, c+1, -1);
set(r+2, c+2, -1);
set(r+3, c+1, 1);
set(r+4, c, -1);
set(r+5, c+3, -1);
c = 4*(t+1);
set(r, c, M0(0,0)+2); set(r, c+1, M0(1,0));
set(r+1, c, M0(0,1)); set(r+1, c+1, M0(1,1)+2);
set(r+2, c, M0(0,2)); set(r+2, c+1, M0(1,2)); set(r+2, c+2, 3);
set(r+3, c, M0(1,0)); set(r+3, c+1, -M0(0,0)-2);
set(r+4, c, M0(1,1)+2); set(r+4, c+1, -M0(0,1));
set(r+5, c, M0(1,2)); set(r+5, c+1, -M0(0,2)); set(r+5, c+3, 3);
c = 4*(t+2);
set(r, c, -2*M1(0,0)-1); set(r, c+1, -2*M1(1,0));
set(r+1, c, -2*M1(0,1)); set(r+1, c+1, -2*M1(1,1)-1);
set(r+2, c, -2*M1(0,2)); set(r+2, c+1, -2*M1(1,2)); set(r+2, c+2, -3);
set(r+3, c, -2*M1(1,0)); set(r+3, c+1, 2*M1(0,0)+1);
set(r+4, c, -2*M1(1,1)-1); set(r+4, c+1, 2*M1(0,1));
set(r+5, c, -2*M1(1,2)); set(r+5, c+1, 2*M1(0,2)); set(r+5, c+3, -3);
c = 4*(t+3);
set(r, c, M2(0,0)); set(r, c+1, M2(1,0));
set(r+1, c, M2(0,1)); set(r+1, c+1, M2(1,1));
set(r+2, c, M2(0,2)); set(r+2, c+1, M2(1,2)); set(r+2, c+2, 1);
set(r+3, c, M2(1,0)); set(r+3, c+1, -M2(0,0));
set(r+4, c, M2(1,1)); set(r+4, c+1, -M2(0,1));
set(r+5, c, M2(1,2)); set(r+5, c+1, -M2(0,2)); set(r+5, c+3, 1);
c = 4*N + 6*(N-1) + 6*(N-2) + 6*t;
for (int i = 0; i < 6; ++i)
set(r+i, c+i, -1);
rowub_[r] = 0;
rowub_[r+1] = 0;
rowub_[r+2] = 0;
rowub_[r+3] = 0;
rowub_[r+4] = 0;
rowub_[r+5] = 0;
}
// for each 0 <= S[t+3]M[t+2] - S[t+2]*(I+2M[t+1]) + S[t+1]*(2*I+M[t]) - S[t] + e[t] condition
for (int t = 0; t < N-3; ++t, r += 6)
{
Mat_<float> M0 = at(t,M), M1 = at(t+1,M), M2 = at(t+2,M);
c = 4*t;
set(r, c, -1);
set(r+1, c+1, -1);
set(r+2, c+2, -1);
set(r+3, c+1, 1);
set(r+4, c, -1);
set(r+5, c+3, -1);
c = 4*(t+1);
set(r, c, M0(0,0)+2); set(r, c+1, M0(1,0));
set(r+1, c, M0(0,1)); set(r+1, c+1, M0(1,1)+2);
set(r+2, c, M0(0,2)); set(r+2, c+1, M0(1,2)); set(r+2, c+2, 3);
set(r+3, c, M0(1,0)); set(r+3, c+1, -M0(0,0)-2);
set(r+4, c, M0(1,1)+2); set(r+4, c+1, -M0(0,1));
set(r+5, c, M0(1,2)); set(r+5, c+1, -M0(0,2)); set(r+5, c+3, 3);
c = 4*(t+2);
set(r, c, -2*M1(0,0)-1); set(r, c+1, -2*M1(1,0));
set(r+1, c, -2*M1(0,1)); set(r+1, c+1, -2*M1(1,1)-1);
set(r+2, c, -2*M1(0,2)); set(r+2, c+1, -2*M1(1,2)); set(r+2, c+2, -3);
set(r+3, c, -2*M1(1,0)); set(r+3, c+1, 2*M1(0,0)+1);
set(r+4, c, -2*M1(1,1)-1); set(r+4, c+1, 2*M1(0,1));
set(r+5, c, -2*M1(1,2)); set(r+5, c+1, 2*M1(0,2)); set(r+5, c+3, -3);
c = 4*(t+3);
set(r, c, M2(0,0)); set(r, c+1, M2(1,0));
set(r+1, c, M2(0,1)); set(r+1, c+1, M2(1,1));
set(r+2, c, M2(0,2)); set(r+2, c+1, M2(1,2)); set(r+2, c+2, 1);
set(r+3, c, M2(1,0)); set(r+3, c+1, -M2(0,0));
set(r+4, c, M2(1,1)); set(r+4, c+1, -M2(0,1));
set(r+5, c, M2(1,2)); set(r+5, c+1, -M2(0,2)); set(r+5, c+3, 1);
c = 4*N + 6*(N-1) + 6*(N-2) + 6*t;
for (int i = 0; i < 6; ++i)
set(r+i, c+i, 1);
rowlb_[r] = 0;
rowlb_[r+1] = 0;
rowlb_[r+2] = 0;
rowlb_[r+3] = 0;
rowlb_[r+4] = 0;
rowlb_[r+5] = 0;
}
// solve
CoinPackedMatrix A(true, &rows_[0], &cols_[0], &elems_[0], elems_.size());
A.setDimensions(nrows, ncols);
ClpSimplex model(false);
model.loadProblem(A, &collb_[0], &colub_[0], &obj_[0], &rowlb_[0], &rowub_[0]);
ClpDualRowSteepest dualSteep(1);
model.setDualRowPivotAlgorithm(dualSteep);
ClpPrimalColumnSteepest primalSteep(1);
model.setPrimalColumnPivotAlgorithm(primalSteep);
model.scaling(1);
ClpPresolve presolveInfo;
Ptr<ClpSimplex> presolvedModel(presolveInfo.presolvedModel(model));
if (presolvedModel)
{
presolvedModel->dual();
presolveInfo.postsolve(true);
model.checkSolution();
model.primal(1);
}
else
{
model.dual();
model.checkSolution();
model.primal(1);
}
// save results
const double *sol = model.getColSolution();
c = 0;
for (int t = 0; t < N; ++t, c += 4)
{
Mat_<float> S0 = Mat::eye(3, 3, CV_32F);
S0(1,1) = S0(0,0) = sol[c];
S0(0,1) = sol[c+1];
S0(1,0) = -sol[c+1];
S0(0,2) = sol[c+2];
S0(1,2) = sol[c+3];
S[t] = S0;
}
}
#endif // #ifndef HAVE_CLP
static inline int areaSign(Point2f a, Point2f b, Point2f c)
{
double area = (b-a).cross(c-a);
if (area < -1e-5) return -1;
if (area > 1e-5) return 1;
return 0;
}
static inline bool segmentsIntersect(Point2f a, Point2f b, Point2f c, Point2f d)
{
return areaSign(a,b,c) * areaSign(a,b,d) < 0 &&
areaSign(c,d,a) * areaSign(c,d,b) < 0;
}
// Checks whether rect a (axis-aligned) lies inside rect b (an arbitrary quadrilateral).
// Rects must be passed in the [(0,0), (w,0), (w,h), (0,h)] order.
static inline bool isRectInside(const Point2f a[4], const Point2f b[4])
{
for (int i = 0; i < 4; ++i)
if (b[i].x > a[0].x && b[i].x < a[2].x && b[i].y > a[0].y && b[i].y < a[2].y)
return false;
for (int i = 0; i < 4; ++i)
for (int j = 0; j < 4; ++j)
if (segmentsIntersect(a[i], a[(i+1)%4], b[j], b[(j+1)%4]))
return false;
return true;
}
static inline bool isGoodMotion(const float M[], float w, float h, float dx, float dy)
{
Point2f pt[4] = {Point2f(0,0), Point2f(w,0), Point2f(w,h), Point2f(0,h)};
Point2f Mpt[4];
for (int i = 0; i < 4; ++i)
{
Mpt[i].x = M[0]*pt[i].x + M[1]*pt[i].y + M[2];
Mpt[i].y = M[3]*pt[i].x + M[4]*pt[i].y + M[5];
float z = M[6]*pt[i].x + M[7]*pt[i].y + M[8];
Mpt[i].x /= z;
Mpt[i].y /= z;
}
pt[0] = Point2f(dx, dy);
pt[1] = Point2f(w - dx, dy);
pt[2] = Point2f(w - dx, h - dy);
pt[3] = Point2f(dx, h - dy);
return isRectInside(pt, Mpt);
}
static inline void relaxMotion(const float M[], float t, float res[])
{
res[0] = M[0]*(1.f-t) + t;
res[1] = M[1]*(1.f-t);
res[2] = M[2]*(1.f-t);
res[3] = M[3]*(1.f-t);
res[4] = M[4]*(1.f-t) + t;
res[5] = M[5]*(1.f-t);
res[6] = M[6]*(1.f-t);
res[7] = M[7]*(1.f-t);
res[8] = M[8]*(1.f-t) + t;
}
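// ensureInclusionConstraint blends M toward the identity by binary search on t until the centered
// rectangle left after trimming trimRatio from each side stays inside the warped frame quadrilateral,
// so the trimmed output contains no undefined border pixels.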
Mat ensureInclusionConstraint(const Mat &M, Size size, float trimRatio)
{
CV_INSTRUMENT_REGION();
CV_Assert(M.size() == Size(3,3) && M.type() == CV_32F);
const float w = static_cast<float>(size.width);
const float h = static_cast<float>(size.height);
const float dx = floor(w * trimRatio);
const float dy = floor(h * trimRatio);
const float srcM[] =
{M.at<float>(0,0), M.at<float>(0,1), M.at<float>(0,2),
M.at<float>(1,0), M.at<float>(1,1), M.at<float>(1,2),
M.at<float>(2,0), M.at<float>(2,1), M.at<float>(2,2)};
float curM[9];
float t = 0;
relaxMotion(srcM, t, curM);
if (isGoodMotion(curM, w, h, dx, dy))
return M;
float l = 0, r = 1;
while (r - l > 1e-3f)
{
t = (l + r) * 0.5f;
relaxMotion(srcM, t, curM);
if (isGoodMotion(curM, w, h, dx, dy))
r = t;
else
l = t;
}
return (1 - r) * M + r * Mat::eye(3, 3, CV_32F);
}
// TODO can be estimated for O(1) time
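// Binary search over the trim ratio in [0, 0.5]: returns the smallest ratio (up to 1e-3) whose centered
// trimmed rectangle lies entirely inside the frame warped by M.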
float estimateOptimalTrimRatio(const Mat &M, Size size)
{
CV_INSTRUMENT_REGION();
CV_Assert(M.size() == Size(3,3) && M.type() == CV_32F);
const float w = static_cast<float>(size.width);
const float h = static_cast<float>(size.height);
Mat_<float> M_(M);
Point2f pt[4] = {Point2f(0,0), Point2f(w,0), Point2f(w,h), Point2f(0,h)};
Point2f Mpt[4];
float z;
for (int i = 0; i < 4; ++i)
{
Mpt[i].x = M_(0,0)*pt[i].x + M_(0,1)*pt[i].y + M_(0,2);
Mpt[i].y = M_(1,0)*pt[i].x + M_(1,1)*pt[i].y + M_(1,2);
z = M_(2,0)*pt[i].x + M_(2,1)*pt[i].y + M_(2,2);
Mpt[i].x /= z;
Mpt[i].y /= z;
}
float l = 0, r = 0.5f;
while (r - l > 1e-3f)
{
float t = (l + r) * 0.5f;
float dx = floor(w * t);
float dy = floor(h * t);
pt[0] = Point2f(dx, dy);
pt[1] = Point2f(w - dx, dy);
pt[2] = Point2f(w - dx, h - dy);
pt[3] = Point2f(dx, h - dy);
if (isRectInside(pt, Mpt))
r = t;
else
l = t;
}
return r;
}
} // namespace videostab
} // namespace cv

View File

@ -1,155 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/video.hpp"
#include "opencv2/videostab/optical_flow.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
#ifdef HAVE_OPENCV_CUDAARITHM
#include "opencv2/cudaarithm.hpp"
#endif
namespace cv
{
namespace videostab
{
void SparsePyrLkOptFlowEstimator::run(
InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
OutputArray status, OutputArray errors)
{
calcOpticalFlowPyrLK(frame0, frame1, points0, points1, status, errors, winSize_, maxLevel_);
}
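// Minimal usage sketch (illustrative; the variable names are assumptions, not part of this module):
// SparsePyrLkOptFlowEstimator lk;
// std::vector<Point2f> pts0, pts1; std::vector<uchar> status;
// lk.run(gray0, gray1, pts0, pts1, status, cv::noArray());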
#ifdef HAVE_OPENCV_CUDAOPTFLOW
SparsePyrLkOptFlowEstimatorGpu::SparsePyrLkOptFlowEstimatorGpu()
{
CV_Assert(cuda::getCudaEnabledDeviceCount() > 0);
optFlowEstimator_ = cuda::SparsePyrLKOpticalFlow::create();
}
void SparsePyrLkOptFlowEstimatorGpu::run(
InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
OutputArray status, OutputArray errors)
{
frame0_.upload(frame0.getMat());
frame1_.upload(frame1.getMat());
points0_.upload(points0.getMat());
if (errors.needed())
{
run(frame0_, frame1_, points0_, points1_, status_, errors_);
errors_.download(errors.getMatRef());
}
else
run(frame0_, frame1_, points0_, points1_, status_);
points1_.download(points1.getMatRef());
status_.download(status.getMatRef());
}
void SparsePyrLkOptFlowEstimatorGpu::run(
const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0,
cuda::GpuMat &points1, cuda::GpuMat &status, cuda::GpuMat &errors)
{
optFlowEstimator_->setWinSize(winSize_);
optFlowEstimator_->setMaxLevel(maxLevel_);
optFlowEstimator_->calc(frame0, frame1, points0, points1, status, errors);
}
void SparsePyrLkOptFlowEstimatorGpu::run(
const cuda::GpuMat &frame0, const cuda::GpuMat &frame1, const cuda::GpuMat &points0,
cuda::GpuMat &points1, cuda::GpuMat &status)
{
optFlowEstimator_->setWinSize(winSize_);
optFlowEstimator_->setMaxLevel(maxLevel_);
optFlowEstimator_->calc(frame0, frame1, points0, points1, status);
}
DensePyrLkOptFlowEstimatorGpu::DensePyrLkOptFlowEstimatorGpu()
{
CV_Assert(cuda::getCudaEnabledDeviceCount() > 0);
optFlowEstimator_ = cuda::DensePyrLKOpticalFlow::create();
}
void DensePyrLkOptFlowEstimatorGpu::run(
InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
OutputArray errors)
{
frame0_.upload(frame0.getMat());
frame1_.upload(frame1.getMat());
optFlowEstimator_->setWinSize(winSize_);
optFlowEstimator_->setMaxLevel(maxLevel_);
if (errors.needed())
{
CV_Error(Error::StsNotImplemented, "DensePyrLkOptFlowEstimatorGpu doesn't support errors calculation");
}
else
{
cuda::GpuMat flow;
optFlowEstimator_->calc(frame0_, frame1_, flow);
cuda::GpuMat flows[2];
cuda::split(flow, flows);
flowX_ = flows[0];
flowY_ = flows[1];
}
flowX_.download(flowX.getMatRef());
flowY_.download(flowY.getMatRef());
}
#endif // HAVE_OPENCV_CUDAOPTFLOW
} // namespace videostab
} // namespace cv

View File

@ -1,197 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/videostab/outlier_rejection.hpp"
namespace cv
{
namespace videostab
{
void NullOutlierRejector::process(
Size /*frameSize*/, InputArray points0, InputArray points1, OutputArray mask)
{
CV_INSTRUMENT_REGION();
CV_Assert(points0.type() == points1.type());
CV_Assert(points0.getMat().checkVector(2) == points1.getMat().checkVector(2));
int npoints = points0.getMat().checkVector(2);
mask.create(1, npoints, CV_8U);
Mat mask_ = mask.getMat();
mask_.setTo(1);
}
TranslationBasedLocalOutlierRejector::TranslationBasedLocalOutlierRejector()
{
setCellSize(Size(50, 50));
setRansacParams(RansacParams::default2dMotion(MM_TRANSLATION));
}
void TranslationBasedLocalOutlierRejector::process(
Size frameSize, InputArray points0, InputArray points1, OutputArray mask)
{
CV_INSTRUMENT_REGION();
CV_Assert(points0.type() == points1.type());
CV_Assert(points0.getMat().checkVector(2) == points1.getMat().checkVector(2));
int npoints = points0.getMat().checkVector(2);
const Point2f* points0_ = points0.getMat().ptr<Point2f>();
const Point2f* points1_ = points1.getMat().ptr<Point2f>();
mask.create(1, npoints, CV_8U);
uchar* mask_ = mask.getMat().ptr<uchar>();
Size ncells((frameSize.width + cellSize_.width - 1) / cellSize_.width,
(frameSize.height + cellSize_.height - 1) / cellSize_.height);
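// points are binned into a coarse grid of cellSize_ cells; each cell runs its own translation-only
// RANSAC below, so matches that disagree with the locally dominant translation (e.g. on independently
// moving objects) are rejected, while different cells are allowed to follow different local motions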
// fill grid cells
grid_.assign(ncells.area(), Cell());
for (int i = 0; i < npoints; ++i)
{
int cx = std::min(cvRound(points0_[i].x / cellSize_.width), ncells.width - 1);
int cy = std::min(cvRound(points0_[i].y / cellSize_.height), ncells.height - 1);
grid_[cy * ncells.width + cx].push_back(i);
}
// process each cell
RNG rng(0);
int niters = ransacParams_.niters();
std::vector<int> inliers;
for (size_t ci = 0; ci < grid_.size(); ++ci)
{
// estimate translation model at the current cell using RANSAC
float x1, y1;
const Cell &cell = grid_[ci];
int ninliers, ninliersMax = 0;
float dxBest = 0.f, dyBest = 0.f;
// find the best hypothesis
if (!cell.empty())
{
for (int iter = 0; iter < niters; ++iter)
{
int idx = cell[static_cast<unsigned>(rng) % cell.size()];
float dx = points1_[idx].x - points0_[idx].x;
float dy = points1_[idx].y - points0_[idx].y;
ninliers = 0;
for (size_t i = 0; i < cell.size(); ++i)
{
x1 = points0_[cell[i]].x + dx;
y1 = points0_[cell[i]].y + dy;
if (sqr(x1 - points1_[cell[i]].x) + sqr(y1 - points1_[cell[i]].y) <
sqr(ransacParams_.thresh))
{
ninliers++;
}
}
if (ninliers > ninliersMax)
{
ninliersMax = ninliers;
dxBest = dx;
dyBest = dy;
}
}
}
// get the best hypothesis inliers
ninliers = 0;
inliers.resize(ninliersMax);
for (size_t i = 0; i < cell.size(); ++i)
{
x1 = points0_[cell[i]].x + dxBest;
y1 = points0_[cell[i]].y + dyBest;
if (sqr(x1 - points1_[cell[i]].x) + sqr(y1 - points1_[cell[i]].y) <
sqr(ransacParams_.thresh))
{
inliers[ninliers++] = cell[i];
}
}
// refine the best hypothesis
dxBest = dyBest = 0.f;
for (size_t i = 0; i < inliers.size(); ++i)
{
dxBest += points1_[inliers[i]].x - points0_[inliers[i]].x;
dyBest += points1_[inliers[i]].y - points0_[inliers[i]].y;
}
if (!inliers.empty())
{
dxBest /= inliers.size();
dyBest /= inliers.size();
}
// set mask elements for refined model inliers
for (size_t i = 0; i < cell.size(); ++i)
{
x1 = points0_[cell[i]].x + dxBest;
y1 = points0_[cell[i]].y + dyBest;
if (sqr(x1 - points1_[cell[i]].x) + sqr(y1 - points1_[cell[i]].y) <
sqr(ransacParams_.thresh))
{
mask_[cell[i]] = 1;
}
else
{
mask_[cell[i]] = 0;
}
}
}
}
} // namespace videostab
} // namespace cv

View File

@ -1,67 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#ifndef __OPENCV_PRECOMP_HPP__
#define __OPENCV_PRECOMP_HPP__
#include <stdexcept>
#include <iostream>
#include <ctime>
#include <algorithm>
#include "opencv2/core.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/video.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/core/private.hpp"
// some aux. functions
inline float sqr(float x) { return x * x; }
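// scalar pixel intensity used across the module; note that the weights are applied to the
// Point3_ components in x, y, z order, i.e. to the B, G, R channels as stored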
inline float intensity(const cv::Point3_<uchar> &bgr)
{
return 0.3f*bgr.x + 0.59f*bgr.y + 0.11f*bgr.z;
}
#endif

View File

@ -1,510 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/videostab/stabilizer.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
// for debug purposes
#define SAVE_MOTIONS 0
namespace cv
{
namespace videostab
{
StabilizerBase::StabilizerBase()
{
setLog(makePtr<LogToStdout>());
setFrameSource(makePtr<NullFrameSource>());
setMotionEstimator(makePtr<KeypointBasedMotionEstimator>(makePtr<MotionEstimatorRansacL2>()));
setDeblurer(makePtr<NullDeblurer>());
setInpainter(makePtr<NullInpainter>());
setRadius(15);
setTrimRatio(0);
setCorrectionForInclusion(false);
setBorderMode(BORDER_REPLICATE);
curPos_ = 0;
doDeblurring_ = false;
doInpainting_ = false;
processingStartTime_ = 0;
curStabilizedPos_ = 0;
}
void StabilizerBase::reset()
{
frameSize_ = Size(0, 0);
frameMask_ = Mat();
curPos_ = -1;
curStabilizedPos_ = -1;
doDeblurring_ = false;
preProcessedFrame_ = Mat();
doInpainting_ = false;
inpaintingMask_ = Mat();
frames_.clear();
motions_.clear();
blurrinessRates_.clear();
stabilizedFrames_.clear();
stabilizedMasks_.clear();
stabilizationMotions_.clear();
processingStartTime_ = 0;
}
Mat StabilizerBase::nextStabilizedFrame()
{
// check if we've processed all frames already
if (curStabilizedPos_ == curPos_ && curStabilizedPos_ != -1)
{
logProcessingTime();
return Mat();
}
bool processed;
do processed = doOneIteration();
while (processed && curStabilizedPos_ == -1);
// check if the frame source is empty
if (curStabilizedPos_ == -1)
{
logProcessingTime();
return Mat();
}
return postProcessFrame(at(curStabilizedPos_, stabilizedFrames_));
}
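// doOneIteration pulls one frame from the source: frames and motions live in ring buffers of size
// 2*radius_ + 1, and stabilization lags radius_ frames behind the input; once the source is exhausted,
// the remaining tail is flushed by duplicating the last frame with identity motions.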
bool StabilizerBase::doOneIteration()
{
Mat frame = frameSource_->nextFrame();
if (!frame.empty())
{
curPos_++;
if (curPos_ > 0)
{
at(curPos_, frames_) = frame;
if (doDeblurring_)
at(curPos_, blurrinessRates_) = calcBlurriness(frame);
at(curPos_ - 1, motions_) = estimateMotion();
if (curPos_ >= radius_)
{
curStabilizedPos_ = curPos_ - radius_;
stabilizeFrame();
}
}
else
setUp(frame);
log_->print(".");
return true;
}
if (curStabilizedPos_ < curPos_)
{
curStabilizedPos_++;
at(curStabilizedPos_ + radius_, frames_) = at(curPos_, frames_);
at(curStabilizedPos_ + radius_ - 1, motions_) = Mat::eye(3, 3, CV_32F);
stabilizeFrame();
log_->print(".");
return true;
}
return false;
}
void StabilizerBase::setUp(const Mat &firstFrame)
{
InpainterBase *inpaint = inpainter_.get();
doInpainting_ = dynamic_cast<NullInpainter*>(inpaint) == 0;
if (doInpainting_)
{
inpainter_->setMotionModel(motionEstimator_->motionModel());
inpainter_->setFrames(frames_);
inpainter_->setMotions(motions_);
inpainter_->setStabilizedFrames(stabilizedFrames_);
inpainter_->setStabilizationMotions(stabilizationMotions_);
}
DeblurerBase *deblurer = deblurer_.get();
doDeblurring_ = dynamic_cast<NullDeblurer*>(deblurer) == 0;
if (doDeblurring_)
{
blurrinessRates_.resize(2*radius_ + 1);
float blurriness = calcBlurriness(firstFrame);
for (int i = -radius_; i <= 0; ++i)
at(i, blurrinessRates_) = blurriness;
deblurer_->setFrames(frames_);
deblurer_->setMotions(motions_);
deblurer_->setBlurrinessRates(blurrinessRates_);
}
log_->print("processing frames");
processingStartTime_ = clock();
}
void StabilizerBase::stabilizeFrame()
{
Mat stabilizationMotion = estimateStabilizationMotion();
if (doCorrectionForInclusion_)
stabilizationMotion = ensureInclusionConstraint(stabilizationMotion, frameSize_, trimRatio_);
at(curStabilizedPos_, stabilizationMotions_) = stabilizationMotion;
if (doDeblurring_)
{
at(curStabilizedPos_, frames_).copyTo(preProcessedFrame_);
deblurer_->deblur(curStabilizedPos_, preProcessedFrame_);
}
else
preProcessedFrame_ = at(curStabilizedPos_, frames_);
// apply stabilization transformation
if (motionEstimator_->motionModel() != MM_HOMOGRAPHY)
warpAffine(
preProcessedFrame_, at(curStabilizedPos_, stabilizedFrames_),
stabilizationMotion(Rect(0,0,3,2)), frameSize_, INTER_LINEAR, borderMode_);
else
warpPerspective(
preProcessedFrame_, at(curStabilizedPos_, stabilizedFrames_),
stabilizationMotion, frameSize_, INTER_LINEAR, borderMode_);
if (doInpainting_)
{
if (motionEstimator_->motionModel() != MM_HOMOGRAPHY)
warpAffine(
frameMask_, at(curStabilizedPos_, stabilizedMasks_),
stabilizationMotion(Rect(0,0,3,2)), frameSize_, INTER_NEAREST);
else
warpPerspective(
frameMask_, at(curStabilizedPos_, stabilizedMasks_),
stabilizationMotion, frameSize_, INTER_NEAREST);
erode(at(curStabilizedPos_, stabilizedMasks_), at(curStabilizedPos_, stabilizedMasks_),
Mat());
at(curStabilizedPos_, stabilizedMasks_).copyTo(inpaintingMask_);
inpainter_->inpaint(
curStabilizedPos_, at(curStabilizedPos_, stabilizedFrames_), inpaintingMask_);
}
}
Mat StabilizerBase::postProcessFrame(const Mat &frame)
{
// trim frame
int dx = static_cast<int>(floor(trimRatio_ * frame.cols));
int dy = static_cast<int>(floor(trimRatio_ * frame.rows));
return frame(Rect(dx, dy, frame.cols - 2*dx, frame.rows - 2*dy));
}
void StabilizerBase::logProcessingTime()
{
clock_t elapsedTime = clock() - processingStartTime_;
log_->print("\nprocessing time: %.3f sec\n", static_cast<double>(elapsedTime) / CLOCKS_PER_SEC);
}
OnePassStabilizer::OnePassStabilizer()
{
setMotionFilter(makePtr<GaussianMotionFilter>());
reset();
}
void OnePassStabilizer::reset()
{
StabilizerBase::reset();
}
void OnePassStabilizer::setUp(const Mat &firstFrame)
{
frameSize_ = firstFrame.size();
frameMask_.create(frameSize_, CV_8U);
frameMask_.setTo(255);
int cacheSize = 2*radius_ + 1;
frames_.resize(cacheSize);
stabilizedFrames_.resize(cacheSize);
stabilizedMasks_.resize(cacheSize);
motions_.resize(cacheSize);
stabilizationMotions_.resize(cacheSize);
for (int i = -radius_; i < 0; ++i)
{
at(i, motions_) = Mat::eye(3, 3, CV_32F);
at(i, frames_) = firstFrame;
}
at(0, frames_) = firstFrame;
StabilizerBase::setUp(firstFrame);
}
Mat OnePassStabilizer::estimateMotion()
{
return motionEstimator_->estimate(at(curPos_ - 1, frames_), at(curPos_, frames_));
}
Mat OnePassStabilizer::estimateStabilizationMotion()
{
return motionFilter_->stabilize(curStabilizedPos_, motions_, std::make_pair(0, curPos_));
}
Mat OnePassStabilizer::postProcessFrame(const Mat &frame)
{
return StabilizerBase::postProcessFrame(frame);
}
TwoPassStabilizer::TwoPassStabilizer()
{
setMotionStabilizer(makePtr<GaussianMotionFilter>());
setWobbleSuppressor(makePtr<NullWobbleSuppressor>());
setEstimateTrimRatio(false);
reset();
}
void TwoPassStabilizer::reset()
{
StabilizerBase::reset();
frameCount_ = 0;
isPrePassDone_ = false;
doWobbleSuppression_ = false;
motions2_.clear();
suppressedFrame_ = Mat();
}
Mat TwoPassStabilizer::nextFrame()
{
runPrePassIfNecessary();
return StabilizerBase::nextStabilizedFrame();
}
#if SAVE_MOTIONS
static void saveMotions(
int frameCount, const std::vector<Mat> &motions, const std::vector<Mat> &stabilizationMotions)
{
std::ofstream fm("log_motions.csv");
for (int i = 0; i < frameCount - 1; ++i)
{
Mat_<float> M = at(i, motions);
fm << M(0,0) << " " << M(0,1) << " " << M(0,2) << " "
<< M(1,0) << " " << M(1,1) << " " << M(1,2) << " "
<< M(2,0) << " " << M(2,1) << " " << M(2,2) << std::endl;
}
std::ofstream fo("log_orig.csv");
for (int i = 0; i < frameCount; ++i)
{
Mat_<float> M = getMotion(0, i, motions);
fo << M(0,0) << " " << M(0,1) << " " << M(0,2) << " "
<< M(1,0) << " " << M(1,1) << " " << M(1,2) << " "
<< M(2,0) << " " << M(2,1) << " " << M(2,2) << std::endl;
}
std::ofstream fs("log_stab.csv");
for (int i = 0; i < frameCount; ++i)
{
Mat_<float> M = stabilizationMotions[i] * getMotion(0, i, motions);
fs << M(0,0) << " " << M(0,1) << " " << M(0,2) << " "
<< M(1,0) << " " << M(1,1) << " " << M(1,2) << " "
<< M(2,0) << " " << M(2,1) << " " << M(2,2) << std::endl;
}
}
#endif
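// First pass: read the whole sequence once, estimating every inter-frame motion (and, when wobble
// suppression is enabled, a second set of motions), then stabilize the full motion path, optionally
// estimate the trim ratio, and finally reset the frame source for the stabilization pass.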
void TwoPassStabilizer::runPrePassIfNecessary()
{
if (!isPrePassDone_)
{
// check if we must do wobble suppression
WobbleSuppressorBase *wobble = wobbleSuppressor_.get();
doWobbleSuppression_ = dynamic_cast<NullWobbleSuppressor*>(wobble) == 0;
// estimate motions
clock_t startTime = clock();
log_->print("first pass: estimating motions");
Mat prevFrame, frame;
bool ok = true, ok2 = true;
while (!(frame = frameSource_->nextFrame()).empty())
{
if (frameCount_ > 0)
{
motions_.push_back(motionEstimator_->estimate(prevFrame, frame, &ok));
if (doWobbleSuppression_)
{
Mat M = wobbleSuppressor_->motionEstimator()->estimate(prevFrame, frame, &ok2);
if (ok2)
motions2_.push_back(M);
else
motions2_.push_back(motions_.back());
}
if (ok)
{
if (ok2) log_->print(".");
else log_->print("?");
}
else log_->print("x");
}
else
{
frameSize_ = frame.size();
frameMask_.create(frameSize_, CV_8U);
frameMask_.setTo(255);
}
prevFrame = frame;
frameCount_++;
}
clock_t elapsedTime = clock() - startTime;
log_->print("\nmotion estimation time: %.3f sec\n",
static_cast<double>(elapsedTime) / CLOCKS_PER_SEC);
// add aux. motions
for (int i = 0; i < radius_; ++i)
motions_.push_back(Mat::eye(3, 3, CV_32F));
// stabilize
startTime = clock();
stabilizationMotions_.resize(frameCount_);
motionStabilizer_->stabilize(
frameCount_, motions_, std::make_pair(0, frameCount_ - 1), &stabilizationMotions_[0]);
elapsedTime = clock() - startTime;
log_->print("motion stabilization time: %.3f sec\n",
static_cast<double>(elapsedTime) / CLOCKS_PER_SEC);
// estimate optimal trim ratio if necessary
if (mustEstTrimRatio_)
{
trimRatio_ = 0;
for (int i = 0; i < frameCount_; ++i)
{
Mat S = stabilizationMotions_[i];
trimRatio_ = std::max(trimRatio_, estimateOptimalTrimRatio(S, frameSize_));
}
log_->print("estimated trim ratio: %f\n", static_cast<double>(trimRatio_));
}
#if SAVE_MOTIONS
saveMotions(frameCount_, motions_, stabilizationMotions_);
#endif
isPrePassDone_ = true;
frameSource_->reset();
}
}
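// Second-pass setup: seed the frame cache with copies of the first frame and hand
// the pre-pass results (frame count, both motion sequences and the stabilization
// motions) over to the wobble suppressor when it is active.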
void TwoPassStabilizer::setUp(const Mat &firstFrame)
{
int cacheSize = 2*radius_ + 1;
frames_.resize(cacheSize);
stabilizedFrames_.resize(cacheSize);
stabilizedMasks_.resize(cacheSize);
for (int i = -radius_; i <= 0; ++i)
at(i, frames_) = firstFrame;
WobbleSuppressorBase *wobble = wobbleSuppressor_.get();
doWobbleSuppression_ = dynamic_cast<NullWobbleSuppressor*>(wobble) == 0;
if (doWobbleSuppression_)
{
wobbleSuppressor_->setFrameCount(frameCount_);
wobbleSuppressor_->setMotions(motions_);
wobbleSuppressor_->setMotions2(motions2_);
wobbleSuppressor_->setStabilizationMotions(stabilizationMotions_);
}
StabilizerBase::setUp(firstFrame);
}
Mat TwoPassStabilizer::estimateMotion()
{
return motions_[curPos_ - 1].clone();
}
Mat TwoPassStabilizer::estimateStabilizationMotion()
{
return stabilizationMotions_[curStabilizedPos_].clone();
}
Mat TwoPassStabilizer::postProcessFrame(const Mat &frame)
{
wobbleSuppressor_->suppress(curStabilizedPos_, frame, suppressedFrame_);
return StabilizerBase::postProcessFrame(suppressedFrame_);
}
} // namespace videostab
} // namespace cv
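// A minimal usage sketch (illustrative only; the file name and the chosen components
// are assumptions, not requirements of this module):
//
//   Ptr<VideoFileSource> source = makePtr<VideoFileSource>("input.avi");
//   OnePassStabilizer stabilizer;
//   stabilizer.setFrameSource(source);
//   stabilizer.setMotionEstimator(makePtr<KeypointBasedMotionEstimator>(
//       makePtr<MotionEstimatorRansacL2>(MM_AFFINE)));
//   stabilizer.setMotionFilter(makePtr<GaussianMotionFilter>(15));
//   Mat frame;
//   while (!(frame = stabilizer.nextFrame()).empty())
//   {
//       // consume the stabilized frame (display, write to disk, ...)
//   }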

View File

@ -1,188 +0,0 @@
/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include "precomp.hpp"
#include "opencv2/videostab/wobble_suppression.hpp"
#include "opencv2/videostab/ring_buffer.hpp"
#include "opencv2/core/private.cuda.hpp"
#ifdef HAVE_OPENCV_CUDAWARPING
# include "opencv2/cudawarping.hpp"
#endif
#if defined(HAVE_OPENCV_CUDAWARPING)
#if !defined HAVE_CUDA || defined(CUDA_DISABLER)
namespace cv { namespace cuda {
static void calcWobbleSuppressionMaps(int, int, int, Size, const Mat&, const Mat&, GpuMat&, GpuMat&) { throw_no_cuda(); }
}}
#else
namespace cv { namespace cuda { namespace device { namespace globmotion {
void calcWobbleSuppressionMaps(
int left, int idx, int right, int width, int height,
const float *ml, const float *mr, PtrStepSzf mapx, PtrStepSzf mapy);
}}}}
namespace cv { namespace cuda {
static void calcWobbleSuppressionMaps(
int left, int idx, int right, Size size, const Mat &ml, const Mat &mr,
GpuMat &mapx, GpuMat &mapy)
{
CV_Assert(ml.size() == Size(3, 3) && ml.type() == CV_32F && ml.isContinuous());
CV_Assert(mr.size() == Size(3, 3) && mr.type() == CV_32F && mr.isContinuous());
mapx.create(size, CV_32F);
mapy.create(size, CV_32F);
cv::cuda::device::globmotion::calcWobbleSuppressionMaps(
left, idx, right, size.width, size.height,
ml.ptr<float>(), mr.ptr<float>(), mapx, mapy);
}
}}
#endif
#endif
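// The wrapper above either raises throw_no_cuda() when OpenCV is built without CUDA
// (or with CUDA_DISABLER), or validates the 3x3 CV_32F motion matrices and launches
// the device kernel that fills the remap tables used by the GPU suppressor below.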
namespace cv
{
namespace videostab
{
WobbleSuppressorBase::WobbleSuppressorBase() : frameCount_(0), motions_(0), motions2_(0), stabilizationMotions_(0)
{
setMotionEstimator(makePtr<KeypointBasedMotionEstimator>(makePtr<MotionEstimatorRansacL2>(MM_HOMOGRAPHY)));
}
void NullWobbleSuppressor::suppress(int /*idx*/, const Mat &frame, Mat &result)
{
result = frame;
}
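// MoreAccurateMotionWobbleSuppressor: frames whose index is a multiple of period_
// act as keyframes and are passed through unchanged; every other frame is re-warped
// by blending two correction maps built from the motion chains to the neighbouring
// keyframes (see ML/MR below).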
void MoreAccurateMotionWobbleSuppressor::suppress(int idx, const Mat &frame, Mat &result)
{
CV_Assert(motions_ && stabilizationMotions_);
if (idx % period_ == 0)
{
result = frame;
return;
}
int k1 = idx / period_ * period_;
int k2 = std::min(k1 + period_, frameCount_ - 1);
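// ML and MR compare, in stabilized coordinates (conjugation by S1), the motion
// accumulated with the suppressor's higher-DOF estimator (motions2_, homography with
// the default estimator) against the motion used for stabilization (motions_) from
// the left (k1) and right (k2) keyframes. The per-pixel loop below blends the two
// resulting mappings with weights proportional to the temporal distance to each
// keyframe.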
Mat S1 = (*stabilizationMotions_)[idx];
Mat_<float> ML = S1 * getMotion(k1, idx, *motions2_) * getMotion(k1, idx, *motions_).inv() * S1.inv();
Mat_<float> MR = S1 * getMotion(idx, k2, *motions2_).inv() * getMotion(idx, k2, *motions_) * S1.inv();
mapx_.create(frame.size());
mapy_.create(frame.size());
float xl, yl, zl, wl;
float xr, yr, zr, wr;
for (int y = 0; y < frame.rows; ++y)
{
for (int x = 0; x < frame.cols; ++x)
{
xl = ML(0,0)*x + ML(0,1)*y + ML(0,2);
yl = ML(1,0)*x + ML(1,1)*y + ML(1,2);
zl = ML(2,0)*x + ML(2,1)*y + ML(2,2);
xl /= zl; yl /= zl;
wl = float(idx - k1);
xr = MR(0,0)*x + MR(0,1)*y + MR(0,2);
yr = MR(1,0)*x + MR(1,1)*y + MR(1,2);
zr = MR(2,0)*x + MR(2,1)*y + MR(2,2);
xr /= zr; yr /= zr;
wr = float(k2 - idx);
mapx_(y,x) = (wr * xl + wl * xr) / (wl + wr);
mapy_(y,x) = (wr * yl + wl * yr) / (wl + wr);
}
}
if (result.data == frame.data)
result = Mat(frame.size(), frame.type());
remap(frame, result, mapx_, mapy_, INTER_LINEAR, BORDER_REPLICATE);
}
#if defined(HAVE_OPENCV_CUDAWARPING)
void MoreAccurateMotionWobbleSuppressorGpu::suppress(int idx, const cuda::GpuMat &frame, cuda::GpuMat &result)
{
CV_Assert(motions_ && stabilizationMotions_);
if (idx % period_ == 0)
{
result = frame;
return;
}
int k1 = idx / period_ * period_;
int k2 = std::min(k1 + period_, frameCount_ - 1);
Mat S1 = (*stabilizationMotions_)[idx];
Mat ML = S1 * getMotion(k1, idx, *motions2_) * getMotion(k1, idx, *motions_).inv() * S1.inv();
Mat MR = S1 * getMotion(idx, k2, *motions2_).inv() * getMotion(idx, k2, *motions_) * S1.inv();
cuda::calcWobbleSuppressionMaps(k1, idx, k2, frame.size(), ML, MR, mapx_, mapy_);
if (result.data == frame.data)
result = cuda::GpuMat(frame.size(), frame.type());
cuda::remap(frame, result, mapx_, mapy_, INTER_LINEAR, BORDER_REPLICATE);
}
void MoreAccurateMotionWobbleSuppressorGpu::suppress(int idx, const Mat &frame, Mat &result)
{
frameDevice_.upload(frame);
suppress(idx, frameDevice_, resultDevice_);
resultDevice_.download(result);
}
#endif
} // namespace videostab
} // namespace cv

View File

@ -1,11 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_precomp.hpp"
#if defined(HAVE_HPX)
#include <hpx/hpx_main.hpp>
#endif
CV_TEST_MAIN("cv")

View File

@ -1,175 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#include "test_precomp.hpp"
namespace opencv_test { namespace {
namespace testUtil
{
cv::RNG rng(/*std::time(0)*/0);
const float sigma = 1.f;
const float pointsMaxX = 500.f;
const float pointsMaxY = 500.f;
const int testRun = 5000;
void generatePoints(cv::Mat points);
void addNoise(cv::Mat points);
cv::Mat generateTransform(const cv::videostab::MotionModel model);
double performTest(const cv::videostab::MotionModel model, int size);
}
void testUtil::generatePoints(cv::Mat points)
{
CV_Assert(!points.empty());
for(int i = 0; i < points.cols; ++i)
{
points.at<float>(0, i) = rng.uniform(0.f, pointsMaxX);
points.at<float>(1, i) = rng.uniform(0.f, pointsMaxY);
points.at<float>(2, i) = 1.f;
}
}
void testUtil::addNoise(cv::Mat points)
{
CV_Assert(!points.empty());
for(int i = 0; i < points.cols; i++)
{
points.at<float>(0, i) += static_cast<float>(rng.gaussian(sigma));
points.at<float>(1, i) += static_cast<float>(rng.gaussian(sigma));
}
}
cv::Mat testUtil::generateTransform(const cv::videostab::MotionModel model)
{
/*----------Params----------*/
const float minAngle = 0.f, maxAngle = static_cast<float>(CV_PI);
const float minScale = 0.5f, maxScale = 2.f;
const float maxTranslation = 100.f;
const float affineCoeff = 3.f;
/*----------Params----------*/
cv::Mat transform = cv::Mat::eye(3, 3, CV_32F);
if(model != cv::videostab::MM_ROTATION)
{
transform.at<float>(0,2) = rng.uniform(-maxTranslation, maxTranslation);
transform.at<float>(1,2) = rng.uniform(-maxTranslation, maxTranslation);
}
if(model != cv::videostab::MM_AFFINE)
{
if(model != cv::videostab::MM_TRANSLATION_AND_SCALE &&
model != cv::videostab::MM_TRANSLATION)
{
const float angle = rng.uniform(minAngle, maxAngle);
transform.at<float>(1,1) = transform.at<float>(0,0) = std::cos(angle);
transform.at<float>(0,1) = std::sin(angle);
transform.at<float>(1,0) = -transform.at<float>(0,1);
}
if(model == cv::videostab::MM_TRANSLATION_AND_SCALE ||
model == cv::videostab::MM_SIMILARITY)
{
const float scale = rng.uniform(minScale, maxScale);
transform.at<float>(0,0) *= scale;
transform.at<float>(1,1) *= scale;
}
}
else
{
transform.at<float>(0,0) = rng.uniform(-affineCoeff, affineCoeff);
transform.at<float>(0,1) = rng.uniform(-affineCoeff, affineCoeff);
transform.at<float>(1,0) = rng.uniform(-affineCoeff, affineCoeff);
transform.at<float>(1,1) = rng.uniform(-affineCoeff, affineCoeff);
}
return transform;
}
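// performTest: for each of testRun attempts, draw a random ground-truth transform for
// the given motion model, generate random homogeneous points, transform them and add
// Gaussian noise, then let MotionEstimatorRansacL2 recover the transform from the
// noisy correspondences and track the worst infinity-norm deviation between the
// re-projected points and the noisy targets.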
double testUtil::performTest(const cv::videostab::MotionModel model, int size)
{
cv::Ptr<cv::videostab::MotionEstimatorRansacL2> estimator = cv::makePtr<cv::videostab::MotionEstimatorRansacL2>(model);
estimator->setRansacParams(cv::videostab::RansacParams(size, 3.f*testUtil::sigma /*3 sigma rule*/, 0.5f, 0.5f));
double disparity = 0.;
for(int attempt = 0; attempt < testUtil::testRun; attempt++)
{
const cv::Mat transform = testUtil::generateTransform(model);
const int pointsNumber = testUtil::rng.uniform(10, 100);
cv::Mat points(3, pointsNumber, CV_32F);
testUtil::generatePoints(points);
cv::Mat transformedPoints = transform * points;
testUtil::addNoise(transformedPoints);
const cv::Mat src = points.rowRange(0,2).t();
const cv::Mat dst = transformedPoints.rowRange(0,2).t();
bool isOK = false;
const cv::Mat estTransform = estimator->estimate(src.reshape(2), dst.reshape(2), &isOK);
CV_Assert(isOK);
const cv::Mat testPoints = estTransform * points;
const double norm = cv::norm(testPoints, transformedPoints, cv::NORM_INF);
disparity = std::max(disparity, norm);
}
return disparity;
}
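// Each regression test below runs performTest with a model-specific RANSAC subset
// size and requires the worst-case deviation to stay below a fixed threshold (7 for
// most models, 9 for the less constrained affine case).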
TEST(Regression, MM_TRANSLATION)
{
EXPECT_LT(testUtil::performTest(cv::videostab::MM_TRANSLATION, 2), 7.f);
}
TEST(Regression, MM_TRANSLATION_AND_SCALE)
{
EXPECT_LT(testUtil::performTest(cv::videostab::MM_TRANSLATION_AND_SCALE, 3), 7.f);
}
TEST(Regression, MM_ROTATION)
{
EXPECT_LT(testUtil::performTest(cv::videostab::MM_ROTATION, 2), 7.f);
}
TEST(Regression, MM_RIGID)
{
EXPECT_LT(testUtil::performTest(cv::videostab::MM_RIGID, 3), 7.f);
}
TEST(Regression, MM_SIMILARITY)
{
EXPECT_LT(testUtil::performTest(cv::videostab::MM_SIMILARITY, 4), 7.f);
}
TEST(Regression, MM_AFFINE)
{
EXPECT_LT(testUtil::performTest(cv::videostab::MM_AFFINE, 6), 9.f);
}
}} // namespace

View File

@ -1,11 +0,0 @@
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
#ifndef __OPENCV_TEST_PRECOMP_HPP__
#define __OPENCV_TEST_PRECOMP_HPP__
#include "opencv2/ts.hpp"
#include "opencv2/videostab.hpp"
#endif

View File

@ -14,7 +14,6 @@ set(OPENCV_CPP_SAMPLES_REQUIRED_DEPS
opencv_features2d
opencv_calib3d
opencv_stitching
opencv_videostab
${OPENCV_MODULES_PUBLIC}
${OpenCV_LIB_COMPONENTS})
ocv_check_dependencies(${OPENCV_CPP_SAMPLES_REQUIRED_DEPS})

View File

@ -1,566 +0,0 @@
#include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include <stdexcept>
#include "opencv2/core.hpp"
#include <opencv2/core/utility.hpp>
#include "opencv2/video.hpp"
#include "opencv2/imgproc.hpp"
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"
#include "opencv2/videostab.hpp"
#include "opencv2/opencv_modules.hpp"
#define arg(name) cmd.get<string>(name)
#define argb(name) cmd.get<bool>(name)
#define argi(name) cmd.get<int>(name)
#define argf(name) cmd.get<float>(name)
#define argd(name) cmd.get<double>(name)
using namespace std;
using namespace cv;
using namespace cv::videostab;
Ptr<IFrameSource> stabilizedFrames;
string saveMotionsPath;
double outputFps;
string outputPath;
bool quietMode;
void run();
void saveMotionsIfNecessary();
void printHelp();
MotionModel motionModel(const string &str);
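// run() drains the stabilizer (exposed through the IFrameSource interface), lazily
// opens an XVID VideoWriter for the first stabilized frame when an output path is
// given, and shows each frame unless quiet mode is requested.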
void run()
{
VideoWriter writer;
Mat stabilizedFrame;
int nframes = 0;
// for each stabilized frame
while (!(stabilizedFrame = stabilizedFrames->nextFrame()).empty())
{
nframes++;
// init writer (once) and save stabilized frame
if (!outputPath.empty())
{
if (!writer.isOpened())
writer.open(outputPath, VideoWriter::fourcc('X','V','I','D'),
outputFps, stabilizedFrame.size());
writer << stabilizedFrame;
}
// show stabilized frame
if (!quietMode)
{
imshow("stabilizedFrame", stabilizedFrame);
char key = static_cast<char>(waitKey(3));
if (key == 27) { cout << endl; break; }
}
}
cout << "processed frames: " << nframes << endl
<< "finished\n";
}
void printHelp()
{
cout << "OpenCV video stabilizer.\n"
"Usage: videostab <file_path> [arguments]\n\n"
"Arguments:\n"
" -m=, --model=(transl|transl_and_scale|rigid|similarity|affine|homography)\n"
" Set motion model. The default is affine.\n"
" -lp=, --lin-prog-motion-est=(yes|no)\n"
" Turn on/off LP based motion estimation. The default is no.\n"
" --subset=(<int_number>|auto)\n"
" Number of random samples per one motion hypothesis. The default is auto.\n"
" --thresh=(<float_number>|auto)\n"
" Maximum error to classify match as inlier. The default is auto.\n"
" --outlier-ratio=<float_number>\n"
" Motion estimation outlier ratio hypothesis. The default is 0.5.\n"
" --min-inlier-ratio=<float_number>\n"
" Minimum inlier ratio to decide if estimated motion is OK. The default is 0.1.\n"
" --nkps=<int_number>\n"
" Number of keypoints to find in each frame. The default is 1000.\n"
" --local-outlier-rejection=(yes|no)\n"
" Perform local outlier rejection. The default is no.\n\n"
" -sm=, --save-motions=(<file_path>|no)\n"
" Save estimated motions into file. The default is no.\n"
" -lm=, --load-motions=(<file_path>|no)\n"
" Load motions from file. The default is no.\n\n"
" -r=, --radius=<int_number>\n"
" Set sliding window radius. The default is 15.\n"
" --stdev=(<float_number>|auto)\n"
" Set smoothing weights standard deviation. The default is auto\n"
" (i.e. sqrt(radius)).\n"
" -lps=, --lin-prog-stab=(yes|no)\n"
" Turn on/off linear programming based stabilization method.\n"
" --lps-trim-ratio=(<float_number>|auto)\n"
" Trimming ratio used in linear programming based method.\n"
" --lps-w1=(<float_number>|1)\n"
" 1st derivative weight. The default is 1.\n"
" --lps-w2=(<float_number>|10)\n"
" 2nd derivative weight. The default is 10.\n"
" --lps-w3=(<float_number>|100)\n"
" 3rd derivative weight. The default is 100.\n"
" --lps-w4=(<float_number>|100)\n"
" Non-translation motion components weight. The default is 100.\n\n"
" --deblur=(yes|no)\n"
" Do deblurring.\n"
" --deblur-sens=<float_number>\n"
" Set deblurring sensitivity (from 0 to +inf). The default is 0.1.\n\n"
" -t=, --trim-ratio=<float_number>\n"
" Set trimming ratio (from 0 to 0.5). The default is 0.1.\n"
" -et=, --est-trim=(yes|no)\n"
" Estimate trim ratio automatically. The default is yes.\n"
" -ic=, --incl-constr=(yes|no)\n"
" Ensure the inclusion constraint is always satisfied. The default is no.\n\n"
" -bm=, --border-mode=(replicate|reflect|const)\n"
" Set border extrapolation mode. The default is replicate.\n\n"
" --mosaic=(yes|no)\n"
" Do consistent mosaicing. The default is no.\n"
" --mosaic-stdev=<float_number>\n"
" Consistent mosaicing stdev threshold. The default is 10.0.\n\n"
" -mi=, --motion-inpaint=(yes|no)\n"
" Do motion inpainting (requires CUDA support). The default is no.\n"
" --mi-dist-thresh=<float_number>\n"
" Estimated flow distance threshold for motion inpainting. The default is 5.0.\n\n"
" -ci=, --color-inpaint=(no|average|ns|telea)\n"
" Do color inpainting. The default is no.\n"
" --ci-radius=<float_number>\n"
" Set color inpainting radius (for ns and telea options only).\n"
" The default is 2.0\n\n"
" -ws=, --wobble-suppress=(yes|no)\n"
" Perform wobble suppression. The default is no.\n"
" --ws-lp=(yes|no)\n"
" Turn on/off LP based motion estimation. The default is no.\n"
" --ws-period=<int_number>\n"
" Set wobble suppression period. The default is 30.\n"
" --ws-model=(transl|transl_and_scale|rigid|similarity|affine|homography)\n"
" Set wobble suppression motion model (must have more DOF than motion \n"
" estimation model). The default is homography.\n"
" --ws-subset=(<int_number>|auto)\n"
" Number of random samples per one motion hypothesis. The default is auto.\n"
" --ws-thresh=(<float_number>|auto)\n"
" Maximum error to classify match as inlier. The default is auto.\n"
" --ws-outlier-ratio=<float_number>\n"
" Motion estimation outlier ratio hypothesis. The default is 0.5.\n"
" --ws-min-inlier-ratio=<float_number>\n"
" Minimum inlier ratio to decide if estimated motion is OK. The default is 0.1.\n"
" --ws-nkps=<int_number>\n"
" Number of keypoints to find in each frame. The default is 1000.\n"
" --ws-local-outlier-rejection=(yes|no)\n"
" Perform local outlier rejection. The default is no.\n\n"
" -sm2=, --save-motions2=(<file_path>|no)\n"
" Save motions estimated for wobble suppression. The default is no.\n"
" -lm2=, --load-motions2=(<file_path>|no)\n"
" Load motions for wobble suppression from file. The default is no.\n\n"
" -gpu=(yes|no)\n"
" Use CUDA optimization whenever possible. The default is no.\n\n"
" -o=, --output=(no|<file_path>)\n"
" Set output file path explicitly. The default is stabilized.avi.\n"
" --fps=(<float_number>|auto)\n"
" Set output video FPS explicitly. By default the source FPS is used (auto).\n"
" -q, --quiet\n"
" Don't show output video frames.\n\n"
" -h, --help\n"
" Print help.\n\n"
"Note: some argument configurations lead to two passes, some to single pass.\n\n";
}
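// Example invocation (the file names are illustrative only):
//   videostab shaky.avi -m=homography -ws=yes --ws-period=30 -o=stabilized.avi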
// motion estimator builders provide concise construction of motion estimators from the parsed command-line arguments
class IMotionEstimatorBuilder
{
public:
virtual ~IMotionEstimatorBuilder() {}
virtual Ptr<ImageMotionEstimatorBase> build() = 0;
protected:
IMotionEstimatorBuilder(CommandLineParser &command) : cmd(command) {}
CommandLineParser cmd;
};
class MotionEstimatorRansacL2Builder : public IMotionEstimatorBuilder
{
public:
MotionEstimatorRansacL2Builder(CommandLineParser &command, bool use_gpu, const string &_prefix = "")
: IMotionEstimatorBuilder(command), gpu(use_gpu), prefix(_prefix) {}
virtual Ptr<ImageMotionEstimatorBase> build() CV_OVERRIDE
{
Ptr<MotionEstimatorRansacL2> est = makePtr<MotionEstimatorRansacL2>(motionModel(arg(prefix + "model")));
RansacParams ransac = est->ransacParams();
if (arg(prefix + "subset") != "auto")
ransac.size = argi(prefix + "subset");
if (arg(prefix + "thresh") != "auto")
ransac.thresh = argf(prefix + "thresh");
ransac.eps = argf(prefix + "outlier-ratio");
est->setRansacParams(ransac);
est->setMinInlierRatio(argf(prefix + "min-inlier-ratio"));
Ptr<IOutlierRejector> outlierRejector = makePtr<NullOutlierRejector>();
if (arg(prefix + "local-outlier-rejection") == "yes")
{
Ptr<TranslationBasedLocalOutlierRejector> tblor = makePtr<TranslationBasedLocalOutlierRejector>();
RansacParams ransacParams = tblor->ransacParams();
if (arg(prefix + "thresh") != "auto")
ransacParams.thresh = argf(prefix + "thresh");
tblor->setRansacParams(ransacParams);
outlierRejector = tblor;
}
#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
if (gpu)
{
Ptr<KeypointBasedMotionEstimatorGpu> kbest = makePtr<KeypointBasedMotionEstimatorGpu>(est);
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
#else
CV_Assert(gpu == false && "CUDA modules are not available");
#endif
Ptr<KeypointBasedMotionEstimator> kbest = makePtr<KeypointBasedMotionEstimator>(est);
kbest->setDetector(GFTTDetector::create(argi(prefix + "nkps")));
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
private:
bool gpu;
string prefix;
};
class MotionEstimatorL1Builder : public IMotionEstimatorBuilder
{
public:
MotionEstimatorL1Builder(CommandLineParser &command, bool use_gpu, const string &_prefix = "")
: IMotionEstimatorBuilder(command), gpu(use_gpu), prefix(_prefix) {}
virtual Ptr<ImageMotionEstimatorBase> build() CV_OVERRIDE
{
Ptr<MotionEstimatorL1> est = makePtr<MotionEstimatorL1>(motionModel(arg(prefix + "model")));
Ptr<IOutlierRejector> outlierRejector = makePtr<NullOutlierRejector>();
if (arg(prefix + "local-outlier-rejection") == "yes")
{
Ptr<TranslationBasedLocalOutlierRejector> tblor = makePtr<TranslationBasedLocalOutlierRejector>();
RansacParams ransacParams = tblor->ransacParams();
if (arg(prefix + "thresh") != "auto")
ransacParams.thresh = argf(prefix + "thresh");
tblor->setRansacParams(ransacParams);
outlierRejector = tblor;
}
#if defined(HAVE_OPENCV_CUDAIMGPROC) && defined(HAVE_OPENCV_CUDAOPTFLOW)
if (gpu)
{
Ptr<KeypointBasedMotionEstimatorGpu> kbest = makePtr<KeypointBasedMotionEstimatorGpu>(est);
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
#else
CV_Assert(gpu == false && "CUDA modules are not available");
#endif
Ptr<KeypointBasedMotionEstimator> kbest = makePtr<KeypointBasedMotionEstimator>(est);
kbest->setDetector(GFTTDetector::create(argi(prefix + "nkps")));
kbest->setOutlierRejector(outlierRejector);
return kbest;
}
private:
bool gpu;
string prefix;
};
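// main(): parse the command line, optionally warm up the GPU, build the motion
// estimators via the builders above, pick a one-pass or two-pass stabilizer depending
// on the requested features, wire up deblurring, trimming, border handling and
// inpainting, and finally stream the stabilized result through run().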
int main(int argc, const char **argv)
{
try
{
const char *keys =
"{ @1 | | }"
"{ m model | affine | }"
"{ lp lin-prog-motion-est | no | }"
"{ subset | auto | }"
"{ thresh | auto | }"
"{ outlier-ratio | 0.5 | }"
"{ min-inlier-ratio | 0.1 | }"
"{ nkps | 1000 | }"
"{ extra-kps | 0 | }"
"{ local-outlier-rejection | no | }"
"{ sm save-motions | no | }"
"{ lm load-motions | no | }"
"{ r radius | 15 | }"
"{ stdev | auto | }"
"{ lps lin-prog-stab | no | }"
"{ lps-trim-ratio | auto | }"
"{ lps-w1 | 1 | }"
"{ lps-w2 | 10 | }"
"{ lps-w3 | 100 | }"
"{ lps-w4 | 100 | }"
"{ deblur | no | }"
"{ deblur-sens | 0.1 | }"
"{ et est-trim | yes | }"
"{ t trim-ratio | 0.1 | }"
"{ ic incl-constr | no | }"
"{ bm border-mode | replicate | }"
"{ mosaic | no | }"
"{ ms mosaic-stdev | 10.0 | }"
"{ mi motion-inpaint | no | }"
"{ mi-dist-thresh | 5.0 | }"
"{ ci color-inpaint | no | }"
"{ ci-radius | 2 | }"
"{ ws wobble-suppress | no | }"
"{ ws-period | 30 | }"
"{ ws-model | homography | }"
"{ ws-subset | auto | }"
"{ ws-thresh | auto | }"
"{ ws-outlier-ratio | 0.5 | }"
"{ ws-min-inlier-ratio | 0.1 | }"
"{ ws-nkps | 1000 | }"
"{ ws-extra-kps | 0 | }"
"{ ws-local-outlier-rejection | no | }"
"{ ws-lp | no | }"
"{ sm2 save-motions2 | no | }"
"{ lm2 load-motions2 | no | }"
"{ gpu | no | }"
"{ o output | stabilized.avi | }"
"{ fps | auto | }"
"{ q quiet | | }"
"{ h help | | }";
CommandLineParser cmd(argc, argv, keys);
// parse command arguments
if (argb("help"))
{
printHelp();
return 0;
}
if (arg("gpu") == "yes")
{
cout << "initializing GPU..."; cout.flush();
Mat hostTmp = Mat::zeros(1, 1, CV_32F);
cuda::GpuMat deviceTmp;
deviceTmp.upload(hostTmp);
cout << endl;
}
StabilizerBase *stabilizer = 0;
// check if source video is specified
string inputPath = arg(0);
if (inputPath.empty())
throw runtime_error("specify video file path");
// get source video parameters
Ptr<VideoFileSource> source = makePtr<VideoFileSource>(inputPath);
cout << "frame count (rough): " << source->count() << endl;
if (arg("fps") == "auto")
outputFps = source->fps();
else
outputFps = argd("fps");
// prepare motion estimation builders
Ptr<IMotionEstimatorBuilder> motionEstBuilder;
if (arg("lin-prog-motion-est") == "yes")
motionEstBuilder.reset(new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes"));
else
motionEstBuilder.reset(new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes"));
Ptr<IMotionEstimatorBuilder> wsMotionEstBuilder;
if (arg("ws-lp") == "yes")
wsMotionEstBuilder.reset(new MotionEstimatorL1Builder(cmd, arg("gpu") == "yes", "ws-"));
else
wsMotionEstBuilder.reset(new MotionEstimatorRansacL2Builder(cmd, arg("gpu") == "yes", "ws-"));
// determine whether we must use a one-pass or a two-pass stabilizer
bool isTwoPass =
arg("est-trim") == "yes" || arg("wobble-suppress") == "yes" || arg("lin-prog-stab") == "yes";
if (isTwoPass)
{
// we must use the two-pass stabilizer
TwoPassStabilizer *twoPassStabilizer = new TwoPassStabilizer();
stabilizer = twoPassStabilizer;
twoPassStabilizer->setEstimateTrimRatio(arg("est-trim") == "yes");
// determine stabilization technique
if (arg("lin-prog-stab") == "yes")
{
Ptr<LpMotionStabilizer> stab = makePtr<LpMotionStabilizer>();
stab->setFrameSize(Size(source->width(), source->height()));
stab->setTrimRatio(arg("lps-trim-ratio") == "auto" ? argf("trim-ratio") : argf("lps-trim-ratio"));
stab->setWeight1(argf("lps-w1"));
stab->setWeight2(argf("lps-w2"));
stab->setWeight3(argf("lps-w3"));
stab->setWeight4(argf("lps-w4"));
twoPassStabilizer->setMotionStabilizer(stab);
}
else if (arg("stdev") == "auto")
twoPassStabilizer->setMotionStabilizer(makePtr<GaussianMotionFilter>(argi("radius")));
else
twoPassStabilizer->setMotionStabilizer(makePtr<GaussianMotionFilter>(argi("radius"), argf("stdev")));
// init wobble suppressor if necessary
if (arg("wobble-suppress") == "yes")
{
Ptr<MoreAccurateMotionWobbleSuppressorBase> ws = makePtr<MoreAccurateMotionWobbleSuppressor>();
if (arg("gpu") == "yes")
#ifdef HAVE_OPENCV_CUDAWARPING
ws = makePtr<MoreAccurateMotionWobbleSuppressorGpu>();
#else
throw runtime_error("OpenCV is built without CUDA support");
#endif
ws->setMotionEstimator(wsMotionEstBuilder->build());
ws->setPeriod(argi("ws-period"));
twoPassStabilizer->setWobbleSuppressor(ws);
MotionModel model = ws->motionEstimator()->motionModel();
if (arg("load-motions2") != "no")
{
ws->setMotionEstimator(makePtr<FromFileMotionReader>(arg("load-motions2")));
ws->motionEstimator()->setMotionModel(model);
}
if (arg("save-motions2") != "no")
{
ws->setMotionEstimator(makePtr<ToFileMotionWriter>(arg("save-motions2"), ws->motionEstimator()));
ws->motionEstimator()->setMotionModel(model);
}
}
}
else
{
// we must use the one-pass stabilizer
OnePassStabilizer *onePassStabilizer = new OnePassStabilizer();
stabilizer = onePassStabilizer;
if (arg("stdev") == "auto")
onePassStabilizer->setMotionFilter(makePtr<GaussianMotionFilter>(argi("radius")));
else
onePassStabilizer->setMotionFilter(makePtr<GaussianMotionFilter>(argi("radius"), argf("stdev")));
}
stabilizer->setFrameSource(source);
stabilizer->setMotionEstimator(motionEstBuilder->build());
// cast stabilizer to simple frame source interface to read stabilized frames
stabilizedFrames.reset(dynamic_cast<IFrameSource*>(stabilizer));
MotionModel model = stabilizer->motionEstimator()->motionModel();
if (arg("load-motions") != "no")
{
stabilizer->setMotionEstimator(makePtr<FromFileMotionReader>(arg("load-motions")));
stabilizer->motionEstimator()->setMotionModel(model);
}
if (arg("save-motions") != "no")
{
stabilizer->setMotionEstimator(makePtr<ToFileMotionWriter>(arg("save-motions"), stabilizer->motionEstimator()));
stabilizer->motionEstimator()->setMotionModel(model);
}
stabilizer->setRadius(argi("radius"));
// init deblurer
if (arg("deblur") == "yes")
{
Ptr<WeightingDeblurer> deblurer = makePtr<WeightingDeblurer>();
deblurer->setRadius(argi("radius"));
deblurer->setSensitivity(argf("deblur-sens"));
stabilizer->setDeblurer(deblurer);
}
// set up trimming parameters
stabilizer->setTrimRatio(argf("trim-ratio"));
stabilizer->setCorrectionForInclusion(arg("incl-constr") == "yes");
if (arg("border-mode") == "reflect")
stabilizer->setBorderMode(BORDER_REFLECT);
else if (arg("border-mode") == "replicate")
stabilizer->setBorderMode(BORDER_REPLICATE);
else if (arg("border-mode") == "const")
stabilizer->setBorderMode(BORDER_CONSTANT);
else
throw runtime_error("unknown border extrapolation mode: "
+ cmd.get<string>("border-mode"));
// init inpainter
InpaintingPipeline *inpainters = new InpaintingPipeline();
Ptr<InpainterBase> inpainters_(inpainters);
if (arg("mosaic") == "yes")
{
Ptr<ConsistentMosaicInpainter> inp = makePtr<ConsistentMosaicInpainter>();
inp->setStdevThresh(argf("mosaic-stdev"));
inpainters->pushBack(inp);
}
if (arg("motion-inpaint") == "yes")
{
Ptr<MotionInpainter> inp = makePtr<MotionInpainter>();
inp->setDistThreshold(argf("mi-dist-thresh"));
inpainters->pushBack(inp);
}
if (arg("color-inpaint") == "average")
inpainters->pushBack(makePtr<ColorAverageInpainter>());
else if (arg("color-inpaint") == "ns")
inpainters->pushBack(makePtr<ColorInpainter>(int(INPAINT_NS), argd("ci-radius")));
else if (arg("color-inpaint") == "telea")
inpainters->pushBack(makePtr<ColorInpainter>(int(INPAINT_TELEA), argd("ci-radius")));
else if (arg("color-inpaint") != "no")
throw runtime_error("unknown color inpainting method: " + arg("color-inpaint"));
if (!inpainters->empty())
{
inpainters->setRadius(argi("radius"));
stabilizer->setInpainter(inpainters_);
}
if (arg("output") != "no")
outputPath = arg("output");
quietMode = argb("quiet");
run();
}
catch (const exception &e)
{
cout << "error: " << e.what() << endl;
stabilizedFrames.release();
return -1;
}
stabilizedFrames.release();
return 0;
}
MotionModel motionModel(const string &str)
{
if (str == "transl")
return MM_TRANSLATION;
if (str == "transl_and_scale")
return MM_TRANSLATION_AND_SCALE;
if (str == "rigid")
return MM_RIGID;
if (str == "similarity")
return MM_SIMILARITY;
if (str == "affine")
return MM_AFFINE;
if (str == "homography")
return MM_HOMOGRAPHY;
throw runtime_error("unknown motion model: " + str);
}