Merge pull request #3679 from mshabunin:remove-algorithm-factory

Commit 7b270f4c69
@@ -122,7 +122,6 @@ CV_INLINE CvParamLattice cvDefaultParamLattice( void )
 #define CV_TYPE_NAME_ML_SVM         "opencv-ml-svm"
 #define CV_TYPE_NAME_ML_KNN         "opencv-ml-knn"
 #define CV_TYPE_NAME_ML_NBAYES      "opencv-ml-bayesian"
-#define CV_TYPE_NAME_ML_EM          "opencv-ml-em"
 #define CV_TYPE_NAME_ML_BOOSTING    "opencv-ml-boost-tree"
 #define CV_TYPE_NAME_ML_TREE        "opencv-ml-tree"
 #define CV_TYPE_NAME_ML_ANN_MLP     "opencv-ml-ann-mlp"
@@ -562,100 +561,6 @@ private:
     CvSVM& operator = (const CvSVM&);
 };
 
-/****************************************************************************************\
-*                              Expectation - Maximization                               *
-\****************************************************************************************/
-namespace cv
-{
-class EM : public Algorithm
-{
-public:
-    // Type of covariation matrices
-    enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL};
-
-    // Default parameters
-    enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};
-
-    // The initial step
-    enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};
-
-    CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
-               const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
-                                                         EM::DEFAULT_MAX_ITERS, FLT_EPSILON));
-
-    virtual ~EM();
-    CV_WRAP virtual void clear();
-
-    CV_WRAP virtual bool train(InputArray samples,
-                               OutputArray logLikelihoods=noArray(),
-                               OutputArray labels=noArray(),
-                               OutputArray probs=noArray());
-
-    CV_WRAP virtual bool trainE(InputArray samples,
-                                InputArray means0,
-                                InputArray covs0=noArray(),
-                                InputArray weights0=noArray(),
-                                OutputArray logLikelihoods=noArray(),
-                                OutputArray labels=noArray(),
-                                OutputArray probs=noArray());
-
-    CV_WRAP virtual bool trainM(InputArray samples,
-                                InputArray probs0,
-                                OutputArray logLikelihoods=noArray(),
-                                OutputArray labels=noArray(),
-                                OutputArray probs=noArray());
-
-    CV_WRAP Vec2d predict(InputArray sample,
-                          OutputArray probs=noArray()) const;
-
-    CV_WRAP bool isTrained() const;
-
-    AlgorithmInfo* info() const;
-    virtual void read(const FileNode& fn);
-
-protected:
-
-    virtual void setTrainData(int startStep, const Mat& samples,
-                              const Mat* probs0,
-                              const Mat* means0,
-                              const std::vector<Mat>* covs0,
-                              const Mat* weights0);
-
-    bool doTrain(int startStep,
-                 OutputArray logLikelihoods,
-                 OutputArray labels,
-                 OutputArray probs);
-    virtual void eStep();
-    virtual void mStep();
-
-    void clusterTrainSamples();
-    void decomposeCovs();
-    void computeLogWeightDivDet();
-
-    Vec2d computeProbabilities(const Mat& sample, Mat* probs) const;
-
-    // all inner matrices have type CV_64FC1
-    CV_PROP_RW int nclusters;
-    CV_PROP_RW int covMatType;
-    CV_PROP_RW int maxIters;
-    CV_PROP_RW double epsilon;
-
-    Mat trainSamples;
-    Mat trainProbs;
-    Mat trainLogLikelihoods;
-    Mat trainLabels;
-
-    CV_PROP Mat weights;
-    CV_PROP Mat means;
-    CV_PROP std::vector<Mat> covs;
-
-    std::vector<Mat> covsEigenValues;
-    std::vector<Mat> covsRotateMats;
-    std::vector<Mat> invCovsEigenValues;
-    Mat logWeightDivDet;
-};
-} // namespace cv
-
 /****************************************************************************************\
 *                                   Decision Tree                                       *
 \****************************************************************************************/
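The removed class survives as cv::ml::EM behind the refactored interface. Below is a minimal sketch of the equivalent usage against the post-refactoring OpenCV 3.x ml API; the accessor names follow the new get/set convention, and `samples` is assumed to hold one floating-point observation per row (treat this as orientation, not part of the diff):

@code{.cpp}
#include <cfloat>            // FLT_EPSILON
#include <opencv2/ml.hpp>
using namespace cv;

Ptr<ml::EM> em = ml::EM::create();
em->setClustersNumber(5);                                   // was EM::DEFAULT_NCLUSTERS
em->setCovarianceMatrixType(ml::EM::COV_MAT_DIAGONAL);      // was EM::COV_MAT_DEFAULT
em->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS,
                                 100, FLT_EPSILON));        // was EM::DEFAULT_MAX_ITERS
Mat logLikelihoods, labels, probs;
em->trainEM(samples, logLikelihoods, labels, probs);        // replaces the old EM::train
@endcode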
@@ -2155,8 +2060,6 @@ typedef CvGBTreesParams GradientBoostingTreeParams;
 typedef CvGBTrees GradientBoostingTrees;
 
 template<> void DefaultDeleter<CvDTreeSplit>::operator ()(CvDTreeSplit* obj) const;
-
-bool initModule_ml(void);
 }
 
 #endif // __cplusplus
@@ -100,7 +100,7 @@ RECURSIVE              = YES
 EXCLUDE                =
 EXCLUDE_SYMLINKS       = NO
 EXCLUDE_PATTERNS       = *.inl.hpp *.impl.hpp *_detail.hpp */cudev/**/detail/*.hpp
-EXCLUDE_SYMBOLS        = cv::DataType<*> int
+EXCLUDE_SYMBOLS        = cv::DataType<*> int void
 EXAMPLE_PATH           = @CMAKE_DOXYGEN_EXAMPLE_PATH@
 EXAMPLE_PATTERNS       = *
 EXAMPLE_RECURSIVE      = YES
@@ -243,7 +243,11 @@ PREDEFINED             = __cplusplus=1 \
                          CV_NORETURN= \
                          CV_DEFAULT(x)=" = x" \
                          CV_NEON=1 \
-                         FLANN_DEPRECATED=
+                         FLANN_DEPRECATED= \
+                         "CV_PURE_PROPERTY(type, name)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
+                         "CV_IMPL_PROPERTY(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
+                         "CV_IMPL_PROPERTY_S(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(const type & val);" \
+                         "CV_IMPL_PROPERTY_RO(type, name, x)= virtual type get##name() const;"
 EXPAND_AS_DEFINED      =
 SKIP_FUNCTION_MACROS   = YES
 TAGFILES               =
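For reference, here is what the CV_PURE_PROPERTY definition above makes Doxygen see for one hypothetical property, substituting type = double and name = Epsilon (an illustrative expansion, not part of the diff):

@code{.cpp}
/** @see setEpsilon */
virtual double getEpsilon() const = 0;
/** @copybrief getEpsilon @see getEpsilon */
virtual void setEpsilon(double val) = 0;
@endcode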
@@ -1,8 +1,6 @@
 Introduction to Support Vector Machines {#tutorial_introduction_to_svm}
 =======================================
 
-@todo update this tutorial
-
 Goal
 ----
@@ -31,13 +29,11 @@ understand that this is done only because our intuition is better built from exa
 to imagine. However, the same concepts apply to tasks where the examples to classify lie in a space
 whose dimension is higher than two.
 
-In the above picture you can see that there exists multiple
-lines that offer a solution to the problem. Is any of them better than the others? We can
-intuitively define a criterion to estimate the worth of the lines:
-
--   A line is bad if it passes too close to the points because it will be noise sensitive and it will
-    not generalize correctly. Therefore, our goal should be to find the line passing as far as
-    possible from all points.
+In the above picture you can see that there exists multiple lines that offer a solution to the
+problem. Is any of them better than the others? We can intuitively define a criterion to estimate
+the worth of the lines: <em> A line is bad if it passes too close to the points because it will be
+noise sensitive and it will not generalize correctly. </em> Therefore, our goal should be to find
+the line passing as far as possible from all points.
 
 Then, the operation of the SVM algorithm is based on finding the hyperplane that gives the largest
 minimum distance to the training examples. Twice, this distance receives the important name of
@@ -57,7 +53,7 @@ where \f$\beta\f$ is known as the *weight vector* and \f$\beta_{0}\f$ as the *bi
 
 @sa A more in depth description of this and hyperplanes you can find in the section 4.5 (*Seperating
 Hyperplanes*) of the book: *Elements of Statistical Learning* by T. Hastie, R. Tibshirani and J. H.
-Friedman.
+Friedman (@cite HTF01).
 
 The optimal hyperplane can be represented in an infinite number of different ways by
 scaling of \f$\beta\f$ and \f$\beta_{0}\f$. As a matter of convention, among all the possible
@@ -107,17 +103,14 @@ Explanation
 
     The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
     two different classes; one of the classes consists of one point and the other of three points.
-    @code{.cpp}
-    float labels[4] = {1.0, -1.0, -1.0, -1.0};
-    float trainingData[4][2] = {{501, 10}, {255, 10}, {501, 255}, {10, 501}};
-    @endcode
+
+    @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup1
+
     The function @ref cv::ml::SVM::train that will be used afterwards requires the training data to be
     stored as @ref cv::Mat objects of floats. Therefore, we create these objects from the arrays
     defined above:
-    @code{.cpp}
-    Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
-    Mat labelsMat      (4, 1, CV_32FC1, labels);
-    @endcode
+
+    @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup2
 
 -#  **Set up SVM's parameters**
 
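The two snippets referenced above presumably carry the same data as the removed listings, adapted to the new interface; a hedged sketch (the assumption here is that the refactored cv::ml::SVM classifier takes integer class labels, hence CV_32S for the labels matrix):

@code{.cpp}
int labels[4] = {1, -1, -1, -1};
float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
Mat labelsMat(4, 1, CV_32S, labels);
@endcode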
@@ -126,42 +119,35 @@ Explanation
     used in a wide variety of problems (e.g. problems with non-linearly separable data, a SVM using
     a kernel function to raise the dimensionality of the examples, etc). As a consequence of this,
     we have to define some parameters before training the SVM. These parameters are stored in an
-    object of the class @ref cv::ml::SVM::Params .
-    @code{.cpp}
-    ml::SVM::Params params;
-    params.svmType    = ml::SVM::C_SVC;
-    params.kernelType = ml::SVM::LINEAR;
-    params.termCrit   = TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6);
-    @endcode
-    - *Type of SVM*. We choose here the type **ml::SVM::C_SVC** that can be used for n-class
-      classification (n \f$\geq\f$ 2). This parameter is defined in the attribute
-      *ml::SVM::Params.svmType*.
-
-      The important feature of the type of SVM **CvSVM::C_SVC** deals with imperfect separation of classes (i.e. when the training data is non-linearly separable). This feature is not important here since the data is linearly separable and we chose this SVM type only for being the most commonly used.
+    object of the class @ref cv::ml::SVM.
+
+    @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp init
+
+    Here:
+    - *Type of SVM*. We choose here the type @ref cv::ml::SVM::C_SVC "C_SVC" that can be used for
+      n-class classification (n \f$\geq\f$ 2). The important feature of this type is that it deals
+      with imperfect separation of classes (i.e. when the training data is non-linearly separable).
+      This feature is not important here since the data is linearly separable and we chose this SVM
+      type only for being the most commonly used.
 
     - *Type of SVM kernel*. We have not talked about kernel functions since they are not
-      interesting for the training data we are dealing with. Nevertheless, let's explain briefly
-      now the main idea behind a kernel function. It is a mapping done to the training data to
-      improve its resemblance to a linearly separable set of data. This mapping consists of
-      increasing the dimensionality of the data and is done efficiently using a kernel function.
-      We choose here the type **ml::SVM::LINEAR** which means that no mapping is done. This
-      parameter is defined in the attribute *ml::SVMParams.kernel_type*.
+      interesting for the training data we are dealing with. Nevertheless, let's explain briefly now
+      the main idea behind a kernel function. It is a mapping done to the training data to improve
+      its resemblance to a linearly separable set of data. This mapping consists of increasing the
+      dimensionality of the data and is done efficiently using a kernel function. We choose here the
+      type @ref cv::ml::SVM::LINEAR "LINEAR" which means that no mapping is done. This parameter is
+      defined using cv::ml::SVM::setKernel.
 
     - *Termination criteria of the algorithm*. The SVM training procedure is implemented solving a
       constrained quadratic optimization problem in an **iterative** fashion. Here we specify a
       maximum number of iterations and a tolerance error so we allow the algorithm to finish in
      less number of steps even if the optimal hyperplane has not been computed yet. This
-      parameter is defined in a structure @ref cv::cvTermCriteria .
+      parameter is defined in a structure @ref cv::TermCriteria .
 
 -#  **Train the SVM**
 
-    We call the method
-    [CvSVM::train](http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#cvsvm-train)
-    to build the SVM model.
-    @code{.cpp}
-    CvSVM SVM;
-    SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);
-    @endcode
+    We call the method @ref cv::ml::SVM::train to build the SVM model.
+
+    @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp train
 
 -#  **Regions classified by the SVM**
 
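For orientation, a minimal sketch of the setter-style configuration the new init snippet stands for, using the public OpenCV 3.x interface (this mirrors the removed params block, not the snippet verbatim):

@code{.cpp}
Ptr<ml::SVM> svm = ml::SVM::create();
svm->setType(ml::SVM::C_SVC);
svm->setKernel(ml::SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
@endcode

Training then becomes a single call on the configured object, e.g. svm->train(trainingDataMat, ml::ROW_SAMPLE, labelsMat).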
@@ -170,22 +156,8 @@ Explanation
     by the SVM. In other words, an image is traversed interpreting its pixels as points of the
     Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in
     green if it is the class with label 1 and in blue if it is the class with label -1.
-    @code{.cpp}
-    Vec3b green(0,255,0), blue (255,0,0);
-
-    for (int i = 0; i < image.rows; ++i)
-        for (int j = 0; j < image.cols; ++j)
-        {
-            Mat sampleMat = (Mat_<float>(1,2) << i,j);
-            float response = SVM.predict(sampleMat);
-
-            if (response == 1)
-                image.at<Vec3b>(j, i) = green;
-            else
-            if (response == -1)
-                image.at<Vec3b>(j, i) = blue;
-        }
-    @endcode
+
+    @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show
 
 -#  **Support vectors**
 
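The show snippet replaces the removed loop above; an equivalent sketch against the new API, assuming the svm object configured earlier (the removed code's (i, j) indexing is kept as-is):

@code{.cpp}
Vec3b green(0, 255, 0), blue(255, 0, 0);
for (int i = 0; i < image.rows; ++i)
    for (int j = 0; j < image.cols; ++j)
    {
        Mat sampleMat = (Mat_<float>(1, 2) << i, j);
        float response = svm->predict(sampleMat);   // member call replaces CvSVM::predict
        if (response == 1)
            image.at<Vec3b>(j, i) = green;
        else if (response == -1)
            image.at<Vec3b>(j, i) = blue;
    }
@endcode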
@@ -193,15 +165,8 @@ Explanation
     The method @ref cv::ml::SVM::getSupportVectors obtain all of the support
     vectors. We have used this methods here to find the training examples that are
     support vectors and highlight them.
-    @code{.cpp}
-    int c     = SVM.get_support_vector_count();
-
-    for (int i = 0; i < c; ++i)
-    {
-        const float* v = SVM.get_support_vector(i); // get and then highlight with grayscale
-        circle( image,  Point( (int) v[0], (int) v[1]),   6,  Scalar(128, 128, 128), thickness, lineType);
-    }
-    @endcode
+
+    @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show_vectors
 
 Results
 -------
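Under the new API the support vectors come back as a single matrix rather than through per-index accessors; a sketch of the highlighting step (thickness and lineType are assumed to be defined as in the sample):

@code{.cpp}
Mat sv = svm->getSupportVectors();          // one support vector per row
for (int i = 0; i < sv.rows; ++i)
{
    const float* v = sv.ptr<float>(i);
    circle(image, Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
}
@endcode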
@@ -1,8 +1,6 @@
 Support Vector Machines for Non-Linearly Separable Data {#tutorial_non_linear_svms}
 =======================================================
 
-@todo update this tutorial
-
 Goal
 ----
@@ -10,21 +8,20 @@ In this tutorial you will learn how to:
 
 -   Define the optimization problem for SVMs when it is not possible to separate linearly the
     training data.
--   How to configure the parameters in @ref cv::ml::SVM::Params to adapt your SVM for this class of
-    problems.
+-   How to configure the parameters to adapt your SVM for this class of problems.
 
 Motivation
 ----------
 
 Why is it interesting to extend the SVM optimation problem in order to handle non-linearly separable
 training data? Most of the applications in which SVMs are used in computer vision require a more
-powerful tool than a simple linear classifier. This stems from the fact that in these tasks **the
-training data can be rarely separated using an hyperplane**.
+powerful tool than a simple linear classifier. This stems from the fact that in these tasks __the
+training data can be rarely separated using an hyperplane__.
 
 Consider one of these tasks, for example, face detection. The training data in this case is composed
-by a set of images that are faces and another set of images that are non-faces (*every other thing
-in the world except from faces*). This training data is too complex so as to find a representation
-of each sample (*feature vector*) that could make the whole set of faces linearly separable from the
+by a set of images that are faces and another set of images that are non-faces (_every other thing
+in the world except from faces_). This training data is too complex so as to find a representation
+of each sample (_feature vector_) that could make the whole set of faces linearly separable from the
 whole set of non-faces.
 
 Extension of the Optimization Problem
@@ -32,13 +29,13 @@ Extension of the Optimization Problem
 
 Remember that using SVMs we obtain a separating hyperplane. Therefore, since the training data is
 now non-linearly separable, we must admit that the hyperplane found will misclassify some of the
-samples. This *misclassification* is a new variable in the optimization that must be taken into
+samples. This _misclassification_ is a new variable in the optimization that must be taken into
 account. The new model has to include both the old requirement of finding the hyperplane that gives
 the biggest margin and the new one of generalizing the training data correctly by not allowing too
 many classification errors.
 
 We start here from the formulation of the optimization problem of finding the hyperplane which
-maximizes the **margin** (this is explained in the previous tutorial (@ref tutorial_introduction_to_svm):
+maximizes the __margin__ (this is explained in the previous tutorial (@ref tutorial_introduction_to_svm):
 
 \f[\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \text{ } \forall i\f]
 
@@ -50,8 +47,8 @@ constant times the number of misclassification errors in the training data, i.e.
 
 However, this one is not a very good solution since, among some other reasons, we do not distinguish
 between samples that are misclassified with a small distance to their appropriate decision region or
-samples that are not. Therefore, a better solution will take into account the *distance of the
-misclassified samples to their correct decision regions*, i.e.:
+samples that are not. Therefore, a better solution will take into account the _distance of the
+misclassified samples to their correct decision regions_, i.e.:
 
 \f[\min ||\beta||^{2} + C \text{(distance of misclassified samples to their correct regions)}\f]
 
@@ -68,7 +65,7 @@ distances of the rest of the samples are zero since they lay already in their co
 region.
 
 The red and blue lines that appear on the picture are the margins to each one of the
-decision regions. It is very **important** to realize that each of the \f$\xi_{i}\f$ goes from a
+decision regions. It is very __important__ to realize that each of the \f$\xi_{i}\f$ goes from a
 misclassified training sample to the margin of its appropriate region.
 
 Finally, the new formulation for the optimization problem is:
@@ -79,26 +76,25 @@ How should the parameter C be chosen? It is obvious that the answer to this ques
 the training data is distributed. Although there is no general answer, it is useful to take into
 account these rules:
 
--   Large values of C give solutions with *less misclassification errors* but a *smaller margin*.
+-   Large values of C give solutions with _less misclassification errors_ but a _smaller margin_.
     Consider that in this case it is expensive to make misclassification errors. Since the aim of
     the optimization is to minimize the argument, few misclassifications errors are allowed.
--   Small values of C give solutions with *bigger margin* and *more classification errors*. In this
+-   Small values of C give solutions with _bigger margin_ and _more classification errors_. In this
     case the minimization does not consider that much the term of the sum so it focuses more on
     finding a hyperplane with big margin.
 
 Source Code
 -----------
 
-You may also find the source code and these video file in the
-`samples/cpp/tutorial_code/gpu/non_linear_svms/non_linear_svms` folder of the OpenCV source library
-or [download it from here ](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
+You may also find the source code in `samples/cpp/tutorial_code/ml/non_linear_svms` folder of the OpenCV source library or
+[download it from here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
 
 @includelineno cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp
 
 Explanation
 -----------
 
--#  **Set up the training data**
+-#  __Set up the training data__
 
     The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
     two different classes. To make the exercise more appealing, the training data is generated
@@ -107,136 +103,67 @@ Explanation
     We have divided the generation of the training data into two main parts.
 
     In the first part we generate data for both classes that is linearly separable.
-    @code{.cpp}
-    // Generate random points for the class 1
-    Mat trainClass = trainData.rowRange(0, nLinearSamples);
-    // The x coordinate of the points is in [0, 0.4)
-    Mat c = trainClass.colRange(0, 1);
-    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(0.4 * WIDTH));
-    // The y coordinate of the points is in [0, 1)
-    c = trainClass.colRange(1,2);
-    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
-
-    // Generate random points for the class 2
-    trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES);
-    // The x coordinate of the points is in [0.6, 1]
-    c = trainClass.colRange(0 , 1);
-    rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH));
-    // The y coordinate of the points is in [0, 1)
-    c = trainClass.colRange(1,2);
-    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
-    @endcode
+    @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup1
+
     In the second part we create data for both classes that is non-linearly separable, data that
     overlaps.
-    @code{.cpp}
-    // Generate random points for the classes 1 and 2
-    trainClass = trainData.rowRange( nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
-    // The x coordinate of the points is in [0.4, 0.6)
-    c = trainClass.colRange(0,1);
-    rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH));
-    // The y coordinate of the points is in [0, 1)
-    c = trainClass.colRange(1,2);
-    rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
-    @endcode
+    @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup2
 
--#  **Set up SVM's parameters**
+-#  __Set up SVM's parameters__
 
-    @sa
-    In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the atributes of the
-    class @ref cv::ml::SVM::Params that we configure here before training the SVM.
+    @note In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the
+    atributes of the class @ref cv::ml::SVM that we configure here before training the SVM.
 
-    @code{.cpp}
-    CvSVMParams params;
-    params.svm_type    = SVM::C_SVC;
-    params.C           = 0.1;
-    params.kernel_type = SVM::LINEAR;
-    params.term_crit   = TermCriteria(TermCriteria::ITER, (int)1e7, 1e-6);
-    @endcode
+    @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp init
+
     There are just two differences between the configuration we do here and the one that was done in
-    the previous tutorial (tutorial_introduction_to_svm) that we use as reference.
+    the previous tutorial (@ref tutorial_introduction_to_svm) that we use as reference.
 
-    - *CvSVM::C_SVC*. We chose here a small value of this parameter in order not to punish too much
-      the misclassification errors in the optimization. The idea of doing this stems from the will
-      of obtaining a solution close to the one intuitively expected. However, we recommend to get a
+    - _C_. We chose here a small value of this parameter in order not to punish too much the
+      misclassification errors in the optimization. The idea of doing this stems from the will of
+      obtaining a solution close to the one intuitively expected. However, we recommend to get a
       better insight of the problem by making adjustments to this parameter.
 
-    @note Here there are just very few points in the overlapping region between classes, giving a smaller value to **FRAC_LINEAR_SEP** the density of points can be incremented and the impact of the parameter **CvSVM::C_SVC** explored deeply.
+    @note In this case there are just very few points in the overlapping region between classes.
+    By giving a smaller value to __FRAC_LINEAR_SEP__ the density of points can be incremented and the
+    impact of the parameter _C_ explored deeply.
 
-    - *Termination Criteria of the algorithm*. The maximum number of iterations has to be
+    - _Termination Criteria of the algorithm_. The maximum number of iterations has to be
       increased considerably in order to solve correctly a problem with non-linearly separable
      training data. In particular, we have increased in five orders of magnitude this value.
 
--#  **Train the SVM**
+-#  __Train the SVM__
 
     We call the method @ref cv::ml::SVM::train to build the SVM model. Watch out that the training
     process may take a quite long time. Have patiance when your run the program.
-    @code{.cpp}
-    CvSVM svm;
-    svm.train(trainData, labels, Mat(), Mat(), params);
-    @endcode
 
--#  **Show the Decision Regions**
+    @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp train
+
+-#  __Show the Decision Regions__
 
     The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In
     this example we have used this method in order to color the space depending on the prediction done
     by the SVM. In other words, an image is traversed interpreting its pixels as points of the
     Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in
     dark green if it is the class with label 1 and in dark blue if it is the class with label 2.
-    @code{.cpp}
-    Vec3b green(0,100,0), blue (100,0,0);
-    for (int i = 0; i < I.rows; ++i)
-        for (int j = 0; j < I.cols; ++j)
-        {
-            Mat sampleMat = (Mat_<float>(1,2) << i, j);
-            float response = svm.predict(sampleMat);
-
-            if (response == 1) I.at<Vec3b>(j, i) = green;
-            else if (response == 2) I.at<Vec3b>(j, i) = blue;
-        }
-    @endcode
+
+    @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show
 
--#  **Show the training data**
+-#  __Show the training data__
 
     The method @ref cv::circle is used to show the samples that compose the training data. The samples
     of the class labeled with 1 are shown in light green and in light blue the samples of the class
     labeled with 2.
-    @code{.cpp}
-    int thick = -1;
-    int lineType = 8;
-    float px, py;
-    // Class 1
-    for (int i = 0; i < NTRAINING_SAMPLES; ++i)
-    {
-        px = trainData.at<float>(i,0);
-        py = trainData.at<float>(i,1);
-        circle(I, Point( (int) px, (int) py ), 3, Scalar(0, 255, 0), thick, lineType);
-    }
-    // Class 2
-    for (int i = NTRAINING_SAMPLES; i <2*NTRAINING_SAMPLES; ++i)
-    {
-        px = trainData.at<float>(i,0);
-        py = trainData.at<float>(i,1);
-        circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick, lineType);
-    }
-    @endcode
+
+    @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_data
 
--#  **Support vectors**
+-#  __Support vectors__
 
     We use here a couple of methods to obtain information about the support vectors. The method
-    @ref cv::ml::SVM::getSupportVectors obtain all support vectors.
-    We have used this methods here to find the training examples that are
-    support vectors and highlight them.
-    @code{.cpp}
-    thick = 2;
-    lineType  = 8;
-    int x     = svm.get_support_vector_count();
-
-    for (int i = 0; i < x; ++i)
-    {
-        const float* v = svm.get_support_vector(i);
-        circle( I,  Point( (int) v[0], (int) v[1]),   6,  Scalar(128, 128, 128), thick, lineType);
-    }
-    @endcode
+    @ref cv::ml::SVM::getSupportVectors obtain all support vectors. We have used this methods here
+    to find the training examples that are support vectors and highlight them.
+
+    @snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_vectors
 
 Results
 -------
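The removed inline listings still describe what the setup1/setup2 snippets do; reassembled as a self-contained sketch (WIDTH, HEIGHT, NTRAINING_SAMPLES and the 0.9 separation fraction are illustrative stand-ins for the sample's constants):

@code{.cpp}
#include <opencv2/core.hpp>
using namespace cv;

const int WIDTH = 512, HEIGHT = 512, NTRAINING_SAMPLES = 100;
const float FRAC_LINEAR_SEP = 0.9f;          // fraction of linearly separable samples

Mat trainData(2 * NTRAINING_SAMPLES, 2, CV_32FC1);
RNG rng(100);
int nLinearSamples = (int)(FRAC_LINEAR_SEP * NTRAINING_SAMPLES);

// Linearly separable part: class 1 has x in [0, 0.4), class 2 has x in [0.6, 1]
Mat trainClass = trainData.rowRange(0, nLinearSamples);
rng.fill(trainClass.colRange(0, 1), RNG::UNIFORM, Scalar(1), Scalar(0.4 * WIDTH));
rng.fill(trainClass.colRange(1, 2), RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));

trainClass = trainData.rowRange(2 * NTRAINING_SAMPLES - nLinearSamples, 2 * NTRAINING_SAMPLES);
rng.fill(trainClass.colRange(0, 1), RNG::UNIFORM, Scalar(0.6 * WIDTH), Scalar(WIDTH));
rng.fill(trainClass.colRange(1, 2), RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));

// Overlapping (non-linearly separable) part: both classes have x in [0.4, 0.6)
trainClass = trainData.rowRange(nLinearSamples, 2 * NTRAINING_SAMPLES - nLinearSamples);
rng.fill(trainClass.colRange(0, 1), RNG::UNIFORM, Scalar(0.4 * WIDTH), Scalar(0.6 * WIDTH));
rng.fill(trainClass.colRange(1, 2), RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
@endcode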
@@ -200,8 +200,6 @@ public:
 
     void setCallback(const Ptr<LMSolver::Callback>& _cb) { cb = _cb; }
 
-    AlgorithmInfo* info() const;
-
     Ptr<LMSolver::Callback> cb;
 
     double epsx;
@@ -211,15 +209,8 @@ public:
 };
 
 
-CV_INIT_ALGORITHM(LMSolverImpl, "LMSolver",
-                  obj.info()->addParam(obj, "epsx", obj.epsx);
-                  obj.info()->addParam(obj, "epsf", obj.epsf);
-                  obj.info()->addParam(obj, "maxIters", obj.maxIters);
-                  obj.info()->addParam(obj, "printInterval", obj.printInterval))
-
 Ptr<LMSolver> createLMSolver(const Ptr<LMSolver::Callback>& cb, int maxIters)
 {
-    CV_Assert( !LMSolverImpl_info_auto.name().empty() );
     return makePtr<LMSolverImpl>(cb, maxIters);
 }
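The same mechanical change repeats across the modules below: the CV_INIT_ALGORITHM registration block and its name-registry assertion disappear, and each factory constructs its implementation directly. A schematic before/after, where MyInterface and MyImpl are stand-ins for pairs like LMSolver/LMSolverImpl:

@code{.cpp}
#include <opencv2/core.hpp>

struct MyInterface { virtual ~MyInterface() {} };      // schematic only
struct MyImpl : MyInterface { };

// Before this PR, creation went through the Algorithm registry:
//   CV_INIT_ALGORITHM(MyImpl, "MyAlgo", ...)           // static registration
//   CV_Assert( !MyImpl_info_auto.name().empty() );     // linker keep-alive check
// After it, the factory just builds the object; no registry is involved:
cv::Ptr<MyInterface> createMyAlgo() { return cv::makePtr<MyImpl>(); }
@endcode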
@@ -256,8 +256,6 @@ public:
 
     void setCallback(const Ptr<PointSetRegistrator::Callback>& _cb) { cb = _cb; }
 
-    AlgorithmInfo* info() const;
-
     Ptr<PointSetRegistrator::Callback> cb;
     int modelPoints;
     bool checkPartialSubsets;
@@ -378,25 +376,12 @@ public:
         return result;
     }
 
-    AlgorithmInfo* info() const;
 };
 
-
-CV_INIT_ALGORITHM(RANSACPointSetRegistrator, "PointSetRegistrator.RANSAC",
-                  obj.info()->addParam(obj, "threshold", obj.threshold);
-                  obj.info()->addParam(obj, "confidence", obj.confidence);
-                  obj.info()->addParam(obj, "maxIters", obj.maxIters))
-
-CV_INIT_ALGORITHM(LMeDSPointSetRegistrator, "PointSetRegistrator.LMeDS",
-                  obj.info()->addParam(obj, "confidence", obj.confidence);
-                  obj.info()->addParam(obj, "maxIters", obj.maxIters))
-
-
 Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
                                                          int _modelPoints, double _threshold,
                                                          double _confidence, int _maxIters)
 {
-    CV_Assert( !RANSACPointSetRegistrator_info_auto.name().empty() );
     return Ptr<PointSetRegistrator>(
         new RANSACPointSetRegistrator(_cb, _modelPoints, _threshold, _confidence, _maxIters));
 }
@@ -405,7 +390,6 @@ Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegis
 Ptr<PointSetRegistrator> createLMeDSPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
                                                         int _modelPoints, double _confidence, int _maxIters)
 {
-    CV_Assert( !LMeDSPointSetRegistrator_info_auto.name().empty() );
     return Ptr<PointSetRegistrator>(
         new LMeDSPointSetRegistrator(_cb, _modelPoints, _confidence, _maxIters));
 }
@@ -1010,8 +1010,6 @@ public:
         disp.convertTo(disp0, disp0.type(), 1./(1 << DISPARITY_SHIFT), 0);
     }
 
-    AlgorithmInfo* info() const { return 0; }
-
     int getMinDisparity() const { return params.minDisparity; }
     void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }
 
@@ -865,8 +865,6 @@ public:
                                   StereoMatcher::DISP_SCALE*params.speckleRange, buffer);
     }
 
-    AlgorithmInfo* info() const { return 0; }
-
     int getMinDisparity() const { return params.minDisparity; }
     void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }
 
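These matcher classes already expose their parameters through explicit, typed getters and setters, so dropping the stub info() loses nothing. A minimal usage sketch against the public OpenCV 3.x API:

@code{.cpp}
Ptr<StereoBM> bm = StereoBM::create(16, 21);   // numDisparities, blockSize
bm->setMinDisparity(0);                        // typed setter instead of set("minDisparity", 0)
int minDisp = bm->getMinDisparity();           // typed getter instead of get<int>("minDisparity")
@endcode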
@@ -2768,8 +2768,6 @@ public:
 //////////////////////////////////////// Algorithm ////////////////////////////////////
 
 class CV_EXPORTS Algorithm;
-class CV_EXPORTS AlgorithmInfo;
-struct CV_EXPORTS AlgorithmInfoData;
 
 template<typename _Tp> struct ParamType {};
 
@@ -2782,32 +2780,13 @@ matching, graph-cut etc.), background subtraction (which can be done using mixtu
 models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck
 etc.).
 
-The class provides the following features for all derived classes:
-
-- so called "virtual constructor". That is, each Algorithm derivative is registered at program
-  start and you can get the list of registered algorithms and create instance of a particular
-  algorithm by its name (see Algorithm::create). If you plan to add your own algorithms, it is
-  good practice to add a unique prefix to your algorithms to distinguish them from other
-  algorithms.
-- setting/retrieving algorithm parameters by name. If you used video capturing functionality
-  from OpenCV videoio module, you are probably familar with cvSetCaptureProperty(),
-  cvGetCaptureProperty(), VideoCapture::set() and VideoCapture::get(). Algorithm provides
-  similar method where instead of integer id's you specify the parameter names as text strings.
-  See Algorithm::set and Algorithm::get for details.
-- reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store
-  all its parameters and then read them back. There is no need to re-implement it each time.
-
 Here is example of SIFT use in your application via Algorithm interface:
 @code
 #include "opencv2/opencv.hpp"
 #include "opencv2/xfeatures2d.hpp"
 
 using namespace cv::xfeatures2d;
-
-...
 
 Ptr<Feature2D> sift = SIFT::create();
 
 FileStorage fs("sift_params.xml", FileStorage::READ);
 if( fs.isOpened() ) // if we have file with parameters, read them
 {
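With the name-based get/set machinery gone, persistence is the part each Algorithm derivative keeps for itself; a minimal sketch of overriding the remaining virtual hooks (the class name and field are illustrative, and the read signature is assumed to mirror the write hook shown in the hunk below):

@code{.cpp}
class MyFilter : public cv::Algorithm
{
public:
    double sigma;
    MyFilter() : sigma(1.0) {}
    // parameters are now plain members, stored and loaded explicitly:
    virtual void write(cv::FileStorage& fs) const { fs << "sigma" << sigma; }
    virtual void read(const cv::FileNode& fn) { fn["sigma"] >> sigma; }
};
@endcode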
@@ -2817,323 +2796,73 @@ Here is example of SIFT use in your application via Algorithm interface:
 else // else modify the parameters and store them; user can later edit the file to use different parameters
 {
     sift->setContrastThreshold(0.01f); // lower the contrast threshold, compared to the default value
 
     {
     WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);
     sift->write(fs);
     }
 }
 
 Mat image = imread("myimage.png", 0), descriptors;
 vector<KeyPoint> keypoints;
 sift->detectAndCompute(image, noArray(), keypoints, descriptors);
 @endcode
 
-Creating Own Algorithms
------------------------
-If you want to make your own algorithm, derived from Algorithm, you should basically follow a few
-conventions and add a little semi-standard piece of code to your class:
-- Make a class and specify Algorithm as its base class.
-- The algorithm parameters should be the class members. See Algorithm::get() for the list of
-  possible types of the parameters.
-- Add public virtual method `AlgorithmInfo* info() const;` to your class.
-- Add constructor function, AlgorithmInfo instance and implement the info() method. The simplest
-  way is to take <https://github.com/Itseez/opencv/tree/master/modules/ml/src/ml_init.cpp> as
-  the reference and modify it according to the list of your parameters.
-- Add some public function (e.g. `initModule_<mymodule>()`) that calls info() of your algorithm
-  and put it into the same source file as info() implementation. This is to force C++ linker to
-  include this object file into the target application. See Algorithm::create() for details.
 */
 class CV_EXPORTS_W Algorithm
-{
-public:
-    Algorithm();
-    virtual ~Algorithm();
-    /**Returns the algorithm name*/
-    String name() const;
-
-    /** @brief returns the algorithm parameter
-
-    The method returns value of the particular parameter. Since the compiler can not deduce the
-    type of the returned parameter, you should specify it explicitly in angle brackets. Here are
-    the allowed forms of get:
-
-    - myalgo.get\<int\>("param_name")
-    - myalgo.get\<double\>("param_name")
-    - myalgo.get\<bool\>("param_name")
-    - myalgo.get\<String\>("param_name")
-    - myalgo.get\<Mat\>("param_name")
-    - myalgo.get\<vector\<Mat\> \>("param_name")
-    - myalgo.get\<Algorithm\>("param_name") (it returns Ptr\<Algorithm\>).
-
-    In some cases the actual type of the parameter can be cast to the specified type, e.g. integer
-    parameter can be cast to double, bool can be cast to int. But "dangerous" transformations
-    (string\<-\>number, double-\>int, 1x1 Mat\<-\>number, ...) are not performed and the method
-    will throw an exception. In the case of Mat or vector\<Mat\> parameters the method does not
-    clone the matrix data, so do not modify the matrices. Use Algorithm::set instead - slower, but
-    more safe.
-    @param name The parameter name.
-    */
-    template<typename _Tp> typename ParamType<_Tp>::member_type get(const String& name) const;
-    /** @overload */
-    template<typename _Tp> typename ParamType<_Tp>::member_type get(const char* name) const;
-
-    CV_WRAP int getInt(const String& name) const;
-    CV_WRAP double getDouble(const String& name) const;
-    CV_WRAP bool getBool(const String& name) const;
-    CV_WRAP String getString(const String& name) const;
-    CV_WRAP Mat getMat(const String& name) const;
-    CV_WRAP std::vector<Mat> getMatVector(const String& name) const;
-    CV_WRAP Ptr<Algorithm> getAlgorithm(const String& name) const;
-
-    /** @brief Sets the algorithm parameter
-
-    The method sets value of the particular parameter. Some of the algorithm
-    parameters may be declared as read-only. If you try to set such a
-    parameter, you will get exception with the corresponding error message.
-    @param name The parameter name.
-    @param value The parameter value.
-    */
-    void set(const String& name, int value);
-    void set(const String& name, double value);
-    void set(const String& name, bool value);
-    void set(const String& name, const String& value);
-    void set(const String& name, const Mat& value);
-    void set(const String& name, const std::vector<Mat>& value);
-    void set(const String& name, const Ptr<Algorithm>& value);
-    template<typename _Tp> void set(const String& name, const Ptr<_Tp>& value);
-
-    CV_WRAP void setInt(const String& name, int value);
-    CV_WRAP void setDouble(const String& name, double value);
-    CV_WRAP void setBool(const String& name, bool value);
-    CV_WRAP void setString(const String& name, const String& value);
-    CV_WRAP void setMat(const String& name, const Mat& value);
-    CV_WRAP void setMatVector(const String& name, const std::vector<Mat>& value);
-    CV_WRAP void setAlgorithm(const String& name, const Ptr<Algorithm>& value);
-    template<typename _Tp> void setAlgorithm(const String& name, const Ptr<_Tp>& value);
-
-    void set(const char* name, int value);
-    void set(const char* name, double value);
-    void set(const char* name, bool value);
-    void set(const char* name, const String& value);
-    void set(const char* name, const Mat& value);
-    void set(const char* name, const std::vector<Mat>& value);
-    void set(const char* name, const Ptr<Algorithm>& value);
-    template<typename _Tp> void set(const char* name, const Ptr<_Tp>& value);
-
-    void setInt(const char* name, int value);
-    void setDouble(const char* name, double value);
-    void setBool(const char* name, bool value);
-    void setString(const char* name, const String& value);
-    void setMat(const char* name, const Mat& value);
-    void setMatVector(const char* name, const std::vector<Mat>& value);
-    void setAlgorithm(const char* name, const Ptr<Algorithm>& value);
-    template<typename _Tp> void setAlgorithm(const char* name, const Ptr<_Tp>& value);
-
-    CV_WRAP String paramHelp(const String& name) const;
-    int paramType(const char* name) const;
-    CV_WRAP int paramType(const String& name) const;
-    CV_WRAP void getParams(CV_OUT std::vector<String>& names) const;
-
-    /** @brief Stores algorithm parameters in a file storage
-
-    The method stores all the algorithm parameters (in alphabetic order) to
-    the file storage. The method is virtual. If you define your own
-    Algorithm derivative, your can override the method and store some extra
-    information. However, it's rarely needed. Here are some examples:
-    - SIFT feature detector (from xfeatures2d module). The class only
-      stores algorithm parameters and no keypoints or their descriptors.
-      Therefore, it's enough to store the algorithm parameters, which is
-      what Algorithm::write() does. Therefore, there is no dedicated
-      SIFT::write().
-    - Background subtractor (from video module). It has the algorithm
-      parameters and also it has the current background model. However,
-      the background model is not stored. First, it's rather big. Then,
-      if you have stored the background model, it would likely become
-      irrelevant on the next run (because of shifted camera, changed
-      background, different lighting etc.). Therefore,
-      BackgroundSubtractorMOG and BackgroundSubtractorMOG2 also rely on
-      the standard Algorithm::write() to store just the algorithm
-      parameters.
-    - Expectation Maximization (from ml module). The algorithm finds
-      mixture of gaussians that approximates user data best of all. In
-      this case the model may be re-used on the next run to test new
-      data against the trained statistical model. So EM needs to store
-      the model. However, since the model is described by a few
-      parameters that are available as read-only algorithm parameters
-      (i.e. they are available via EM::get()), EM also relies on
-      Algorithm::write() to store both EM parameters and the model
-      (represented by read-only algorithm parameters).
-    @param fs File storage.
-    */
-    virtual void write(FileStorage& fs) const;
-
-    /** @brief Reads algorithm parameters from a file storage
-
-    The method reads all the algorithm parameters from the specified node of
-    a file storage. Similarly to Algorithm::write(), if you implement an
-    algorithm that needs to read some extra data and/or re-compute some
-    internal data, you may override the method.
-    @param fn File node of the file storage.
-    */
-    virtual void read(const FileNode& fn);
-
-    typedef Algorithm* (*Constructor)(void);
-    typedef int (Algorithm::*Getter)() const;
-    typedef void (Algorithm::*Setter)(int);
-
-    /** @brief Returns the list of registered algorithms
-
-    This static method returns the list of registered algorithms in
-    alphabetical order. Here is how to use it :
-    @code{.cpp}
-    vector<String> algorithms;
-    Algorithm::getList(algorithms);
-    cout << "Algorithms: " << algorithms.size() << endl;
-    for (size_t i=0; i < algorithms.size(); i++)
-        cout << algorithms[i] << endl;
-    @endcode
-    @param algorithms The output vector of algorithm names.
-    */
-    CV_WRAP static void getList(CV_OUT std::vector<String>& algorithms);
-    CV_WRAP static Ptr<Algorithm> _create(const String& name);
-
-    /** @brief Creates algorithm instance by name
-
-    This static method creates a new instance of the specified algorithm. If
-    there is no such algorithm, the method will silently return a null
-    pointer. Also, you should specify the particular Algorithm subclass as
-    _Tp (or simply Algorithm if you do not know it at that point). :
-    @code{.cpp}
-    Ptr<BackgroundSubtractor> bgfg = Algorithm::create<BackgroundSubtractor>("BackgroundSubtractor.MOG2");
-    @endcode
-    @note This is important note about seemingly mysterious behavior of
-    Algorithm::create() when it returns NULL while it should not. The reason
-    is simple - Algorithm::create() resides in OpenCV's core module and the
-    algorithms are implemented in other modules. If you create algorithms
-    dynamically, C++ linker may decide to throw away the modules where the
-    actual algorithms are implemented, since you do not call any functions
-    from the modules. To avoid this problem, you need to call
-    initModule_\<modulename\>(); somewhere in the beginning of the program
-    before Algorithm::create(). For example, call initModule_xfeatures2d()
-    in order to use SURF/SIFT, call initModule_ml() to use expectation
-    maximization etc.
-    @param name The algorithm name, one of the names returned by Algorithm::getList().
-    */
-    template<typename _Tp> static Ptr<_Tp> create(const String& name);
-
-    virtual AlgorithmInfo* info() const /* TODO: make it = 0;*/ { return 0; }
-};
-
-/** @todo document */
-class CV_EXPORTS AlgorithmInfo
 {
 public:
-    friend class Algorithm;
-    AlgorithmInfo(const String& name, Algorithm::Constructor create);
-    ~AlgorithmInfo();
-    void get(const Algorithm* algo, const char* name, int argType, void* value) const;
-    void addParam_(Algorithm& algo, const char* name, int argType,
-                   void* value, bool readOnly,
-                   Algorithm::Getter getter, Algorithm::Setter setter,
-                   const String& help=String());
-    String paramHelp(const char* name) const;
-    int paramType(const char* name) const;
-    void getParams(std::vector<String>& names) const;
+    Algorithm();
+    virtual ~Algorithm();
 
-    void write(const Algorithm* algo, FileStorage& fs) const;
-    void read(Algorithm* algo, const FileNode& fn) const;
-    String name() const;
+    /** @brief Stores algorithm parameters in a file storage
+    */
+    virtual void write(FileStorage& fs) const { (void)fs; }
 
-    void addParam(Algorithm& algo, const char* name,
|
/** @brief Reads algorithm parameters from a file storage
|
||||||
int& value, bool readOnly=false,
|
*/
|
||||||
int (Algorithm::*getter)()=0,
|
virtual void read(const FileNode& fn) { (void)fn; }
|
||||||
void (Algorithm::*setter)(int)=0,
|
|
||||||
const String& help=String());
|
|
||||||
void addParam(Algorithm& algo, const char* name,
|
|
||||||
bool& value, bool readOnly=false,
|
|
||||||
int (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(int)=0,
|
|
||||||
const String& help=String());
|
|
||||||
void addParam(Algorithm& algo, const char* name,
|
|
||||||
double& value, bool readOnly=false,
|
|
||||||
double (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(double)=0,
|
|
||||||
const String& help=String());
|
|
||||||
void addParam(Algorithm& algo, const char* name,
|
|
||||||
String& value, bool readOnly=false,
|
|
||||||
String (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(const String&)=0,
|
|
||||||
const String& help=String());
|
|
||||||
void addParam(Algorithm& algo, const char* name,
|
|
||||||
Mat& value, bool readOnly=false,
|
|
||||||
Mat (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(const Mat&)=0,
|
|
||||||
const String& help=String());
|
|
||||||
void addParam(Algorithm& algo, const char* name,
|
|
||||||
std::vector<Mat>& value, bool readOnly=false,
|
|
||||||
std::vector<Mat> (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(const std::vector<Mat>&)=0,
|
|
||||||
const String& help=String());
|
|
||||||
void addParam(Algorithm& algo, const char* name,
|
|
||||||
Ptr<Algorithm>& value, bool readOnly=false,
|
|
||||||
Ptr<Algorithm> (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(const Ptr<Algorithm>&)=0,
|
|
||||||
const String& help=String());
|
|
||||||
void addParam(Algorithm& algo, const char* name,
|
|
||||||
float& value, bool readOnly=false,
|
|
||||||
float (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(float)=0,
|
|
||||||
const String& help=String());
|
|
||||||
void addParam(Algorithm& algo, const char* name,
|
|
||||||
unsigned int& value, bool readOnly=false,
|
|
||||||
unsigned int (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(unsigned int)=0,
|
|
||||||
const String& help=String());
|
|
||||||
void addParam(Algorithm& algo, const char* name,
|
|
||||||
uint64& value, bool readOnly=false,
|
|
||||||
uint64 (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(uint64)=0,
|
|
||||||
const String& help=String());
|
|
||||||
void addParam(Algorithm& algo, const char* name,
|
|
||||||
uchar& value, bool readOnly=false,
|
|
||||||
uchar (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(uchar)=0,
|
|
||||||
const String& help=String());
|
|
||||||
template<typename _Tp, typename _Base> void addParam(Algorithm& algo, const char* name,
|
|
||||||
Ptr<_Tp>& value, bool readOnly=false,
|
|
||||||
Ptr<_Tp> (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(const Ptr<_Tp>&)=0,
|
|
||||||
const String& help=String());
|
|
||||||
template<typename _Tp> void addParam(Algorithm& algo, const char* name,
|
|
||||||
Ptr<_Tp>& value, bool readOnly=false,
|
|
||||||
Ptr<_Tp> (Algorithm::*getter)()=0,
|
|
||||||
void (Algorithm::*setter)(const Ptr<_Tp>&)=0,
|
|
||||||
const String& help=String());
|
|
||||||
protected:
|
|
||||||
AlgorithmInfoData* data;
|
|
||||||
void set(Algorithm* algo, const char* name, int argType,
|
|
||||||
const void* value, bool force=false) const;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
/** @todo document */
|
// define properties
|
||||||
struct CV_EXPORTS Param
|
|
||||||
{
|
|
||||||
enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, UNSIGNED_INT=8, UINT64=9, UCHAR=11 };
|
|
||||||
|
|
||||||
Param();
|
#define CV_PURE_PROPERTY(type, name) \
|
||||||
Param(int _type, bool _readonly, int _offset,
|
CV_WRAP virtual type get##name() const = 0; \
|
||||||
Algorithm::Getter _getter=0,
|
CV_WRAP virtual void set##name(type val) = 0;
|
||||||
Algorithm::Setter _setter=0,
|
|
||||||
const String& _help=String());
|
#define CV_PURE_PROPERTY_S(type, name) \
|
||||||
int type;
|
CV_WRAP virtual type get##name() const = 0; \
|
||||||
int offset;
|
CV_WRAP virtual void set##name(const type & val) = 0;
|
||||||
bool readonly;
|
|
||||||
Algorithm::Getter getter;
|
#define CV_PURE_PROPERTY_RO(type, name) \
|
||||||
Algorithm::Setter setter;
|
CV_WRAP virtual type get##name() const = 0;
|
||||||
String help;
|
|
||||||
|
// basic property implementation
|
||||||
|
|
||||||
|
#define CV_IMPL_PROPERTY_RO(type, name, member) \
|
||||||
|
inline type get##name() const { return member; }
|
||||||
|
|
||||||
|
#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \
|
||||||
|
CV_IMPL_PROPERTY_RO(r_type, name, member) \
|
||||||
|
inline void set##name(w_type val) { member = val; }
|
||||||
|
|
||||||
|
#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \
|
||||||
|
r_type get##name() const { return internal_obj.get##internal_name(); } \
|
||||||
|
void set##name(w_type val) { internal_obj.set##internal_name(val); }
|
||||||
|
|
||||||
|
#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)
|
||||||
|
#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member)
|
||||||
|
|
||||||
|
#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj)
|
||||||
|
#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj)
|
||||||
|
|
||||||
|
#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)
|
||||||
|
#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj)
|
||||||
|
|
||||||
|
struct Param {
|
||||||
|
enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7,
|
||||||
|
UNSIGNED_INT=8, UINT64=9, UCHAR=11 };
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
template<> struct ParamType<bool>
|
template<> struct ParamType<bool>
|
||||||
{
|
{
|
||||||
typedef bool const_param_type;
|
typedef bool const_param_type;
|
||||||
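The property macros added above replace the old string-keyed get()/set() machinery with
ordinary typed accessors generated at compile time. As a minimal sketch (the member shown
is hypothetical), an implementation-side use such as
CV_IMPL_PROPERTY(double, RpropDW0, params.rpDW0) expands, by the definitions above, to:

    // CV_IMPL_PROPERTY -> CV_HELP_IMPL_PROPERTY(double, double, RpropDW0, params.rpDW0)
    inline double getRpropDW0() const { return params.rpDW0; }   // from CV_IMPL_PROPERTY_RO
    inline void setRpropDW0(double val) { params.rpDW0 = val; }  // setter half of the pair

CV_PURE_PROPERTY generates the matching pure-virtual pair for a public interface, and the
_S variants pass the value by const reference for non-trivial types such as cv::Mat.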
@@ -412,84 +412,6 @@ int print(const Matx<_Tp, m, n>& matx, FILE* stream = stdout)
     return print(Formatter::get()->format(cv::Mat(matx)), stream);
 }

-
-////////////////////////////////////////// Algorithm //////////////////////////////////////////
-
-template<typename _Tp> inline
-Ptr<_Tp> Algorithm::create(const String& name)
-{
-    return _create(name).dynamicCast<_Tp>();
-}
-
-template<typename _Tp> inline
-void Algorithm::set(const char* _name, const Ptr<_Tp>& value)
-{
-    Ptr<Algorithm> algo_ptr = value. template dynamicCast<cv::Algorithm>();
-    if (!algo_ptr) {
-        CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
-    }
-    info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
-}
-
-template<typename _Tp> inline
-void Algorithm::set(const String& _name, const Ptr<_Tp>& value)
-{
-    this->set<_Tp>(_name.c_str(), value);
-}
-
-template<typename _Tp> inline
-void Algorithm::setAlgorithm(const char* _name, const Ptr<_Tp>& value)
-{
-    Ptr<Algorithm> algo_ptr = value. template ptr<cv::Algorithm>();
-    if (!algo_ptr) {
-        CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
-    }
-    info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
-}
-
-template<typename _Tp> inline
-void Algorithm::setAlgorithm(const String& _name, const Ptr<_Tp>& value)
-{
-    this->set<_Tp>(_name.c_str(), value);
-}
-
-template<typename _Tp> inline
-typename ParamType<_Tp>::member_type Algorithm::get(const String& _name) const
-{
-    typename ParamType<_Tp>::member_type value;
-    info()->get(this, _name.c_str(), ParamType<_Tp>::type, &value);
-    return value;
-}
-
-template<typename _Tp> inline
-typename ParamType<_Tp>::member_type Algorithm::get(const char* _name) const
-{
-    typename ParamType<_Tp>::member_type value;
-    info()->get(this, _name, ParamType<_Tp>::type, &value);
-    return value;
-}
-
-template<typename _Tp, typename _Base> inline
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, Ptr<_Tp>& value, bool readOnly,
-                             Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
-                             const String& help)
-{
-    //TODO: static assert: _Tp inherits from _Base
-    addParam_(algo, parameter, ParamType<_Base>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
-template<typename _Tp> inline
-void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, Ptr<_Tp>& value, bool readOnly,
-                             Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
-                             const String& help)
-{
-    //TODO: static assert: _Tp inherits from Algorithm
-    addParam_(algo, parameter, ParamType<Algorithm>::type, &value, readOnly,
-              (Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
-}
-
 //! @endcond

 /****************************************************************************************\
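To make the effect of deleting these templates concrete, here is a minimal before/after
sketch of user code (the "after" setters are taken from the DualTVL1 test updates later in
this diff; the registered string name is shown for illustration only):

    // Before: string-keyed factory lookup plus reflection-style set()
    cv::Ptr<cv::DenseOpticalFlow> alg =
        cv::Algorithm::create<cv::DenseOpticalFlow>("DenseOpticalFlow.DualTVL1");
    alg->set("outerIterations", 300);

    // After: direct construction and typed, compile-time-checked setters
    cv::Ptr<cv::DualTVL1OpticalFlow> alg2 = cv::createOptFlow_DualTVL1();
    alg2->setOuterIterations(300);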
@@ -129,40 +129,6 @@ namespace cv
 CV_EXPORTS const char* currentParallelFramework();
 } //namespace cv

-#define CV_INIT_ALGORITHM(classname, algname, memberinit) \
-    static inline ::cv::Algorithm* create##classname##_hidden() \
-    { \
-        return new classname; \
-    } \
-    \
-    static inline ::cv::Ptr< ::cv::Algorithm> create##classname##_ptr_hidden() \
-    { \
-        return ::cv::makePtr<classname>(); \
-    } \
-    \
-    static inline ::cv::AlgorithmInfo& classname##_info() \
-    { \
-        static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname##_hidden); \
-        return classname##_info_var; \
-    } \
-    \
-    static ::cv::AlgorithmInfo& classname##_info_auto = classname##_info(); \
-    \
-    ::cv::AlgorithmInfo* classname::info() const \
-    { \
-        static volatile bool initialized = false; \
-        \
-        if( !initialized ) \
-        { \
-            initialized = true; \
-            classname obj; \
-            memberinit; \
-        } \
-        return &classname##_info(); \
-    }
-
 /****************************************************************************************\
 * Common declarations *
 \****************************************************************************************/

(File diff suppressed because it is too large)

@@ -140,8 +140,6 @@ namespace
 public:
     CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8);

-    cv::AlgorithmInfo* info() const;
-
     void apply(cv::InputArray src, cv::OutputArray dst);
     void apply(InputArray src, OutputArray dst, Stream& stream);

@@ -167,11 +165,6 @@
     {
     }

-    CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE_CUDA",
-                      obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
-                      obj.info()->addParam(obj, "tilesX", obj.tilesX_);
-                      obj.info()->addParam(obj, "tilesY", obj.tilesY_))
-
     void CLAHE_Impl::apply(cv::InputArray _src, cv::OutputArray _dst)
     {
         apply(_src, _dst, Stream::Null());

@@ -310,10 +310,10 @@ PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1,
 {
     cv::Mat flow;

-    cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
-    alg->set("medianFiltering", 1);
-    alg->set("innerIterations", 1);
-    alg->set("outerIterations", 300);
+    cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
+    alg->setMedianFiltering(1);
+    alg->setInnerIterations(1);
+    alg->setOuterIterations(300);
     TEST_CYCLE() alg->calc(frame0, frame1, flow);

     CPU_SANITY_CHECK(flow);

@@ -369,11 +369,11 @@ CUDA_TEST_P(OpticalFlowDual_TVL1, Accuracy)
     cv::cuda::GpuMat d_flow;
     d_alg->calc(loadMat(frame0), loadMat(frame1), d_flow);

-    cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
-    alg->set("medianFiltering", 1);
-    alg->set("innerIterations", 1);
-    alg->set("outerIterations", d_alg->getNumIterations());
-    alg->set("gamma", gamma);
+    cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
+    alg->setMedianFiltering(1);
+    alg->setInnerIterations(1);
+    alg->setOuterIterations(d_alg->getNumIterations());
+    alg->setGamma(gamma);

     cv::Mat flow;
     alg->calc(frame0, frame1, flow);

@@ -320,8 +320,6 @@ namespace
 public:
     CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8);

-    cv::AlgorithmInfo* info() const;
-
     void apply(cv::InputArray src, cv::OutputArray dst);

     void setClipLimit(double clipLimit);

@@ -351,11 +349,6 @@
     {
     }

-    CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE",
-                      obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
-                      obj.info()->addParam(obj, "tilesX", obj.tilesX_);
-                      obj.info()->addParam(obj, "tilesY", obj.tilesY_))
-
     void CLAHE_Impl::apply(cv::InputArray _src, cv::OutputArray _dst)
     {
         CV_Assert( _src.type() == CV_8UC1 || _src.type() == CV_16UC1 );

@@ -449,40 +449,33 @@ classes 0 and 1, one can determine that the given data instance belongs to class
 \geq 0.5\f$ or class 0 if \f$h_\theta(x) < 0.5\f$ .

 In Logistic Regression, choosing the right parameters is of utmost importance for reducing the
-training error and ensuring high training accuracy. cv::ml::LogisticRegression::Params is the
-structure that defines parameters that are required to train a Logistic Regression classifier.
+training error and ensuring high training accuracy:

-The learning rate is determined by cv::ml::LogisticRegression::Params.alpha. It determines how fast
-we approach the solution. It is a positive real number.
+-   The learning rate can be set with @ref cv::ml::LogisticRegression::setLearningRate "setLearningRate"
+    method. It determines how fast we approach the solution. It is a positive real number.

-Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported in
-LogisticRegression. It is important that we mention the number of iterations these optimization
-algorithms have to run. The number of iterations are mentioned by
-cv::ml::LogisticRegression::Params.num_iters. The number of iterations can be thought as number of
-steps taken and learning rate specifies if it is a long step or a short step. These two parameters
-define how fast we arrive at a possible solution.
+-   Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported
+    in LogisticRegression. It is important that we mention the number of iterations these optimization
+    algorithms have to run. The number of iterations can be set with @ref
+    cv::ml::LogisticRegression::setIterations "setIterations". This parameter can be thought
+    as number of steps taken and learning rate specifies if it is a long step or a short step. This
+    and previous parameter define how fast we arrive at a possible solution.

-In order to compensate for overfitting regularization is performed, which can be enabled by setting
-cv::ml::LogisticRegression::Params.regularized to a positive integer (greater than zero). One can
-specify what kind of regularization has to be performed by setting
-cv::ml::LogisticRegression::Params.norm to REG_L1 or REG_L2 values.
+-   In order to compensate for overfitting regularization is performed, which can be enabled with
+    @ref cv::ml::LogisticRegression::setRegularization "setRegularization". One can specify what
+    kind of regularization has to be performed by passing one of @ref
+    cv::ml::LogisticRegression::RegKinds "regularization kinds" to this method.

-LogisticRegression provides a choice of 2 training methods with Batch Gradient Descent or the Mini-
-Batch Gradient Descent. To specify this, set cv::ml::LogisticRegression::Params::train_method to
-either BATCH or MINI_BATCH. If training method is set to MINI_BATCH, the size of the mini batch has
-to be to a postive integer using cv::ml::LogisticRegression::Params::mini_batch_size.
+-   Logistic regression implementation provides a choice of 2 training methods with Batch Gradient
+    Descent or the MiniBatch Gradient Descent. To specify this, call @ref
+    cv::ml::LogisticRegression::setTrainMethod "setTrainMethod" with either @ref
+    cv::ml::LogisticRegression::BATCH "LogisticRegression::BATCH" or @ref
+    cv::ml::LogisticRegression::MINI_BATCH "LogisticRegression::MINI_BATCH". If training method is
+    set to @ref cv::ml::LogisticRegression::MINI_BATCH "MINI_BATCH", the size of the mini batch has
+    to be to a postive integer set with @ref cv::ml::LogisticRegression::setMiniBatchSize
+    "setMiniBatchSize".

-A sample set of training parameters for the Logistic Regression classifier can be initialized as
-follows:
-@code{.cpp}
-using namespace cv::ml;
-LogisticRegression::Params params;
-params.alpha = 0.5;
-params.num_iters = 10000;
-params.norm = LogisticRegression::REG_L2;
-params.regularized = 1;
-params.train_method = LogisticRegression::MINI_BATCH;
-params.mini_batch_size = 10;
-@endcode
+A sample set of training parameters for the Logistic Regression classifier can be initialized as follows:
+@snippet samples/cpp/logistic_regression.cpp init

 @sa cv::ml::LogisticRegression
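A sketch of the setter-based configuration that the rewritten documentation describes,
mirroring the values from the removed Params example (it assumes the no-argument
LogisticRegression::create() introduced by this refactoring; the @snippet above is the
authoritative sample):

    using namespace cv::ml;
    Ptr<LogisticRegression> lr = LogisticRegression::create();
    lr->setLearningRate(0.5);                           // was params.alpha
    lr->setIterations(10000);                           // was params.num_iters
    lr->setRegularization(LogisticRegression::REG_L2);  // was params.norm / params.regularized
    lr->setTrainMethod(LogisticRegression::MINI_BATCH); // was params.train_method
    lr->setMiniBatchSize(10);                           // was params.mini_batch_size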
(File diff suppressed because it is too large)

@@ -42,84 +42,57 @@

 namespace cv { namespace ml {

-ANN_MLP::Params::Params()
+struct AnnParams
 {
-    layerSizes = Mat();
-    activateFunc = SIGMOID_SYM;
-    fparam1 = fparam2 = 0;
-    termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 );
-    trainMethod = RPROP;
-    bpDWScale = bpMomentScale = 0.1;
-    rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5;
-    rpDWMin = FLT_EPSILON; rpDWMax = 50.;
-}
+    AnnParams()
+    {
+        termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 );
+        trainMethod = ANN_MLP::RPROP;
+        bpDWScale = bpMomentScale = 0.1;
+        rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5;
+        rpDWMin = FLT_EPSILON; rpDWMax = 50.;
+    }

-ANN_MLP::Params::Params( const Mat& _layerSizes, int _activateFunc, double _fparam1, double _fparam2,
-                         TermCriteria _termCrit, int _trainMethod, double _param1, double _param2 )
+    TermCriteria termCrit;
+    int trainMethod;
+
+    double bpDWScale;
+    double bpMomentScale;
+
+    double rpDW0;
+    double rpDWPlus;
+    double rpDWMinus;
+    double rpDWMin;
+    double rpDWMax;
+};
+
+template <typename T>
+inline T inBounds(T val, T min_val, T max_val)
 {
-    layerSizes = _layerSizes;
-    activateFunc = _activateFunc;
-    fparam1 = _fparam1;
-    fparam2 = _fparam2;
-    termCrit = _termCrit;
-    trainMethod = _trainMethod;
-    bpDWScale = bpMomentScale = 0.1;
-    rpDW0 = 1.; rpDWPlus = 1.2; rpDWMinus = 0.5;
-    rpDWMin = FLT_EPSILON; rpDWMax = 50.;
-
-    if( trainMethod == RPROP )
-    {
-        rpDW0 = _param1;
-        if( rpDW0 < FLT_EPSILON )
-            rpDW0 = 1.;
-        rpDWMin = _param2;
-        rpDWMin = std::max( rpDWMin, 0. );
-    }
-    else if( trainMethod == BACKPROP )
-    {
-        bpDWScale = _param1;
-        if( bpDWScale <= 0 )
-            bpDWScale = 0.1;
-        bpDWScale = std::max( bpDWScale, 1e-3 );
-        bpDWScale = std::min( bpDWScale, 1. );
-        bpMomentScale = _param2;
-        if( bpMomentScale < 0 )
-            bpMomentScale = 0.1;
-        bpMomentScale = std::min( bpMomentScale, 1. );
-    }
-    else
-        trainMethod = RPROP;
+    return std::min(std::max(val, min_val), max_val);
 }

 class ANN_MLPImpl : public ANN_MLP
 {
 public:
     ANN_MLPImpl()
     {
         clear();
-    }
-
-    ANN_MLPImpl( const Params& p )
-    {
-        clear();
-        setParams(p);
+        setActivationFunction( SIGMOID_SYM, 0, 0 );
+        setLayerSizes(Mat());
+        setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON);
     }

     virtual ~ANN_MLPImpl() {}

-    void setParams(const Params& p)
-    {
-        params = p;
-        create( params.layerSizes );
-        set_activ_func( params.activateFunc, params.fparam1, params.fparam2 );
-    }
-
-    Params getParams() const
-    {
-        return params;
-    }
+    CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.termCrit)
+    CV_IMPL_PROPERTY(double, BackpropWeightScale, params.bpDWScale)
+    CV_IMPL_PROPERTY(double, BackpropMomentumScale, params.bpMomentScale)
+    CV_IMPL_PROPERTY(double, RpropDW0, params.rpDW0)
+    CV_IMPL_PROPERTY(double, RpropDWPlus, params.rpDWPlus)
+    CV_IMPL_PROPERTY(double, RpropDWMinus, params.rpDWMinus)
+    CV_IMPL_PROPERTY(double, RpropDWMin, params.rpDWMin)
+    CV_IMPL_PROPERTY(double, RpropDWMax, params.rpDWMax)

     void clear()
     {
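With the Params struct gone, an ANN_MLP is configured entirely through the setters shown
in this diff; a minimal sketch (the layer topology is hypothetical, the defaults mirror
the AnnParams constructor above):

    using namespace cv::ml;
    Ptr<ANN_MLP> nn = ANN_MLP::create();
    Mat layers = (Mat_<int>(3, 1) << 2, 5, 1);   // input, hidden, output layer sizes
    nn->setLayerSizes(layers);
    nn->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
    nn->setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON);
    nn->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01));

setTermCriteria/getTermCriteria come from the CV_IMPL_PROPERTY(TermCriteria, ...) line
above; setLayerSizes and setTrainMethod are defined in the hunks that follow.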
@@ -132,7 +105,35 @@ public:

     int layer_count() const { return (int)layer_sizes.size(); }

-    void set_activ_func( int _activ_func, double _f_param1, double _f_param2 )
+    void setTrainMethod(int method, double param1, double param2)
+    {
+        if (method != ANN_MLP::RPROP && method != ANN_MLP::BACKPROP)
+            method = ANN_MLP::RPROP;
+        params.trainMethod = method;
+        if(method == ANN_MLP::RPROP )
+        {
+            if( param1 < FLT_EPSILON )
+                param1 = 1.;
+            params.rpDW0 = param1;
+            params.rpDWMin = std::max( param2, 0. );
+        }
+        else if(method == ANN_MLP::BACKPROP )
+        {
+            if( param1 <= 0 )
+                param1 = 0.1;
+            params.bpDWScale = inBounds<double>(param1, 1e-3, 1.);
+            if( param2 < 0 )
+                param2 = 0.1;
+            params.bpMomentScale = std::min( param2, 1. );
+        }
+    }
+
+    int getTrainMethod() const
+    {
+        return params.trainMethod;
+    }
+
+    void setActivationFunction(int _activ_func, double _f_param1, double _f_param2 )
     {
         if( _activ_func < 0 || _activ_func > GAUSSIAN )
             CV_Error( CV_StsOutOfRange, "Unknown activation function" );

@@ -201,7 +202,12 @@ public:
         }
     }

-    void create( InputArray _layer_sizes )
+    Mat getLayerSizes() const
+    {
+        return Mat_<int>(layer_sizes, true);
+    }
+
+    void setLayerSizes( InputArray _layer_sizes )
     {
         clear();

@@ -700,7 +706,7 @@ public:
         termcrit.maxCount = std::max((params.termCrit.type & CV_TERMCRIT_ITER ? params.termCrit.maxCount : MAX_ITER), 1);
         termcrit.epsilon = std::max((params.termCrit.type & CV_TERMCRIT_EPS ? params.termCrit.epsilon : DEFAULT_EPSILON), DBL_EPSILON);

-        int iter = params.trainMethod == Params::BACKPROP ?
+        int iter = params.trainMethod == ANN_MLP::BACKPROP ?
            train_backprop( inputs, outputs, sw, termcrit ) :
            train_rprop( inputs, outputs, sw, termcrit );

@@ -1113,13 +1119,13 @@ public:
         fs << "min_val" << min_val << "max_val" << max_val << "min_val1" << min_val1 << "max_val1" << max_val1;

         fs << "training_params" << "{";
-        if( params.trainMethod == Params::BACKPROP )
+        if( params.trainMethod == ANN_MLP::BACKPROP )
         {
             fs << "train_method" << "BACKPROP";
             fs << "dw_scale" << params.bpDWScale;
             fs << "moment_scale" << params.bpMomentScale;
         }
-        else if( params.trainMethod == Params::RPROP )
+        else if( params.trainMethod == ANN_MLP::RPROP )
         {
             fs << "train_method" << "RPROP";
             fs << "dw0" << params.rpDW0;

@@ -1186,7 +1192,7 @@ public:
         f_param1 = (double)fn["f_param1"];
         f_param2 = (double)fn["f_param2"];

-        set_activ_func( activ_func, f_param1, f_param2 );
+        setActivationFunction( activ_func, f_param1, f_param2 );

         min_val = (double)fn["min_val"];
         max_val = (double)fn["max_val"];

@@ -1194,7 +1200,7 @@ public:
         max_val1 = (double)fn["max_val1"];

         FileNode tpn = fn["training_params"];
-        params = Params();
+        params = AnnParams();

         if( !tpn.empty() )
         {

@@ -1202,13 +1208,13 @@ public:

             if( tmethod_name == "BACKPROP" )
             {
-                params.trainMethod = Params::BACKPROP;
+                params.trainMethod = ANN_MLP::BACKPROP;
                 params.bpDWScale = (double)tpn["dw_scale"];
                 params.bpMomentScale = (double)tpn["moment_scale"];
             }
             else if( tmethod_name == "RPROP" )
             {
-                params.trainMethod = Params::RPROP;
+                params.trainMethod = ANN_MLP::RPROP;
                 params.rpDW0 = (double)tpn["dw0"];
                 params.rpDWPlus = (double)tpn["dw_plus"];
                 params.rpDWMinus = (double)tpn["dw_minus"];

@@ -1244,7 +1250,7 @@ public:

         vector<int> _layer_sizes;
         readVectorOrMat(fn["layer_sizes"], _layer_sizes);
-        create( _layer_sizes );
+        setLayerSizes( _layer_sizes );

         int i, l_count = layer_count();
         read_params(fn);

@@ -1267,11 +1273,6 @@ public:
         trained = true;
     }

-    Mat getLayerSizes() const
-    {
-        return Mat_<int>(layer_sizes, true);
-    }
-
     Mat getWeights(int layerIdx) const
     {
         CV_Assert( 0 <= layerIdx && layerIdx < (int)weights.size() );

@@ -1304,17 +1305,16 @@ public:
     double min_val, max_val, min_val1, max_val1;
     int activ_func;
     int max_lsize, max_buf_sz;
-    Params params;
+    AnnParams params;
     RNG rng;
     Mutex mtx;
     bool trained;
 };

-Ptr<ANN_MLP> ANN_MLP::create(const ANN_MLP::Params& params)
+Ptr<ANN_MLP> ANN_MLP::create()
 {
-    Ptr<ANN_MLPImpl> ann = makePtr<ANN_MLPImpl>(params);
-    return ann;
+    return makePtr<ANN_MLPImpl>();
 }

 }}

@@ -54,48 +54,33 @@ log_ratio( double val )
 }

-Boost::Params::Params()
+BoostTreeParams::BoostTreeParams()
 {
     boostType = Boost::REAL;
     weakCount = 100;
     weightTrimRate = 0.95;
-    CVFolds = 0;
-    maxDepth = 1;
 }

-Boost::Params::Params( int _boostType, int _weak_count,
-                       double _weightTrimRate, int _maxDepth,
-                       bool _use_surrogates, const Mat& _priors )
+BoostTreeParams::BoostTreeParams( int _boostType, int _weak_count,
+                                  double _weightTrimRate)
 {
     boostType = _boostType;
     weakCount = _weak_count;
     weightTrimRate = _weightTrimRate;
-    CVFolds = 0;
-    maxDepth = _maxDepth;
-    useSurrogates = _use_surrogates;
-    priors = _priors;
 }

 class DTreesImplForBoost : public DTreesImpl
 {
 public:
-    DTreesImplForBoost() {}
+    DTreesImplForBoost()
+    {
+        params.setCVFolds(0);
+        params.setMaxDepth(1);
+    }
     virtual ~DTreesImplForBoost() {}

     bool isClassifier() const { return true; }

-    void setBParams(const Boost::Params& p)
-    {
-        bparams = p;
-    }
-
-    Boost::Params getBParams() const
-    {
-        return bparams;
-    }
-
     void clear()
     {
         DTreesImpl::clear();

@@ -199,10 +184,6 @@ public:

     bool train( const Ptr<TrainData>& trainData, int flags )
     {
-        Params dp(bparams.maxDepth, bparams.minSampleCount, bparams.regressionAccuracy,
-                  bparams.useSurrogates, bparams.maxCategories, 0,
-                  false, false, bparams.priors);
-        setDParams(dp);
         startTraining(trainData, flags);
         int treeidx, ntrees = bparams.weakCount >= 0 ? bparams.weakCount : 10000;
         vector<int> sidx = w->sidx;

@@ -426,12 +407,6 @@ public:
     void readParams( const FileNode& fn )
     {
         DTreesImpl::readParams(fn);
-        bparams.maxDepth = params0.maxDepth;
-        bparams.minSampleCount = params0.minSampleCount;
-        bparams.regressionAccuracy = params0.regressionAccuracy;
-        bparams.useSurrogates = params0.useSurrogates;
-        bparams.maxCategories = params0.maxCategories;
-        bparams.priors = params0.priors;

         FileNode tparams_node = fn["training_params"];
         // check for old layout

@@ -465,7 +440,7 @@ public:
         }
     }

-    Boost::Params bparams;
+    BoostTreeParams bparams;
     vector<double> sumResult;
 };

@@ -476,6 +451,20 @@ public:
     BoostImpl() {}
     virtual ~BoostImpl() {}

+    CV_IMPL_PROPERTY(int, BoostType, impl.bparams.boostType)
+    CV_IMPL_PROPERTY(int, WeakCount, impl.bparams.weakCount)
+    CV_IMPL_PROPERTY(double, WeightTrimRate, impl.bparams.weightTrimRate)
+
+    CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params)
+    CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params)
+    CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params)
+    CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params)
+    CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params)
+    CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params)
+    CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params)
+    CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params)
+    CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params)
+
     String getDefaultModelName() const { return "opencv_ml_boost"; }

     bool train( const Ptr<TrainData>& trainData, int flags )

@@ -498,9 +487,6 @@ public:
         impl.read(fn);
     }

-    void setBParams(const Params& p) { impl.setBParams(p); }
-    Params getBParams() const { return impl.getBParams(); }
-
     int getVarCount() const { return impl.getVarCount(); }

     bool isTrained() const { return impl.isTrained(); }

@@ -515,11 +501,9 @@
 };

-Ptr<Boost> Boost::create(const Params& params)
+Ptr<Boost> Boost::create()
 {
-    Ptr<BoostImpl> p = makePtr<BoostImpl>();
-    p->setBParams(params);
-    return p;
+    return makePtr<BoostImpl>();
 }

 }}
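The removed Boost::create(const Params&) overload is replaced by the default create()
plus the properties declared in BoostImpl above; a minimal sketch reproducing the old
BoostTreeParams defaults:

    using namespace cv::ml;
    Ptr<Boost> boost = Boost::create();
    boost->setBoostType(Boost::REAL);   // via CV_IMPL_PROPERTY(int, BoostType, ...)
    boost->setWeakCount(100);
    boost->setWeightTrimRate(0.95);
    boost->setMaxDepth(1);              // tree settings forwarded via CV_WRAP_SAME_PROPERTY
    boost->setCVFolds(0);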
@@ -48,37 +48,49 @@ namespace ml

 const double minEigenValue = DBL_EPSILON;

-EM::Params::Params(int _nclusters, int _covMatType, const TermCriteria& _termCrit)
-{
-    nclusters = _nclusters;
-    covMatType = _covMatType;
-    termCrit = _termCrit;
-}

 class CV_EXPORTS EMImpl : public EM
 {
 public:
-    EMImpl(const Params& _params)
-    {
-        setParams(_params);
-    }
+    int nclusters;
+    int covMatType;
+    TermCriteria termCrit;
+
+    CV_IMPL_PROPERTY_S(TermCriteria, TermCriteria, termCrit)
+
+    void setClustersNumber(int val)
+    {
+        nclusters = val;
+        CV_Assert(nclusters > 1);
+    }
+
+    int getClustersNumber() const
+    {
+        return nclusters;
+    }
+
+    void setCovarianceMatrixType(int val)
+    {
+        covMatType = val;
+        CV_Assert(covMatType == COV_MAT_SPHERICAL ||
+                  covMatType == COV_MAT_DIAGONAL ||
+                  covMatType == COV_MAT_GENERIC);
+    }
+
+    int getCovarianceMatrixType() const
+    {
+        return covMatType;
+    }
+
+    EMImpl()
+    {
+        nclusters = DEFAULT_NCLUSTERS;
+        covMatType=EM::COV_MAT_DIAGONAL;
+        termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, 1e-6);
+    }

     virtual ~EMImpl() {}

-    void setParams(const Params& _params)
-    {
-        params = _params;
-        CV_Assert(params.nclusters > 1);
-        CV_Assert(params.covMatType == COV_MAT_SPHERICAL ||
-                  params.covMatType == COV_MAT_DIAGONAL ||
-                  params.covMatType == COV_MAT_GENERIC);
-    }
-
-    Params getParams() const
-    {
-        return params;
-    }
-
     void clear()
     {
         trainSamples.release();
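The same pattern applies to EM: per-field properties replace the removed EM::Params
struct. A minimal usage sketch (samples is a hypothetical row-sample training Mat;
trainEM is the renamed train_ shown in the next hunk, and the no-argument EM::create()
appears further down in this file):

    using namespace cv::ml;
    Ptr<EM> em = EM::create();
    em->setClustersNumber(5);                          // must be > 1, per the assert above
    em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
    em->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS,
                                     EM::DEFAULT_MAX_ITERS, 1e-6));
    Mat labels;
    em->trainEM(samples, noArray(), labels, noArray());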
@@ -100,10 +112,10 @@ public:
     bool train(const Ptr<TrainData>& data, int)
     {
         Mat samples = data->getTrainSamples(), labels;
-        return train_(samples, labels, noArray(), noArray());
+        return trainEM(samples, labels, noArray(), noArray());
     }

-    bool train_(InputArray samples,
+    bool trainEM(InputArray samples,
                OutputArray logLikelihoods,
                OutputArray labels,
                OutputArray probs)

@@ -157,7 +169,7 @@ public:
         {
             if( _outputs.fixedType() )
                 ptype = _outputs.type();
-            _outputs.create(samples.rows, params.nclusters, ptype);
+            _outputs.create(samples.rows, nclusters, ptype);
         }
         else
             nsamples = std::min(nsamples, 1);

@@ -193,7 +205,7 @@ public:
         {
             if( _probs.fixedType() )
                 ptype = _probs.type();
-            _probs.create(1, params.nclusters, ptype);
+            _probs.create(1, nclusters, ptype);
             probs = _probs.getMat();
         }

@@ -311,7 +323,6 @@ public:
                       const std::vector<Mat>* covs0,
                       const Mat* weights0)
     {
-        int nclusters = params.nclusters, covMatType = params.covMatType;
         clear();

         checkTrainData(startStep, samples, nclusters, covMatType, probs0, means0, covs0, weights0);

@@ -350,7 +361,6 @@ public:

     void decomposeCovs()
     {
-        int nclusters = params.nclusters, covMatType = params.covMatType;
         CV_Assert(!covs.empty());
         covsEigenValues.resize(nclusters);
         if(covMatType == COV_MAT_GENERIC)

@@ -383,7 +393,6 @@ public:

     void clusterTrainSamples()
     {
-        int nclusters = params.nclusters;
         int nsamples = trainSamples.rows;

         // Cluster samples, compute/update means

@@ -443,7 +452,6 @@ public:

     void computeLogWeightDivDet()
     {
-        int nclusters = params.nclusters;
         CV_Assert(!covsEigenValues.empty());

         Mat logWeights;

@@ -458,7 +466,7 @@ public:
             double logDetCov = 0.;
             const int evalCount = static_cast<int>(covsEigenValues[clusterIndex].total());
             for(int di = 0; di < evalCount; di++)
-                logDetCov += std::log(covsEigenValues[clusterIndex].at<double>(params.covMatType != COV_MAT_SPHERICAL ? di : 0));
+                logDetCov += std::log(covsEigenValues[clusterIndex].at<double>(covMatType != COV_MAT_SPHERICAL ? di : 0));

             logWeightDivDet.at<double>(clusterIndex) = logWeights.at<double>(clusterIndex) - 0.5 * logDetCov;
         }

@@ -466,7 +474,6 @@ public:

     bool doTrain(int startStep, OutputArray logLikelihoods, OutputArray labels, OutputArray probs)
     {
-        int nclusters = params.nclusters;
         int dim = trainSamples.cols;
         // Precompute the empty initial train data in the cases of START_E_STEP and START_AUTO_STEP
         if(startStep != START_M_STEP)

@@ -488,9 +495,9 @@
             mStep();

         double trainLogLikelihood, prevTrainLogLikelihood = 0.;
-        int maxIters = (params.termCrit.type & TermCriteria::MAX_ITER) ?
-            params.termCrit.maxCount : DEFAULT_MAX_ITERS;
-        double epsilon = (params.termCrit.type & TermCriteria::EPS) ? params.termCrit.epsilon : 0.;
+        int maxIters = (termCrit.type & TermCriteria::MAX_ITER) ?
+            termCrit.maxCount : DEFAULT_MAX_ITERS;
+        double epsilon = (termCrit.type & TermCriteria::EPS) ? termCrit.epsilon : 0.;

         for(int iter = 0; ; iter++)
         {

@@ -521,12 +528,12 @@
             covs.resize(nclusters);
             for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
             {
-                if(params.covMatType == COV_MAT_SPHERICAL)
+                if(covMatType == COV_MAT_SPHERICAL)
                 {
                     covs[clusterIndex].create(dim, dim, CV_64FC1);
                     setIdentity(covs[clusterIndex], Scalar(covsEigenValues[clusterIndex].at<double>(0)));
                 }
-                else if(params.covMatType == COV_MAT_DIAGONAL)
+                else if(covMatType == COV_MAT_DIAGONAL)
                 {
                     covs[clusterIndex] = Mat::diag(covsEigenValues[clusterIndex]);
                 }

@@ -555,7 +562,6 @@
         // see Alex Smola's blog http://blog.smola.org/page/2 for
         // details on the log-sum-exp trick

-        int nclusters = params.nclusters, covMatType = params.covMatType;
         int stype = sample.type();
         CV_Assert(!means.empty());
         CV_Assert((stype == CV_32F || stype == CV_64F) && (ptype == CV_32F || ptype == CV_64F));

@@ -621,7 +627,7 @@
     void eStep()
     {
         // Compute probs_ik from means_k, covs_k and weights_k.
-        trainProbs.create(trainSamples.rows, params.nclusters, CV_64FC1);
+        trainProbs.create(trainSamples.rows, nclusters, CV_64FC1);
         trainLabels.create(trainSamples.rows, 1, CV_32SC1);
         trainLogLikelihoods.create(trainSamples.rows, 1, CV_64FC1);

@@ -642,8 +648,6 @@
     void mStep()
     {
         // Update means_k, covs_k and weights_k from probs_ik
-        int nclusters = params.nclusters;
-        int covMatType = params.covMatType;
         int dim = trainSamples.cols;

         // Update weights

@@ -755,12 +759,12 @@

     void write_params(FileStorage& fs) const
     {
-        fs << "nclusters" << params.nclusters;
-        fs << "cov_mat_type" << (params.covMatType == COV_MAT_SPHERICAL ? String("spherical") :
-                                 params.covMatType == COV_MAT_DIAGONAL ? String("diagonal") :
-                                 params.covMatType == COV_MAT_GENERIC ? String("generic") :
-                                 format("unknown_%d", params.covMatType));
-        writeTermCrit(fs, params.termCrit);
+        fs << "nclusters" << nclusters;
+        fs << "cov_mat_type" << (covMatType == COV_MAT_SPHERICAL ? String("spherical") :
+                                 covMatType == COV_MAT_DIAGONAL ? String("diagonal") :
+                                 covMatType == COV_MAT_GENERIC ? String("generic") :
+                                 format("unknown_%d", covMatType));
+        writeTermCrit(fs, termCrit);
     }

     void write(FileStorage& fs) const

@@ -781,15 +785,13 @@

     void read_params(const FileNode& fn)
     {
-        Params _params;
-        _params.nclusters = (int)fn["nclusters"];
+        nclusters = (int)fn["nclusters"];
         String s = (String)fn["cov_mat_type"];
-        _params.covMatType = s == "spherical" ? COV_MAT_SPHERICAL :
+        covMatType = s == "spherical" ? COV_MAT_SPHERICAL :
                              s == "diagonal" ? COV_MAT_DIAGONAL :
                              s == "generic" ? COV_MAT_GENERIC : -1;
-        CV_Assert(_params.covMatType >= 0);
-        _params.termCrit = readTermCrit(fn);
-        setParams(_params);
+        CV_Assert(covMatType >= 0);
+        termCrit = readTermCrit(fn);
     }

     void read(const FileNode& fn)

@@ -820,8 +822,6 @@
         std::copy(covs.begin(), covs.end(), _covs.begin());
     }

-    Params params;
-
     // all inner matrices have type CV_64FC1
     Mat trainSamples;
     Mat trainProbs;

@@ -838,41 +838,9 @@
     Mat logWeightDivDet;
 };

-Ptr<EM> EM::train(InputArray samples, OutputArray logLikelihoods,
-                  OutputArray labels, OutputArray probs,
-                  const EM::Params& params)
+Ptr<EM> EM::create()
 {
-    Ptr<EMImpl> em = makePtr<EMImpl>(params);
-    if(!em->train_(samples, logLikelihoods, labels, probs))
-        em.release();
-    return em;
-}
-
-Ptr<EM> EM::train_startWithE(InputArray samples, InputArray means0,
-                             InputArray covs0, InputArray weights0,
-                             OutputArray logLikelihoods, OutputArray labels,
-                             OutputArray probs, const EM::Params& params)
-{
-    Ptr<EMImpl> em = makePtr<EMImpl>(params);
-    if(!em->trainE(samples, means0, covs0, weights0, logLikelihoods, labels, probs))
-        em.release();
-    return em;
-}
-
-Ptr<EM> EM::train_startWithM(InputArray samples, InputArray probs0,
-                             OutputArray logLikelihoods, OutputArray labels,
-                             OutputArray probs, const EM::Params& params)
-{
-    Ptr<EMImpl> em = makePtr<EMImpl>(params);
-    if(!em->trainM(samples, probs0, logLikelihoods, labels, probs))
-        em.release();
-    return em;
-}
-
-Ptr<EM> EM::create(const Params& params)
-{
-    return makePtr<EMImpl>(params);
+    return makePtr<EMImpl>();
 }

 }

@@ -50,46 +50,33 @@
 namespace cv {
 namespace ml {

-KNearest::Params::Params(int k, bool isclassifier_, int Emax_, int algorithmType_) :
-    defaultK(k),
-    isclassifier(isclassifier_),
-    Emax(Emax_),
-    algorithmType(algorithmType_)
-{
-}
+const String NAME_BRUTE_FORCE = "opencv_ml_knn";
+const String NAME_KDTREE = "opencv_ml_knn_kd";

-class KNearestImpl : public KNearest
+class Impl
 {
 public:
-    KNearestImpl(const Params& p)
+    Impl()
     {
-        params = p;
+        defaultK = 10;
+        isclassifier = true;
+        Emax = INT_MAX;
     }

-    virtual ~KNearestImpl() {}
-
-    Params getParams() const { return params; }
-    void setParams(const Params& p) { params = p; }
-
-    bool isClassifier() const { return params.isclassifier; }
-    bool isTrained() const { return !samples.empty(); }
-
-    String getDefaultModelName() const { return "opencv_ml_knn"; }
-
-    void clear()
-    {
-        samples.release();
-        responses.release();
-    }
-
-    int getVarCount() const { return samples.cols; }
+    virtual ~Impl() {}
+    virtual String getModelName() const = 0;
+    virtual int getType() const = 0;
+    virtual float findNearest( InputArray _samples, int k,
+                               OutputArray _results,
+                               OutputArray _neighborResponses,
+                               OutputArray _dists ) const = 0;

     bool train( const Ptr<TrainData>& data, int flags )
     {
         Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
         Mat new_responses;
         data->getTrainResponses().convertTo(new_responses, CV_32F);
-        bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty();
+        bool update = (flags & ml::KNearest::UPDATE_MODEL) != 0 && !samples.empty();

         CV_Assert( new_samples.type() == CV_32F );

@@ -106,9 +93,53 @@ public:
         samples.push_back(new_samples);
         responses.push_back(new_responses);

+        doTrain(samples);
+
         return true;
     }

+    virtual void doTrain(InputArray points) { (void)points; }
+
+    void clear()
+    {
+        samples.release();
+        responses.release();
+    }
+
+    void read( const FileNode& fn )
+    {
+        clear();
+        isclassifier = (int)fn["is_classifier"] != 0;
+        defaultK = (int)fn["default_k"];
+
+        fn["samples"] >> samples;
+        fn["responses"] >> responses;
+    }
+
+    void write( FileStorage& fs ) const
+    {
+        fs << "is_classifier" << (int)isclassifier;
+        fs << "default_k" << defaultK;
+
+        fs << "samples" << samples;
+        fs << "responses" << responses;
+    }
+
+public:
+    int defaultK;
+    bool isclassifier;
+    int Emax;
+
+    Mat samples;
+    Mat responses;
+};
+
+class BruteForceImpl : public Impl
+{
+public:
+    String getModelName() const { return NAME_BRUTE_FORCE; }
+    int getType() const { return ml::KNearest::BRUTE_FORCE; }
+
     void findNearestCore( const Mat& _samples, int k0, const Range& range,
                           Mat* results, Mat* neighbor_responses,
                           Mat* dists, float* presult ) const
@ -199,7 +230,7 @@ public:
|
|||||||
|
|
||||||
if( results || testidx+range.start == 0 )
|
if( results || testidx+range.start == 0 )
|
||||||
{
|
{
|
||||||
if( !params.isclassifier || k == 1 )
|
if( !isclassifier || k == 1 )
|
||||||
{
|
{
|
||||||
float s = 0.f;
|
float s = 0.f;
|
||||||
for( j = 0; j < k; j++ )
|
for( j = 0; j < k; j++ )
|
||||||
@ -251,7 +282,7 @@ public:
|
|||||||
|
|
||||||
struct findKNearestInvoker : public ParallelLoopBody
|
struct findKNearestInvoker : public ParallelLoopBody
|
||||||
{
|
{
|
||||||
findKNearestInvoker(const KNearestImpl* _p, int _k, const Mat& __samples,
|
findKNearestInvoker(const BruteForceImpl* _p, int _k, const Mat& __samples,
|
||||||
Mat* __results, Mat* __neighbor_responses, Mat* __dists, float* _presult)
|
Mat* __results, Mat* __neighbor_responses, Mat* __dists, float* _presult)
|
||||||
{
|
{
|
||||||
p = _p;
|
p = _p;
|
||||||
@ -273,7 +304,7 @@ public:
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
const KNearestImpl* p;
|
const BruteForceImpl* p;
|
||||||
int k;
|
int k;
|
||||||
const Mat* _samples;
|
const Mat* _samples;
|
||||||
Mat* _results;
|
Mat* _results;
|
||||||
@ -324,88 +355,18 @@ public:
|
|||||||
//invoker(Range(0, testcount));
|
//invoker(Range(0, testcount));
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
float predict(InputArray inputs, OutputArray outputs, int) const
|
|
||||||
{
|
|
||||||
return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() );
|
|
||||||
}
|
|
||||||
|
|
||||||
void write( FileStorage& fs ) const
|
|
||||||
{
|
|
||||||
fs << "is_classifier" << (int)params.isclassifier;
|
|
||||||
fs << "default_k" << params.defaultK;
|
|
||||||
|
|
||||||
fs << "samples" << samples;
|
|
||||||
fs << "responses" << responses;
|
|
||||||
}
|
|
||||||
|
|
||||||
void read( const FileNode& fn )
|
|
||||||
{
|
|
||||||
clear();
|
|
||||||
params.isclassifier = (int)fn["is_classifier"] != 0;
|
|
||||||
params.defaultK = (int)fn["default_k"];
|
|
||||||
|
|
||||||
fn["samples"] >> samples;
|
|
||||||
fn["responses"] >> responses;
|
|
||||||
}
|
|
||||||
|
|
||||||
Mat samples;
|
|
||||||
Mat responses;
|
|
||||||
Params params;
|
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
class KNearestKDTreeImpl : public KNearest
|
class KDTreeImpl : public Impl
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
KNearestKDTreeImpl(const Params& p)
|
String getModelName() const { return NAME_KDTREE; }
|
||||||
|
int getType() const { return ml::KNearest::KDTREE; }
|
||||||
|
|
||||||
|
void doTrain(InputArray points)
|
||||||
{
|
{
|
||||||
params = p;
|
tr.build(points);
|
||||||
}
|
|
||||||
|
|
||||||
virtual ~KNearestKDTreeImpl() {}
|
|
||||||
|
|
||||||
Params getParams() const { return params; }
|
|
||||||
void setParams(const Params& p) { params = p; }
|
|
||||||
|
|
||||||
bool isClassifier() const { return params.isclassifier; }
|
|
||||||
bool isTrained() const { return !samples.empty(); }
|
|
||||||
|
|
||||||
String getDefaultModelName() const { return "opencv_ml_knn_kd"; }
|
|
||||||
|
|
||||||
void clear()
|
|
||||||
{
|
|
||||||
samples.release();
|
|
||||||
responses.release();
|
|
||||||
}
|
|
||||||
|
|
||||||
int getVarCount() const { return samples.cols; }
|
|
||||||
|
|
||||||
bool train( const Ptr<TrainData>& data, int flags )
|
|
||||||
{
|
|
||||||
Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
|
|
||||||
Mat new_responses;
|
|
||||||
data->getTrainResponses().convertTo(new_responses, CV_32F);
|
|
||||||
bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty();
|
|
||||||
|
|
||||||
CV_Assert( new_samples.type() == CV_32F );
|
|
||||||
|
|
||||||
if( !update )
|
|
||||||
{
|
|
||||||
clear();
|
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
CV_Assert( new_samples.cols == samples.cols &&
|
|
||||||
new_responses.cols == responses.cols );
|
|
||||||
}
|
|
||||||
|
|
||||||
samples.push_back(new_samples);
|
|
||||||
responses.push_back(new_responses);
|
|
||||||
|
|
||||||
tr.build(samples);
|
|
||||||
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
float findNearest( InputArray _samples, int k,
|
float findNearest( InputArray _samples, int k,
|
||||||
@ -460,51 +421,97 @@ public:
|
|||||||
{
|
{
|
||||||
_d = d.row(i);
|
_d = d.row(i);
|
||||||
}
|
}
|
||||||
tr.findNearest(test_samples.row(i), k, params.Emax, _res, _nr, _d, noArray());
|
tr.findNearest(test_samples.row(i), k, Emax, _res, _nr, _d, noArray());
|
||||||
}
|
}
|
||||||
|
|
||||||
return result; // currently always 0
|
return result; // currently always 0
|
||||||
}
|
}
|
||||||
|
|
||||||
float predict(InputArray inputs, OutputArray outputs, int) const
|
KDTree tr;
|
||||||
|
};
|
||||||
|
|
||||||
|
//================================================================
|
||||||
|
|
||||||
|
class KNearestImpl : public KNearest
|
||||||
|
{
|
||||||
|
CV_IMPL_PROPERTY(int, DefaultK, impl->defaultK)
|
||||||
|
CV_IMPL_PROPERTY(bool, IsClassifier, impl->isclassifier)
|
||||||
|
CV_IMPL_PROPERTY(int, Emax, impl->Emax)
|
||||||
|
|
||||||
|
public:
|
||||||
|
int getAlgorithmType() const
|
||||||
{
|
{
|
||||||
return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() );
|
return impl->getType();
|
||||||
}
|
}
|
||||||
|
void setAlgorithmType(int val)
|
||||||
|
{
|
||||||
|
if (val != BRUTE_FORCE && val != KDTREE)
|
||||||
|
val = BRUTE_FORCE;
|
||||||
|
initImpl(val);
|
||||||
|
}
|
||||||
|
|
||||||
|
public:
|
||||||
|
KNearestImpl()
|
||||||
|
{
|
||||||
|
initImpl(BRUTE_FORCE);
|
||||||
|
}
|
||||||
|
~KNearestImpl()
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
bool isClassifier() const { return impl->isclassifier; }
|
||||||
|
bool isTrained() const { return !impl->samples.empty(); }
|
||||||
|
|
||||||
|
int getVarCount() const { return impl->samples.cols; }
|
||||||
|
|
||||||
void write( FileStorage& fs ) const
|
void write( FileStorage& fs ) const
|
||||||
{
|
{
|
||||||
fs << "is_classifier" << (int)params.isclassifier;
|
impl->write(fs);
|
||||||
fs << "default_k" << params.defaultK;
|
|
||||||
|
|
||||||
fs << "samples" << samples;
|
|
||||||
fs << "responses" << responses;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void read( const FileNode& fn )
|
void read( const FileNode& fn )
|
||||||
{
|
{
|
||||||
clear();
|
int algorithmType = BRUTE_FORCE;
|
||||||
params.isclassifier = (int)fn["is_classifier"] != 0;
|
if (fn.name() == NAME_KDTREE)
|
||||||
params.defaultK = (int)fn["default_k"];
|
algorithmType = KDTREE;
|
||||||
|
initImpl(algorithmType);
|
||||||
fn["samples"] >> samples;
|
impl->read(fn);
|
||||||
fn["responses"] >> responses;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
KDTree tr;
|
float findNearest( InputArray samples, int k,
|
||||||
|
OutputArray results,
|
||||||
|
OutputArray neighborResponses=noArray(),
|
||||||
|
OutputArray dist=noArray() ) const
|
||||||
|
{
|
||||||
|
return impl->findNearest(samples, k, results, neighborResponses, dist);
|
||||||
|
}
|
||||||
|
|
||||||
Mat samples;
|
float predict(InputArray inputs, OutputArray outputs, int) const
|
||||||
Mat responses;
|
{
|
||||||
Params params;
|
return impl->findNearest( inputs, impl->defaultK, outputs, noArray(), noArray() );
|
||||||
|
}
|
||||||
|
|
||||||
|
bool train( const Ptr<TrainData>& data, int flags )
|
||||||
|
{
|
||||||
|
return impl->train(data, flags);
|
||||||
|
}
|
||||||
|
|
||||||
|
String getDefaultModelName() const { return impl->getModelName(); }
|
||||||
|
|
||||||
|
protected:
|
||||||
|
void initImpl(int algorithmType)
|
||||||
|
{
|
||||||
|
if (algorithmType != KDTREE)
|
||||||
|
impl = makePtr<BruteForceImpl>();
|
||||||
|
else
|
||||||
|
impl = makePtr<KDTreeImpl>();
|
||||||
|
}
|
||||||
|
Ptr<Impl> impl;
|
||||||
};
|
};
|
||||||
|
|
||||||
Ptr<KNearest> KNearest::create(const Params& p)
|
Ptr<KNearest> KNearest::create()
|
||||||
{
|
{
|
||||||
if (KDTREE==p.algorithmType)
|
return makePtr<KNearestImpl>();
|
||||||
{
|
|
||||||
return makePtr<KNearestKDTreeImpl>(p);
|
|
||||||
}
|
|
||||||
|
|
||||||
return makePtr<KNearestImpl>(p);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
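
Note: KNearest now chooses its brute-force or KD-tree backend at run time via setAlgorithmType(). A hedged sketch of the new calling sequence, using only accessors visible in this patch; trainData and testSamples are assumed to be prepared elsewhere (e.g. trainData = ml::TrainData::create(samples, ml::ROW_SAMPLE, responses)):

    Ptr<ml::KNearest> knn = ml::KNearest::create();
    knn->setDefaultK(3);                          // accessor generated by CV_IMPL_PROPERTY
    knn->setIsClassifier(true);
    knn->setAlgorithmType(ml::KNearest::KDTREE);  // BRUTE_FORCE is the default backend
    knn->train(trainData);
    Mat results;
    knn->findNearest(testSamples, 3, results);
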
@@ -60,31 +60,41 @@ using namespace std;
 namespace cv {
 namespace ml {
 
-LogisticRegression::Params::Params(double learning_rate,
-                                   int iters,
-                                   int method,
-                                   int normlization,
-                                   int reg,
-                                   int batch_size)
+class LrParams
 {
-    alpha = learning_rate;
-    num_iters = iters;
-    norm = normlization;
-    regularized = reg;
-    train_method = method;
-    mini_batch_size = batch_size;
-    term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
-}
+public:
+    LrParams()
+    {
+        alpha = 0.001;
+        num_iters = 1000;
+        norm = LogisticRegression::REG_L2;
+        train_method = LogisticRegression::BATCH;
+        mini_batch_size = 1;
+        term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
+    }
+
+    double alpha; //!< learning rate.
+    int num_iters; //!< number of iterations.
+    int norm;
+    int train_method;
+    int mini_batch_size;
+    TermCriteria term_crit;
+};
 
 class LogisticRegressionImpl : public LogisticRegression
 {
 public:
-    LogisticRegressionImpl(const Params& pms)
-        : params(pms)
-    {
-    }
+    LogisticRegressionImpl() { }
     virtual ~LogisticRegressionImpl() {}
 
+    CV_IMPL_PROPERTY(double, LearningRate, params.alpha)
+    CV_IMPL_PROPERTY(int, Iterations, params.num_iters)
+    CV_IMPL_PROPERTY(int, Regularization, params.norm)
+    CV_IMPL_PROPERTY(int, TrainMethod, params.train_method)
+    CV_IMPL_PROPERTY(int, MiniBatchSize, params.mini_batch_size)
+    CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.term_crit)
+
     virtual bool train( const Ptr<TrainData>& trainData, int=0 );
     virtual float predict(InputArray samples, OutputArray results, int) const;
     virtual void clear();
 
@@ -103,7 +113,7 @@ protected:
     bool set_label_map(const Mat& _labels_i);
     Mat remap_labels(const Mat& _labels_i, const map<int, int>& lmap) const;
 protected:
-    Params params;
+    LrParams params;
     Mat learnt_thetas;
     map<int, int> forward_mapper;
     map<int, int> reverse_mapper;
 
@@ -111,9 +121,9 @@ protected:
     Mat labels_n;
 };
 
-Ptr<LogisticRegression> LogisticRegression::create(const Params& params)
+Ptr<LogisticRegression> LogisticRegression::create()
 {
-    return makePtr<LogisticRegressionImpl>(params);
+    return makePtr<LogisticRegressionImpl>();
 }
 
 bool LogisticRegressionImpl::train(const Ptr<TrainData>& trainData, int)
 
@@ -312,7 +322,7 @@ double LogisticRegressionImpl::compute_cost(const Mat& _data, const Mat& _labels
     theta_b = _init_theta(Range(1, n), Range::all());
     multiply(theta_b, theta_b, theta_c, 1);
 
-    if(this->params.regularized > 0)
+    if(params.norm != REG_NONE)
     {
         llambda = 1;
     }
 
@@ -367,7 +377,7 @@ Mat LogisticRegressionImpl::compute_batch_gradient(const Mat& _data, const Mat&
     m = _data.rows;
     n = _data.cols;
 
-    if(this->params.regularized > 0)
+    if(params.norm != REG_NONE)
    {
         llambda = 1;
     }
 
@@ -439,7 +449,7 @@ Mat LogisticRegressionImpl::compute_mini_batch_gradient(const Mat& _data, const
     Mat data_d;
     Mat labels_l;
 
-    if(this->params.regularized > 0)
+    if(params.norm != REG_NONE)
     {
         lambda_l = 1;
     }
 
@@ -570,7 +580,6 @@ void LogisticRegressionImpl::write(FileStorage& fs) const
     fs<<"alpha"<<this->params.alpha;
     fs<<"iterations"<<this->params.num_iters;
     fs<<"norm"<<this->params.norm;
-    fs<<"regularized"<<this->params.regularized;
     fs<<"train_method"<<this->params.train_method;
     if(this->params.train_method == LogisticRegression::MINI_BATCH)
     {
 
@@ -592,7 +601,6 @@ void LogisticRegressionImpl::read(const FileNode& fn)
     this->params.alpha = (double)fn["alpha"];
     this->params.num_iters = (int)fn["iterations"];
     this->params.norm = (int)fn["norm"];
-    this->params.regularized = (int)fn["regularized"];
     this->params.train_method = (int)fn["train_method"];
 
     if(this->params.train_method == LogisticRegression::MINI_BATCH)
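
Note: the LrParams fields now surface only through CV_IMPL_PROPERTY accessors. A hedged sketch of the equivalent configuration under the new API (trainData is assumed to be a Ptr<ml::TrainData> built elsewhere):

    Ptr<ml::LogisticRegression> lr = ml::LogisticRegression::create();
    lr->setLearningRate(0.001);   // was Params::alpha
    lr->setIterations(1000);      // was Params::num_iters
    lr->setRegularization(ml::LogisticRegression::REG_L2);
    lr->setTrainMethod(ml::LogisticRegression::BATCH);
    lr->train(trainData);
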
@@ -43,7 +43,6 @@
 namespace cv {
 namespace ml {
 
-NormalBayesClassifier::Params::Params() {}
 
 class NormalBayesClassifierImpl : public NormalBayesClassifier
 {
 
@@ -53,9 +52,6 @@ public:
         nallvars = 0;
     }
 
-    void setParams(const Params&) {}
-    Params getParams() const { return Params(); }
-
     bool train( const Ptr<TrainData>& trainData, int flags )
     {
         const float min_variation = FLT_EPSILON;
 
@@ -455,7 +451,7 @@ public:
 };
 
 
-Ptr<NormalBayesClassifier> NormalBayesClassifier::create(const Params&)
+Ptr<NormalBayesClassifier> NormalBayesClassifier::create()
 {
     Ptr<NormalBayesClassifierImpl> p = makePtr<NormalBayesClassifierImpl>();
     return p;
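
Note: NormalBayesClassifier had an empty Params class, so its migration is the simplest case; a sketch of the new call, assuming trainData as above:

    Ptr<ml::NormalBayesClassifier> nb = ml::NormalBayesClassifier::create();  // no Params argument anymore
    nb->train(trainData);
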
@@ -120,6 +120,91 @@ namespace ml
         return termCrit;
     }
 
+    struct TreeParams
+    {
+        TreeParams();
+        TreeParams( int maxDepth, int minSampleCount,
+                    double regressionAccuracy, bool useSurrogates,
+                    int maxCategories, int CVFolds,
+                    bool use1SERule, bool truncatePrunedTree,
+                    const Mat& priors );
+
+        inline void setMaxCategories(int val)
+        {
+            if( val < 2 )
+                CV_Error( CV_StsOutOfRange, "max_categories should be >= 2" );
+            maxCategories = std::min(val, 15 );
+        }
+        inline void setMaxDepth(int val)
+        {
+            if( val < 0 )
+                CV_Error( CV_StsOutOfRange, "max_depth should be >= 0" );
+            maxDepth = std::min( val, 25 );
+        }
+        inline void setMinSampleCount(int val)
+        {
+            minSampleCount = std::max(val, 1);
+        }
+        inline void setCVFolds(int val)
+        {
+            if( val < 0 )
+                CV_Error( CV_StsOutOfRange,
+                          "params.CVFolds should be =0 (the tree is not pruned) "
+                          "or n>0 (tree is pruned using n-fold cross-validation)" );
+            if( val == 1 )
+                val = 0;
+            CVFolds = val;
+        }
+        inline void setRegressionAccuracy(float val)
+        {
+            if( val < 0 )
+                CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );
+            regressionAccuracy = val;
+        }
+
+        inline int getMaxCategories() const { return maxCategories; }
+        inline int getMaxDepth() const { return maxDepth; }
+        inline int getMinSampleCount() const { return minSampleCount; }
+        inline int getCVFolds() const { return CVFolds; }
+        inline float getRegressionAccuracy() const { return regressionAccuracy; }
+
+        CV_IMPL_PROPERTY(bool, UseSurrogates, useSurrogates)
+        CV_IMPL_PROPERTY(bool, Use1SERule, use1SERule)
+        CV_IMPL_PROPERTY(bool, TruncatePrunedTree, truncatePrunedTree)
+        CV_IMPL_PROPERTY_S(cv::Mat, Priors, priors)
+
+    public:
+        bool useSurrogates;
+        bool use1SERule;
+        bool truncatePrunedTree;
+        Mat priors;
+
+    protected:
+        int maxCategories;
+        int maxDepth;
+        int minSampleCount;
+        int CVFolds;
+        float regressionAccuracy;
+    };
+
+    struct RTreeParams
+    {
+        RTreeParams();
+        RTreeParams(bool calcVarImportance, int nactiveVars, TermCriteria termCrit );
+        bool calcVarImportance;
+        int nactiveVars;
+        TermCriteria termCrit;
+    };
+
+    struct BoostTreeParams
+    {
+        BoostTreeParams();
+        BoostTreeParams(int boostType, int weakCount, double weightTrimRate);
+        int boostType;
+        int weakCount;
+        double weightTrimRate;
+    };
+
     class DTreesImpl : public DTrees
     {
     public:
 
@@ -191,6 +276,16 @@ namespace ml
         int maxSubsetSize;
     };
 
+    CV_WRAP_SAME_PROPERTY(int, MaxCategories, params)
+    CV_WRAP_SAME_PROPERTY(int, MaxDepth, params)
+    CV_WRAP_SAME_PROPERTY(int, MinSampleCount, params)
+    CV_WRAP_SAME_PROPERTY(int, CVFolds, params)
+    CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, params)
+    CV_WRAP_SAME_PROPERTY(bool, Use1SERule, params)
+    CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, params)
+    CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, params)
+    CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, params)
+
     DTreesImpl();
     virtual ~DTreesImpl();
     virtual void clear();
 
@@ -202,8 +297,7 @@ namespace ml
     int getCatCount(int vi) const { return catOfs[vi][1] - catOfs[vi][0]; }
     int getSubsetSize(int vi) const { return (getCatCount(vi) + 31)/32; }
 
-    virtual void setDParams(const Params& _params);
-    virtual Params getDParams() const;
+    virtual void setDParams(const TreeParams& _params);
     virtual void startTraining( const Ptr<TrainData>& trainData, int flags );
     virtual void endTraining();
     virtual void initCompVarIdx();
 
@@ -250,7 +344,7 @@ namespace ml
     virtual const std::vector<Split>& getSplits() const { return splits; }
     virtual const std::vector<int>& getSubsets() const { return subsets; }
 
-    Params params0, params;
+    TreeParams params;
 
     vector<int> varIdx;
     vector<int> compVarIdx;
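
Note: the patch leans on the CV_IMPL_PROPERTY family of macros to generate getter/setter pairs instead of hand-written Params accessors. As a rough illustration only — the real macros live in OpenCV's private headers and may differ in detail — CV_IMPL_PROPERTY(int, MaxDepth, params.maxDepth) expands to something like:

    // Hypothetical expansion, for illustration; see the actual macro definition
    // in the core private headers for the authoritative form.
    int getMaxDepth() const { return params.maxDepth; }
    void setMaxDepth(int val) { params.maxDepth = val; }
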
@@ -48,21 +48,16 @@ namespace ml {
 //////////////////////////////////////////////////////////////////////////////////////////
 // Random trees //
 //////////////////////////////////////////////////////////////////////////////////////////
-RTrees::Params::Params()
-    : DTrees::Params(5, 10, 0.f, false, 10, 0, false, false, Mat())
+RTreeParams::RTreeParams()
 {
     calcVarImportance = false;
     nactiveVars = 0;
     termCrit = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1);
 }
 
-RTrees::Params::Params( int _maxDepth, int _minSampleCount,
-                        double _regressionAccuracy, bool _useSurrogates,
-                        int _maxCategories, const Mat& _priors,
-                        bool _calcVarImportance, int _nactiveVars,
-                        TermCriteria _termCrit )
-    : DTrees::Params(_maxDepth, _minSampleCount, _regressionAccuracy, _useSurrogates,
-                     _maxCategories, 0, false, false, _priors)
+RTreeParams::RTreeParams(bool _calcVarImportance,
+                         int _nactiveVars,
+                         TermCriteria _termCrit )
 {
     calcVarImportance = _calcVarImportance;
     nactiveVars = _nactiveVars;
 
@@ -73,19 +68,20 @@ RTrees::Params::Params( int _maxDepth, int _minSampleCount,
 class DTreesImplForRTrees : public DTreesImpl
 {
 public:
-    DTreesImplForRTrees() {}
+    DTreesImplForRTrees()
+    {
+        params.setMaxDepth(5);
+        params.setMinSampleCount(10);
+        params.setRegressionAccuracy(0.f);
+        params.useSurrogates = false;
+        params.setMaxCategories(10);
+        params.setCVFolds(0);
+        params.use1SERule = false;
+        params.truncatePrunedTree = false;
+        params.priors = Mat();
+    }
     virtual ~DTreesImplForRTrees() {}
 
-    void setRParams(const RTrees::Params& p)
-    {
-        rparams = p;
-    }
-
-    RTrees::Params getRParams() const
-    {
-        return rparams;
-    }
-
     void clear()
     {
         DTreesImpl::clear();
 
@@ -129,10 +125,6 @@ public:
 
     bool train( const Ptr<TrainData>& trainData, int flags )
     {
-        Params dp(rparams.maxDepth, rparams.minSampleCount, rparams.regressionAccuracy,
-                  rparams.useSurrogates, rparams.maxCategories, rparams.CVFolds,
-                  rparams.use1SERule, rparams.truncatePrunedTree, rparams.priors);
-        setDParams(dp);
         startTraining(trainData, flags);
         int treeidx, ntrees = (rparams.termCrit.type & TermCriteria::COUNT) != 0 ?
             rparams.termCrit.maxCount : 10000;
 
@@ -326,12 +318,6 @@ public:
     void readParams( const FileNode& fn )
     {
         DTreesImpl::readParams(fn);
-        rparams.maxDepth = params0.maxDepth;
-        rparams.minSampleCount = params0.minSampleCount;
-        rparams.regressionAccuracy = params0.regressionAccuracy;
-        rparams.useSurrogates = params0.useSurrogates;
-        rparams.maxCategories = params0.maxCategories;
-        rparams.priors = params0.priors;
 
         FileNode tparams_node = fn["training_params"];
         rparams.nactiveVars = (int)tparams_node["nactive_vars"];
 
@@ -361,7 +347,7 @@ public:
         }
     }
 
-    RTrees::Params rparams;
+    RTreeParams rparams;
     double oobError;
     vector<float> varImportance;
     vector<int> allVars, activeVars;
 
@@ -372,6 +358,20 @@ public:
 class RTreesImpl : public RTrees
 {
 public:
+    CV_IMPL_PROPERTY(bool, CalculateVarImportance, impl.rparams.calcVarImportance)
+    CV_IMPL_PROPERTY(int, ActiveVarCount, impl.rparams.nactiveVars)
+    CV_IMPL_PROPERTY_S(TermCriteria, TermCriteria, impl.rparams.termCrit)
+
+    CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params)
+    CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params)
+    CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params)
+    CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params)
+    CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params)
+    CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params)
+    CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params)
+    CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params)
+    CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params)
+
     RTreesImpl() {}
     virtual ~RTreesImpl() {}
 
@@ -397,9 +397,6 @@ public:
         impl.read(fn);
     }
 
-    void setRParams(const Params& p) { impl.setRParams(p); }
-    Params getRParams() const { return impl.getRParams(); }
-
     Mat getVarImportance() const { return Mat_<float>(impl.varImportance, true); }
     int getVarCount() const { return impl.getVarCount(); }
 
@@ -415,11 +412,9 @@ public:
 };
 
 
-Ptr<RTrees> RTrees::create(const Params& params)
+Ptr<RTrees> RTrees::create()
 {
-    Ptr<RTreesImpl> p = makePtr<RTreesImpl>();
-    p->setRParams(params);
-    return p;
+    return makePtr<RTreesImpl>();
 }
 
 }}
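
Note: random-forest options now go through the generated accessors instead of RTrees::Params. A hedged sketch of the new usage, built only from property names visible above (trainData again assumed to be a Ptr<ml::TrainData>):

    Ptr<ml::RTrees> rf = ml::RTrees::create();
    rf->setMaxDepth(5);                    // forwarded into impl.params (TreeParams)
    rf->setCalculateVarImportance(true);
    rf->setActiveVarCount(0);              // 0: let the trainer choose the subset size
    rf->setTermCriteria(TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1));
    rf->train(trainData);
    Mat importance = rf->getVarImportance();
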
@@ -103,54 +103,60 @@ static void checkParamGrid(const ParamGrid& pg)
 }
 
 // SVM training parameters
-SVM::Params::Params()
+struct SvmParams
 {
-    svmType = SVM::C_SVC;
-    kernelType = SVM::RBF;
-    degree = 0;
-    gamma = 1;
-    coef0 = 0;
-    C = 1;
-    nu = 0;
-    p = 0;
-    termCrit = TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON );
-}
+    int svmType;
+    int kernelType;
+    double gamma;
+    double coef0;
+    double degree;
+    double C;
+    double nu;
+    double p;
+    Mat classWeights;
+    TermCriteria termCrit;
 
-SVM::Params::Params( int _svmType, int _kernelType,
+    SvmParams()
+    {
+        svmType = SVM::C_SVC;
+        kernelType = SVM::RBF;
+        degree = 0;
+        gamma = 1;
+        coef0 = 0;
+        C = 1;
+        nu = 0;
+        p = 0;
+        termCrit = TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON );
+    }
+
+    SvmParams( int _svmType, int _kernelType,
                double _degree, double _gamma, double _coef0,
                double _Con, double _nu, double _p,
                const Mat& _classWeights, TermCriteria _termCrit )
     {
         svmType = _svmType;
         kernelType = _kernelType;
         degree = _degree;
         gamma = _gamma;
         coef0 = _coef0;
         C = _Con;
         nu = _nu;
         p = _p;
         classWeights = _classWeights;
         termCrit = _termCrit;
     }
 
+};
+
 /////////////////////////////////////// SVM kernel ///////////////////////////////////////
 class SVMKernelImpl : public SVM::Kernel
 {
 public:
-    SVMKernelImpl()
-    {
-    }
-
-    SVMKernelImpl( const SVM::Params& _params )
+    SVMKernelImpl( const SvmParams& _params = SvmParams() )
     {
         params = _params;
     }
 
-    virtual ~SVMKernelImpl()
-    {
-    }
-
     int getType() const
     {
         return params.kernelType;
 
@@ -327,7 +333,7 @@ public:
         }
     }
 
-    SVM::Params params;
+    SvmParams params;
 };
 
 
@@ -1185,7 +1191,7 @@ public:
         int cache_size;
         int max_cache_size;
         Mat samples;
-        SVM::Params params;
+        SvmParams params;
         vector<KernelRow> lru_cache;
         int lru_first;
         int lru_last;
 
@@ -1215,6 +1221,7 @@ public:
     SVMImpl()
     {
         clear();
+        checkParams();
     }
 
     ~SVMImpl()
 
@@ -1235,33 +1242,69 @@ public:
         return sv;
     }
 
-    void setParams( const Params& _params, const Ptr<Kernel>& _kernel )
+    CV_IMPL_PROPERTY(int, Type, params.svmType)
+    CV_IMPL_PROPERTY(double, Gamma, params.gamma)
+    CV_IMPL_PROPERTY(double, Coef0, params.coef0)
+    CV_IMPL_PROPERTY(double, Degree, params.degree)
+    CV_IMPL_PROPERTY(double, C, params.C)
+    CV_IMPL_PROPERTY(double, Nu, params.nu)
+    CV_IMPL_PROPERTY(double, P, params.p)
+    CV_IMPL_PROPERTY_S(cv::Mat, ClassWeights, params.classWeights)
+    CV_IMPL_PROPERTY_S(cv::TermCriteria, TermCriteria, params.termCrit)
+
+    int getKernelType() const
     {
-        params = _params;
+        return params.kernelType;
+    }
+
+    void setKernel(int kernelType)
+    {
+        params.kernelType = kernelType;
+        if (kernelType != CUSTOM)
+            kernel = makePtr<SVMKernelImpl>(params);
+    }
+
+    void setCustomKernel(const Ptr<Kernel> &_kernel)
+    {
+        params.kernelType = CUSTOM;
+        kernel = _kernel;
+    }
+
+    void checkParams()
+    {
         int kernelType = params.kernelType;
+        if (kernelType != CUSTOM)
+        {
+            if( kernelType != LINEAR && kernelType != POLY &&
+                kernelType != SIGMOID && kernelType != RBF &&
+                kernelType != INTER && kernelType != CHI2)
+                CV_Error( CV_StsBadArg, "Unknown/unsupported kernel type" );
+
+            if( kernelType == LINEAR )
+                params.gamma = 1;
+            else if( params.gamma <= 0 )
+                CV_Error( CV_StsOutOfRange, "gamma parameter of the kernel must be positive" );
+
+            if( kernelType != SIGMOID && kernelType != POLY )
+                params.coef0 = 0;
+            else if( params.coef0 < 0 )
+                CV_Error( CV_StsOutOfRange, "The kernel parameter <coef0> must be positive or zero" );
+
+            if( kernelType != POLY )
+                params.degree = 0;
+            else if( params.degree <= 0 )
+                CV_Error( CV_StsOutOfRange, "The kernel parameter <degree> must be positive" );
+
+            kernel = makePtr<SVMKernelImpl>(params);
+        }
+        else
+        {
+            if (!kernel)
+                CV_Error( CV_StsBadArg, "Custom kernel is not set" );
+        }
+
         int svmType = params.svmType;
 
-        if( kernelType != LINEAR && kernelType != POLY &&
-            kernelType != SIGMOID && kernelType != RBF &&
-            kernelType != INTER && kernelType != CHI2)
-            CV_Error( CV_StsBadArg, "Unknown/unsupported kernel type" );
-
-        if( kernelType == LINEAR )
-            params.gamma = 1;
-        else if( params.gamma <= 0 )
-            CV_Error( CV_StsOutOfRange, "gamma parameter of the kernel must be positive" );
-
-        if( kernelType != SIGMOID && kernelType != POLY )
-            params.coef0 = 0;
-        else if( params.coef0 < 0 )
-            CV_Error( CV_StsOutOfRange, "The kernel parameter <coef0> must be positive or zero" );
-
-        if( kernelType != POLY )
-            params.degree = 0;
-        else if( params.degree <= 0 )
-            CV_Error( CV_StsOutOfRange, "The kernel parameter <degree> must be positive" );
-
         if( svmType != C_SVC && svmType != NU_SVC &&
             svmType != ONE_CLASS && svmType != EPS_SVR &&
             svmType != NU_SVR )
 
@@ -1285,28 +1328,18 @@ public:
         if( svmType != C_SVC )
             params.classWeights.release();
 
-        termCrit = params.termCrit;
-        if( !(termCrit.type & TermCriteria::EPS) )
-            termCrit.epsilon = DBL_EPSILON;
-        termCrit.epsilon = std::max(termCrit.epsilon, DBL_EPSILON);
-        if( !(termCrit.type & TermCriteria::COUNT) )
-            termCrit.maxCount = INT_MAX;
-        termCrit.maxCount = std::max(termCrit.maxCount, 1);
-
-        if( _kernel )
-            kernel = _kernel;
-        else
-            kernel = makePtr<SVMKernelImpl>(params);
+        if( !(params.termCrit.type & TermCriteria::EPS) )
+            params.termCrit.epsilon = DBL_EPSILON;
+        params.termCrit.epsilon = std::max(params.termCrit.epsilon, DBL_EPSILON);
+        if( !(params.termCrit.type & TermCriteria::COUNT) )
+            params.termCrit.maxCount = INT_MAX;
+        params.termCrit.maxCount = std::max(params.termCrit.maxCount, 1);
     }
 
-    Params getParams() const
+    void setParams( const SvmParams& _params)
     {
-        return params;
-    }
-
-    Ptr<Kernel> getKernel() const
-    {
-        return kernel;
+        params = _params;
+        checkParams();
     }
 
     int getSVCount(int i) const
 
@@ -1335,9 +1368,9 @@ public:
         _responses.convertTo(_yf, CV_32F);
 
         bool ok =
-        svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, termCrit ) :
-        svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, termCrit ) :
-        svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, termCrit ) : false;
+        svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, params.termCrit ) :
+        svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, params.termCrit ) :
+        svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, params.termCrit ) : false;
 
         if( !ok )
             return false;
 
@@ -1397,7 +1430,7 @@ public:
         //check that while cross-validation there were the samples from all the classes
         if( class_ranges[class_count] <= 0 )
             CV_Error( CV_StsBadArg, "While cross-validation one or more of the classes have "
-                      "been fell out of the sample. Try to enlarge <CvSVMParams::k_fold>" );
+                      "been fell out of the sample. Try to enlarge <Params::k_fold>" );
 
         if( svmType == NU_SVC )
         {
 
@@ -1448,10 +1481,10 @@ public:
             DecisionFunc df;
             bool ok = params.svmType == C_SVC ?
                         Solver::solve_c_svc( temp_samples, temp_y, Cp, Cn,
-                                             kernel, _alpha, sinfo, termCrit ) :
+                                             kernel, _alpha, sinfo, params.termCrit ) :
                       params.svmType == NU_SVC ?
                         Solver::solve_nu_svc( temp_samples, temp_y, params.nu,
-                                              kernel, _alpha, sinfo, termCrit ) :
+                                              kernel, _alpha, sinfo, params.termCrit ) :
                       false;
             if( !ok )
                 return false;
 
@@ -1557,6 +1590,8 @@ public:
     {
         clear();
 
+        checkParams();
+
         int svmType = params.svmType;
         Mat samples = data->getTrainSamples();
         Mat responses;
 
@@ -1586,6 +1621,8 @@ public:
                     ParamGrid nu_grid, ParamGrid coef_grid, ParamGrid degree_grid,
                     bool balanced )
     {
+        checkParams();
+
        int svmType = params.svmType;
         RNG rng((uint64)-1);
 
@@ -1708,7 +1745,7 @@ public:
         int test_sample_count = (sample_count + k_fold/2)/k_fold;
         int train_sample_count = sample_count - test_sample_count;
 
-        Params best_params = params;
+        SvmParams best_params = params;
         double min_error = FLT_MAX;
 
         int rtype = responses.type();
 
@@ -1729,7 +1766,7 @@ public:
         FOR_IN_GRID(degree, degree_grid)
         {
             // make sure we updated the kernel and other parameters
-            setParams(params, Ptr<Kernel>() );
+            setParams(params);
 
             double error = 0;
             for( k = 0; k < k_fold; k++ )
 
@@ -1919,7 +1956,9 @@ public:
             kernelType == LINEAR ? "LINEAR" :
             kernelType == POLY ? "POLY" :
             kernelType == RBF ? "RBF" :
-            kernelType == SIGMOID ? "SIGMOID" : format("Unknown_%d", kernelType);
+            kernelType == SIGMOID ? "SIGMOID" :
+            kernelType == CHI2 ? "CHI2" :
+            kernelType == INTER ? "INTER" : format("Unknown_%d", kernelType);
 
         fs << "svmType" << svm_type_str;
 
@@ -2036,7 +2075,7 @@ public:
 
     void read_params( const FileNode& fn )
     {
-        Params _params;
+        SvmParams _params;
 
         // check for old naming
        String svm_type_str = (String)(fn["svm_type"].empty() ? fn["svmType"] : fn["svm_type"]);
 
@@ -2059,10 +2098,12 @@ public:
             kernel_type_str == "LINEAR" ? LINEAR :
             kernel_type_str == "POLY" ? POLY :
             kernel_type_str == "RBF" ? RBF :
-            kernel_type_str == "SIGMOID" ? SIGMOID : -1;
+            kernel_type_str == "SIGMOID" ? SIGMOID :
+            kernel_type_str == "CHI2" ? CHI2 :
+            kernel_type_str == "INTER" ? INTER : CUSTOM;
 
-        if( kernelType < 0 )
-            CV_Error( CV_StsParseError, "Missing of invalid SVM kernel type" );
+        if( kernelType == CUSTOM )
+            CV_Error( CV_StsParseError, "Invalid SVM kernel type (or custom kernel)" );
 
         _params.svmType = svmType;
         _params.kernelType = kernelType;
 
@@ -2086,7 +2127,7 @@ public:
         else
             _params.termCrit = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 1000, FLT_EPSILON );
 
-        setParams( _params, Ptr<Kernel>() );
+        setParams( _params );
     }
 
     void read( const FileNode& fn )
 
@@ -2154,8 +2195,7 @@ public:
         optimize_linear_svm();
     }
 
-    Params params;
-    TermCriteria termCrit;
+    SvmParams params;
     Mat class_labels;
     int var_count;
     Mat sv;
 
@@ -2167,11 +2207,9 @@ public:
 };
 
 
-Ptr<SVM> SVM::create(const Params& params, const Ptr<SVM::Kernel>& kernel)
+Ptr<SVM> SVM::create()
 {
-    Ptr<SVMImpl> p = makePtr<SVMImpl>();
-    p->setParams(params, kernel);
-    return p;
+    return makePtr<SVMImpl>();
 }
 
 }
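
Note: SVM::create() no longer takes a Params/kernel pair; the kernel is selected with setKernel() or supplied via setCustomKernel(), and checkParams() now runs inside train() itself. A hedged sketch using only methods shown above (trainData assumed as before, myKernel a hypothetical user-supplied Ptr<ml::SVM::Kernel>):

    Ptr<ml::SVM> svm = ml::SVM::create();
    svm->setType(ml::SVM::C_SVC);
    svm->setKernel(ml::SVM::RBF);   // rebuilds the built-in kernel from the current params
    svm->setGamma(0.5);
    svm->setC(1.0);
    svm->train(trainData);          // train() calls checkParams() before solving
    // a user-defined kernel goes through svm->setCustomKernel(myKernel) instead
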
@ -48,18 +48,7 @@ namespace ml {
|
|||||||
|
|
||||||
using std::vector;
|
using std::vector;
|
||||||
|
|
||||||
void DTrees::setDParams(const DTrees::Params&)
|
TreeParams::TreeParams()
|
||||||
{
|
|
||||||
CV_Error(CV_StsNotImplemented, "");
|
|
||||||
}
|
|
||||||
|
|
||||||
DTrees::Params DTrees::getDParams() const
|
|
||||||
{
|
|
||||||
CV_Error(CV_StsNotImplemented, "");
|
|
||||||
return DTrees::Params();
|
|
||||||
}
|
|
||||||
|
|
||||||
DTrees::Params::Params()
|
|
||||||
{
|
{
|
||||||
maxDepth = INT_MAX;
|
maxDepth = INT_MAX;
|
||||||
minSampleCount = 10;
|
minSampleCount = 10;
|
||||||
@ -72,11 +61,11 @@ DTrees::Params::Params()
|
|||||||
priors = Mat();
|
priors = Mat();
|
||||||
}
|
}
|
||||||
|
|
||||||
DTrees::Params::Params( int _maxDepth, int _minSampleCount,
|
TreeParams::TreeParams(int _maxDepth, int _minSampleCount,
|
||||||
double _regressionAccuracy, bool _useSurrogates,
|
double _regressionAccuracy, bool _useSurrogates,
|
||||||
int _maxCategories, int _CVFolds,
|
int _maxCategories, int _CVFolds,
|
||||||
bool _use1SERule, bool _truncatePrunedTree,
|
bool _use1SERule, bool _truncatePrunedTree,
|
||||||
const Mat& _priors )
|
const Mat& _priors)
|
||||||
{
|
{
|
||||||
maxDepth = _maxDepth;
|
maxDepth = _maxDepth;
|
||||||
minSampleCount = _minSampleCount;
|
minSampleCount = _minSampleCount;
|
||||||
@ -248,7 +237,7 @@ const vector<int>& DTreesImpl::getActiveVars()
|
|||||||
|
|
||||||
int DTreesImpl::addTree(const vector<int>& sidx )
|
int DTreesImpl::addTree(const vector<int>& sidx )
|
||||||
{
|
{
|
||||||
size_t n = (params.maxDepth > 0 ? (1 << params.maxDepth) : 1024) + w->wnodes.size();
|
size_t n = (params.getMaxDepth() > 0 ? (1 << params.getMaxDepth()) : 1024) + w->wnodes.size();
|
||||||
|
|
||||||
w->wnodes.reserve(n);
|
w->wnodes.reserve(n);
|
||||||
w->wsplits.reserve(n);
|
w->wsplits.reserve(n);
|
||||||
@ -257,7 +246,7 @@ int DTreesImpl::addTree(const vector<int>& sidx )
|
|||||||
w->wsplits.clear();
|
w->wsplits.clear();
|
||||||
w->wsubsets.clear();
|
w->wsubsets.clear();
|
||||||
|
|
||||||
int cv_n = params.CVFolds;
|
int cv_n = params.getCVFolds();
|
||||||
|
|
||||||
if( cv_n > 0 )
|
if( cv_n > 0 )
|
||||||
{
|
{
|
||||||
@ -347,34 +336,9 @@ int DTreesImpl::addTree(const vector<int>& sidx )
|
|||||||
return root;
|
return root;
|
||||||
}
|
}
|
||||||
|
|
||||||
DTrees::Params DTreesImpl::getDParams() const
|
void DTreesImpl::setDParams(const TreeParams& _params)
|
||||||
{
|
{
|
||||||
return params0;
|
params = _params;
|
||||||
}
|
|
||||||
|
|
||||||
void DTreesImpl::setDParams(const Params& _params)
|
|
||||||
{
|
|
||||||
params0 = params = _params;
|
|
||||||
if( params.maxCategories < 2 )
|
|
||||||
CV_Error( CV_StsOutOfRange, "params.max_categories should be >= 2" );
|
|
||||||
params.maxCategories = std::min( params.maxCategories, 15 );
|
|
||||||
|
|
||||||
if( params.maxDepth < 0 )
|
|
||||||
CV_Error( CV_StsOutOfRange, "params.max_depth should be >= 0" );
|
|
||||||
params.maxDepth = std::min( params.maxDepth, 25 );
|
|
||||||
|
|
||||||
params.minSampleCount = std::max(params.minSampleCount, 1);
|
|
||||||
|
|
||||||
if( params.CVFolds < 0 )
|
|
||||||
CV_Error( CV_StsOutOfRange,
|
|
||||||
"params.CVFolds should be =0 (the tree is not pruned) "
|
|
||||||
"or n>0 (tree is pruned using n-fold cross-validation)" );
|
|
||||||
|
|
||||||
if( params.CVFolds == 1 )
|
|
||||||
params.CVFolds = 0;
|
|
||||||
|
|
||||||
if( params.regressionAccuracy < 0 )
|
|
||||||
CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
|
int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
|
||||||
@ -385,7 +349,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
|
|||||||
|
|
||||||
node.parent = parent;
|
node.parent = parent;
|
||||||
node.depth = parent >= 0 ? w->wnodes[parent].depth + 1 : 0;
|
node.depth = parent >= 0 ? w->wnodes[parent].depth + 1 : 0;
|
||||||
int nfolds = params.CVFolds;
|
int nfolds = params.getCVFolds();
|
||||||
|
|
||||||
if( nfolds > 0 )
|
if( nfolds > 0 )
|
||||||
{
|
{
|
||||||
@ -400,7 +364,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
|
|||||||
|
|
||||||
calcValue( nidx, sidx );
|
calcValue( nidx, sidx );
|
||||||
|
|
||||||
if( n <= params.minSampleCount || node.depth >= params.maxDepth )
|
if( n <= params.getMinSampleCount() || node.depth >= params.getMaxDepth() )
|
||||||
can_split = false;
|
can_split = false;
|
||||||
else if( _isClassifier )
|
else if( _isClassifier )
|
||||||
{
|
{
|
||||||
@ -415,7 +379,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
if( sqrt(node.node_risk) < params.regressionAccuracy )
|
if( sqrt(node.node_risk) < params.getRegressionAccuracy() )
|
||||||
can_split = false;
|
can_split = false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -493,7 +457,7 @@ int DTreesImpl::findBestSplit( const vector<int>& _sidx )
|
|||||||
void DTreesImpl::calcValue( int nidx, const vector<int>& _sidx )
|
void DTreesImpl::calcValue( int nidx, const vector<int>& _sidx )
|
||||||
{
|
{
|
||||||
WNode* node = &w->wnodes[nidx];
|
WNode* node = &w->wnodes[nidx];
|
||||||
int i, j, k, n = (int)_sidx.size(), cv_n = params.CVFolds;
|
int i, j, k, n = (int)_sidx.size(), cv_n = params.getCVFolds();
|
||||||
int m = (int)classLabels.size();
|
int m = (int)classLabels.size();
|
||||||
|
|
||||||
cv::AutoBuffer<double> buf(std::max(m, 3)*(cv_n+1));
|
cv::AutoBuffer<double> buf(std::max(m, 3)*(cv_n+1));
|
||||||
@ -841,8 +805,8 @@ DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector<int>& _si
|
|||||||
int m = (int)classLabels.size();
|
int m = (int)classLabels.size();
|
||||||
|
|
||||||
int base_size = m*(3 + mi) + mi + 1;
|
int base_size = m*(3 + mi) + mi + 1;
|
||||||
if( m > 2 && mi > params.maxCategories )
|
if( m > 2 && mi > params.getMaxCategories() )
|
||||||
base_size += m*std::min(params.maxCategories, n) + mi;
|
base_size += m*std::min(params.getMaxCategories(), n) + mi;
|
||||||
else
|
else
|
||||||
base_size += mi;
|
base_size += mi;
|
||||||
AutoBuffer<double> buf(base_size + n);
|
AutoBuffer<double> buf(base_size + n);
|
||||||
@ -880,9 +844,9 @@ DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector<int>& _si
|
|||||||
|
|
||||||
if( m > 2 )
|
if( m > 2 )
|
||||||
{
|
{
|
||||||
if( mi > params.maxCategories )
|
if( mi > params.getMaxCategories() )
|
||||||
{
|
{
|
||||||
mi = std::min(params.maxCategories, n);
|
mi = std::min(params.getMaxCategories(), n);
|
||||||
cjk = c_weights + _mi;
|
cjk = c_weights + _mi;
|
||||||
cluster_labels = (int*)(cjk + m*mi);
|
cluster_labels = (int*)(cjk + m*mi);
|
||||||
clusterCategories( _cjk, _mi, m, cjk, mi, cluster_labels );
|
clusterCategories( _cjk, _mi, m, cjk, mi, cluster_labels );
|
||||||
@ -1228,7 +1192,7 @@ int DTreesImpl::pruneCV( int root )
|
|||||||
// 2. choose the best tree index (if need, apply 1SE rule).
|
// 2. choose the best tree index (if need, apply 1SE rule).
|
||||||
// 3. store the best index and cut the branches.
|
// 3. store the best index and cut the branches.
|
||||||
|
|
||||||
int ti, tree_count = 0, j, cv_n = params.CVFolds, n = w->wnodes[root].sample_count;
|
int ti, tree_count = 0, j, cv_n = params.getCVFolds(), n = w->wnodes[root].sample_count;
|
||||||
// currently, 1SE for regression is not implemented
|
// currently, 1SE for regression is not implemented
|
||||||
bool use_1se = params.use1SERule != 0 && _isClassifier;
|
bool use_1se = params.use1SERule != 0 && _isClassifier;
|
||||||
double min_err = 0, min_err_se = 0;
|
double min_err = 0, min_err_se = 0;
|
||||||
@ -1294,7 +1258,7 @@ int DTreesImpl::pruneCV( int root )
|
|||||||
|
|
||||||
double DTreesImpl::updateTreeRNC( int root, double T, int fold )
|
double DTreesImpl::updateTreeRNC( int root, double T, int fold )
|
||||||
{
|
{
|
||||||
int nidx = root, pidx = -1, cv_n = params.CVFolds;
|
int nidx = root, pidx = -1, cv_n = params.getCVFolds();
|
||||||
double min_alpha = DBL_MAX;
|
     double min_alpha = DBL_MAX;

     for(;;)
@@ -1350,7 +1314,7 @@ double DTreesImpl::updateTreeRNC( int root, double T, int fold )

 bool DTreesImpl::cutTree( int root, double T, int fold, double min_alpha )
 {
-    int cv_n = params.CVFolds, nidx = root, pidx = -1;
+    int cv_n = params.getCVFolds(), nidx = root, pidx = -1;
     WNode* node = &w->wnodes[root];
     if( node->left < 0 )
         return true;
@@ -1560,19 +1524,19 @@ float DTreesImpl::predict( InputArray _samples, OutputArray _results, int flags

 void DTreesImpl::writeTrainingParams(FileStorage& fs) const
 {
-    fs << "use_surrogates" << (params0.useSurrogates ? 1 : 0);
-    fs << "max_categories" << params0.maxCategories;
-    fs << "regression_accuracy" << params0.regressionAccuracy;
+    fs << "use_surrogates" << (params.useSurrogates ? 1 : 0);
+    fs << "max_categories" << params.getMaxCategories();
+    fs << "regression_accuracy" << params.getRegressionAccuracy();

-    fs << "max_depth" << params0.maxDepth;
-    fs << "min_sample_count" << params0.minSampleCount;
-    fs << "cross_validation_folds" << params0.CVFolds;
+    fs << "max_depth" << params.getMaxDepth();
+    fs << "min_sample_count" << params.getMinSampleCount();
+    fs << "cross_validation_folds" << params.getCVFolds();

-    if( params0.CVFolds > 1 )
-        fs << "use_1se_rule" << (params0.use1SERule ? 1 : 0);
+    if( params.getCVFolds() > 1 )
+        fs << "use_1se_rule" << (params.use1SERule ? 1 : 0);

-    if( !params0.priors.empty() )
-        fs << "priors" << params0.priors;
+    if( !params.priors.empty() )
+        fs << "priors" << params.priors;
 }

 void DTreesImpl::writeParams(FileStorage& fs) const
@@ -1724,18 +1688,18 @@ void DTreesImpl::readParams( const FileNode& fn )

     FileNode tparams_node = fn["training_params"];

-    params0 = Params();
+    TreeParams params0 = TreeParams();

     if( !tparams_node.empty() ) // training parameters are not necessary
     {
         params0.useSurrogates = (int)tparams_node["use_surrogates"] != 0;
-        params0.maxCategories = (int)(tparams_node["max_categories"].empty() ? 16 : tparams_node["max_categories"]);
-        params0.regressionAccuracy = (float)tparams_node["regression_accuracy"];
-        params0.maxDepth = (int)tparams_node["max_depth"];
-        params0.minSampleCount = (int)tparams_node["min_sample_count"];
-        params0.CVFolds = (int)tparams_node["cross_validation_folds"];
+        params0.setMaxCategories((int)(tparams_node["max_categories"].empty() ? 16 : tparams_node["max_categories"]));
+        params0.setRegressionAccuracy((float)tparams_node["regression_accuracy"]);
+        params0.setMaxDepth((int)tparams_node["max_depth"]);
+        params0.setMinSampleCount((int)tparams_node["min_sample_count"]);
+        params0.setCVFolds((int)tparams_node["cross_validation_folds"]);

-        if( params0.CVFolds > 1 )
+        if( params0.getCVFolds() > 1 )
         {
             params.use1SERule = (int)tparams_node["use_1se_rule"] != 0;
         }
@@ -1964,11 +1928,9 @@ void DTreesImpl::read( const FileNode& fn )
         readTree(fnodes);
 }

-Ptr<DTrees> DTrees::create(const DTrees::Params& params)
+Ptr<DTrees> DTrees::create()
 {
-    Ptr<DTreesImpl> p = makePtr<DTreesImpl>();
-    p->setDParams(params);
-    return p;
+    return makePtr<DTreesImpl>();
 }

 }
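For reference, a minimal sketch of the parameter-less factory introduced above (the values and the TrainData harness are illustrative, not part of this patch):

#include "opencv2/ml.hpp"
using namespace cv::ml;

// Sketch only: DTrees::create() no longer takes a Params struct; every field
// that used to live in DTrees::Params is now an individual setter.
cv::Ptr<DTrees> buildTree(const cv::Ptr<TrainData>& data) // data assumed prepared
{
    cv::Ptr<DTrees> dtree = DTrees::create();
    dtree->setMaxDepth(8);
    dtree->setMinSampleCount(2);
    dtree->setCVFolds(0);            // 0 disables built-in cross-validation
    dtree->train(data);
    return dtree;
}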
@@ -330,7 +330,8 @@ void CV_KNearestTest::run( int /*start_from*/ )
     }

     // KNearest KDTree implementation
-    Ptr<KNearest> knearestKdt = KNearest::create(ml::KNearest::Params(10, true, INT_MAX, ml::KNearest::KDTREE));
+    Ptr<KNearest> knearestKdt = KNearest::create();
+    knearestKdt->setAlgorithmType(KNearest::KDTREE);
     knearestKdt->train(trainData, ml::ROW_SAMPLE, trainLabels);
     knearestKdt->findNearest(testData, 4, bestLabels);
     if( !calcErr( bestLabels, testLabels, sizes, err, true ) )
@@ -394,16 +395,18 @@ int CV_EMTest::runCase( int caseIndex, const EM_Params& params,
     cv::Mat labels;
     float err;

-    Ptr<EM> em;
-    EM::Params emp(params.nclusters, params.covMatType, params.termCrit);
+    Ptr<EM> em = EM::create();
+    em->setClustersNumber(params.nclusters);
+    em->setCovarianceMatrixType(params.covMatType);
+    em->setTermCriteria(params.termCrit);
     if( params.startStep == EM::START_AUTO_STEP )
-        em = EM::train( trainData, noArray(), labels, noArray(), emp );
+        em->trainEM( trainData, noArray(), labels, noArray() );
     else if( params.startStep == EM::START_E_STEP )
-        em = EM::train_startWithE( trainData, *params.means, *params.covs,
-                                   *params.weights, noArray(), labels, noArray(), emp );
+        em->trainE( trainData, *params.means, *params.covs,
+                    *params.weights, noArray(), labels, noArray() );
     else if( params.startStep == EM::START_M_STEP )
-        em = EM::train_startWithM( trainData, *params.probs,
-                                   noArray(), labels, noArray(), emp );
+        em->trainM( trainData, *params.probs,
+                    noArray(), labels, noArray() );

     // check train error
     if( !calcErr( labels, trainLabels, sizes, err , false, false ) )
@@ -543,7 +546,9 @@ protected:

         Mat labels;

-        Ptr<EM> em = EM::train(samples, noArray(), labels, noArray(), EM::Params(nclusters));
+        Ptr<EM> em = EM::create();
+        em->setClustersNumber(nclusters);
+        em->trainEM(samples, noArray(), labels, noArray());

         Mat firstResult(samples.rows, 1, CV_32SC1);
         for( int i = 0; i < samples.rows; i++)
@@ -644,8 +649,13 @@ protected:
                 samples1.push_back(sample);
             }
         }
-        Ptr<EM> model0 = EM::train(samples0, noArray(), noArray(), noArray(), EM::Params(3));
-        Ptr<EM> model1 = EM::train(samples1, noArray(), noArray(), noArray(), EM::Params(3));
+        Ptr<EM> model0 = EM::create();
+        model0->setClustersNumber(3);
+        model0->trainEM(samples0, noArray(), noArray(), noArray());
+
+        Ptr<EM> model1 = EM::create();
+        model1->setClustersNumber(3);
+        model1->trainEM(samples1, noArray(), noArray(), noArray());

         Mat trainConfusionMat(2, 2, CV_32SC1, Scalar(0)),
             testConfusionMat(2, 2, CV_32SC1, Scalar(0));
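A minimal sketch of the reworked EM flow shown in these hunks (synthetic data; values are illustrative only):

#include "opencv2/core.hpp"
#include "opencv2/ml.hpp"

void emSketch()
{
    cv::Mat samples(100, 2, CV_64FC1);
    cv::randu(samples, 0.0, 1.0);                  // synthetic samples, one per row
    cv::Ptr<cv::ml::EM> em = cv::ml::EM::create();
    em->setClustersNumber(3);                      // replaces EM::Params(3)
    cv::Mat logLikelihoods, labels, probs;
    em->trainEM(samples, logLikelihoods, labels, probs);
}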
@@ -95,16 +95,13 @@ void CV_LRTest::run( int /*start_from*/ )
     string dataFileName = ts->get_data_path() + "iris.data";
     Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);

-    LogisticRegression::Params params = LogisticRegression::Params();
-    params.alpha = 1.0;
-    params.num_iters = 10001;
-    params.norm = LogisticRegression::REG_L2;
-    params.regularized = 1;
-    params.train_method = LogisticRegression::BATCH;
-    params.mini_batch_size = 10;
-
     // run LR classifier train classifier
-    Ptr<LogisticRegression> p = LogisticRegression::create(params);
+    Ptr<LogisticRegression> p = LogisticRegression::create();
+    p->setLearningRate(1.0);
+    p->setIterations(10001);
+    p->setRegularization(LogisticRegression::REG_L2);
+    p->setTrainMethod(LogisticRegression::BATCH);
+    p->setMiniBatchSize(10);
     p->train(tdata);

     // predict using the same data
@@ -157,20 +154,17 @@ void CV_LRTest_SaveLoad::run( int /*start_from*/ )
     Mat responses1, responses2;
     Mat learnt_mat1, learnt_mat2;

-    LogisticRegression::Params params1 = LogisticRegression::Params();
-    params1.alpha = 1.0;
-    params1.num_iters = 10001;
-    params1.norm = LogisticRegression::REG_L2;
-    params1.regularized = 1;
-    params1.train_method = LogisticRegression::BATCH;
-    params1.mini_batch_size = 10;
-
     // train and save the classifier
     String filename = tempfile(".xml");
     try
     {
         // run LR classifier train classifier
-        Ptr<LogisticRegression> lr1 = LogisticRegression::create(params1);
+        Ptr<LogisticRegression> lr1 = LogisticRegression::create();
+        lr1->setLearningRate(1.0);
+        lr1->setIterations(10001);
+        lr1->setRegularization(LogisticRegression::REG_L2);
+        lr1->setTrainMethod(LogisticRegression::BATCH);
+        lr1->setMiniBatchSize(10);
         lr1->train(tdata);
         lr1->predict(tdata->getSamples(), responses1);
         learnt_mat1 = lr1->get_learnt_thetas();
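The same pattern applies to LogisticRegression; a sketch (the CSV path and its column layout are assumed):

#include "opencv2/ml.hpp"
using namespace cv::ml;

void lrSketch()
{
    // "iris.data" is a placeholder path; loadFromCSV(file, headerLineCount).
    cv::Ptr<TrainData> tdata = TrainData::loadFromCSV("iris.data", 0);
    cv::Ptr<LogisticRegression> lr = LogisticRegression::create();
    lr->setLearningRate(1.0);                  // was Params::alpha
    lr->setIterations(10001);                  // was Params::num_iters
    lr->setRegularization(LogisticRegression::REG_L2);
    lr->train(tdata);
}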
@@ -73,30 +73,14 @@ int str_to_svm_kernel_type( String& str )
     return -1;
 }

-Ptr<SVM> svm_train_auto( Ptr<TrainData> _data, SVM::Params _params,
-                         int k_fold, ParamGrid C_grid, ParamGrid gamma_grid,
-                         ParamGrid p_grid, ParamGrid nu_grid, ParamGrid coef_grid,
-                         ParamGrid degree_grid )
-{
-    Mat _train_data = _data->getSamples();
-    Mat _responses = _data->getResponses();
-    Mat _var_idx = _data->getVarIdx();
-    Mat _sample_idx = _data->getTrainSampleIdx();
-
-    Ptr<SVM> svm = SVM::create(_params);
-    if( svm->trainAuto( _data, k_fold, C_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid ) )
-        return svm;
-    return Ptr<SVM>();
-}
-
 // 4. em
 // 5. ann
 int str_to_ann_train_method( String& str )
 {
     if( !str.compare("BACKPROP") )
-        return ANN_MLP::Params::BACKPROP;
+        return ANN_MLP::BACKPROP;
     if( !str.compare("RPROP") )
-        return ANN_MLP::Params::RPROP;
+        return ANN_MLP::RPROP;
     CV_Error( CV_StsBadArg, "incorrect ann train method string" );
     return -1;
 }
@@ -343,16 +327,16 @@ int CV_MLBaseTest::train( int testCaseIdx )
         String svm_type_str, kernel_type_str;
         modelParamsNode["svm_type"] >> svm_type_str;
         modelParamsNode["kernel_type"] >> kernel_type_str;
-        SVM::Params params;
-        params.svmType = str_to_svm_type( svm_type_str );
-        params.kernelType = str_to_svm_kernel_type( kernel_type_str );
-        modelParamsNode["degree"] >> params.degree;
-        modelParamsNode["gamma"] >> params.gamma;
-        modelParamsNode["coef0"] >> params.coef0;
-        modelParamsNode["C"] >> params.C;
-        modelParamsNode["nu"] >> params.nu;
-        modelParamsNode["p"] >> params.p;
-        model = SVM::create(params);
+        Ptr<SVM> m = SVM::create();
+        m->setType(str_to_svm_type( svm_type_str ));
+        m->setKernel(str_to_svm_kernel_type( kernel_type_str ));
+        m->setDegree(modelParamsNode["degree"]);
+        m->setGamma(modelParamsNode["gamma"]);
+        m->setCoef0(modelParamsNode["coef0"]);
+        m->setC(modelParamsNode["C"]);
+        m->setNu(modelParamsNode["nu"]);
+        m->setP(modelParamsNode["p"]);
+        model = m;
     }
     else if( modelName == CV_EM )
     {
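A sketch of the equivalent standalone SVM setup (training data assumed prepared elsewhere; values illustrative):

#include "opencv2/ml.hpp"
using namespace cv::ml;

cv::Ptr<SVM> svmSketch(const cv::Ptr<TrainData>& data)
{
    cv::Ptr<SVM> svm = SVM::create();          // SVM::Params is gone
    svm->setType(SVM::C_SVC);
    svm->setKernel(SVM::RBF);
    svm->setC(2.5);
    svm->setGamma(0.1);
    svm->train(data);
    return svm;
}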
@@ -371,9 +355,13 @@ int CV_MLBaseTest::train( int testCaseIdx )
             data->getVarIdx(), data->getTrainSampleIdx());
         int layer_sz[] = { data->getNAllVars(), 100, 100, (int)cls_map.size() };
         Mat layer_sizes( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
-        model = ANN_MLP::create(ANN_MLP::Params(layer_sizes, ANN_MLP::SIGMOID_SYM, 0, 0,
-                                TermCriteria(TermCriteria::COUNT,300,0.01),
-                                str_to_ann_train_method(train_method_str), param1, param2));
+        Ptr<ANN_MLP> m = ANN_MLP::create();
+        m->setLayerSizes(layer_sizes);
+        m->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
+        m->setTermCriteria(TermCriteria(TermCriteria::COUNT,300,0.01));
+        m->setTrainMethod(str_to_ann_train_method(train_method_str), param1, param2);
+        model = m;
+
     }
     else if( modelName == CV_DTREE )
     {
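And for ANN_MLP, where the layer layout, activation, and train method become separate calls; a sketch (layer sizes and learning parameters are illustrative):

#include "opencv2/ml.hpp"
using namespace cv::ml;

cv::Ptr<ANN_MLP> mlpSketch(const cv::Ptr<TrainData>& data) // data assumed prepared
{
    int sz[] = { 2, 8, 1 };                    // input / hidden / output sizes
    cv::Mat layers(1, 3, CV_32S, sz);
    cv::Ptr<ANN_MLP> nn = ANN_MLP::create();
    nn->setLayerSizes(layers);
    nn->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
    nn->setTrainMethod(ANN_MLP::BACKPROP, 0.1, 0.1);
    nn->train(data);
    return nn;
}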
@@ -386,8 +374,18 @@ int CV_MLBaseTest::train( int testCaseIdx )
         modelParamsNode["max_categories"] >> MAX_CATEGORIES;
         modelParamsNode["cv_folds"] >> CV_FOLDS;
         modelParamsNode["is_pruned"] >> IS_PRUNED;
-        model = DTrees::create(DTrees::Params(MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY, USE_SURROGATE,
-                               MAX_CATEGORIES, CV_FOLDS, false, IS_PRUNED, Mat() ));
+        Ptr<DTrees> m = DTrees::create();
+        m->setMaxDepth(MAX_DEPTH);
+        m->setMinSampleCount(MIN_SAMPLE_COUNT);
+        m->setRegressionAccuracy(REG_ACCURACY);
+        m->setUseSurrogates(USE_SURROGATE);
+        m->setMaxCategories(MAX_CATEGORIES);
+        m->setCVFolds(CV_FOLDS);
+        m->setUse1SERule(false);
+        m->setTruncatePrunedTree(IS_PRUNED);
+        m->setPriors(Mat());
+        model = m;
     }
     else if( modelName == CV_BOOST )
     {
@@ -401,7 +399,15 @@ int CV_MLBaseTest::train( int testCaseIdx )
         modelParamsNode["weight_trim_rate"] >> WEIGHT_TRIM_RATE;
         modelParamsNode["max_depth"] >> MAX_DEPTH;
         //modelParamsNode["use_surrogate"] >> USE_SURROGATE;
-        model = Boost::create( Boost::Params(BOOST_TYPE, WEAK_COUNT, WEIGHT_TRIM_RATE, MAX_DEPTH, USE_SURROGATE, Mat()) );
+
+        Ptr<Boost> m = Boost::create();
+        m->setBoostType(BOOST_TYPE);
+        m->setWeakCount(WEAK_COUNT);
+        m->setWeightTrimRate(WEIGHT_TRIM_RATE);
+        m->setMaxDepth(MAX_DEPTH);
+        m->setUseSurrogates(USE_SURROGATE);
+        m->setPriors(Mat());
+        model = m;
     }
     else if( modelName == CV_RTREES )
     {
@@ -416,9 +422,18 @@ int CV_MLBaseTest::train( int testCaseIdx )
         modelParamsNode["is_pruned"] >> IS_PRUNED;
         modelParamsNode["nactive_vars"] >> NACTIVE_VARS;
         modelParamsNode["max_trees_num"] >> MAX_TREES_NUM;
-        model = RTrees::create(RTrees::Params( MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY,
-                               USE_SURROGATE, MAX_CATEGORIES, Mat(), true, // (calc_var_importance == true) <=> RF processes variable importance
-                               NACTIVE_VARS, TermCriteria(TermCriteria::COUNT, MAX_TREES_NUM, OOB_EPS)));
+        Ptr<RTrees> m = RTrees::create();
+        m->setMaxDepth(MAX_DEPTH);
+        m->setMinSampleCount(MIN_SAMPLE_COUNT);
+        m->setRegressionAccuracy(REG_ACCURACY);
+        m->setUseSurrogates(USE_SURROGATE);
+        m->setMaxCategories(MAX_CATEGORIES);
+        m->setPriors(Mat());
+        m->setCalculateVarImportance(true);
+        m->setActiveVarCount(NACTIVE_VARS);
+        m->setTermCriteria(TermCriteria(TermCriteria::COUNT, MAX_TREES_NUM, OOB_EPS));
+        model = m;
     }

     if( !model.empty() )
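RTrees follows suit; variable importance is now a boolean property rather than a constructor flag. A sketch (values illustrative, data assumed):

#include "opencv2/ml.hpp"
using namespace cv::ml;

cv::Ptr<RTrees> forestSketch(const cv::Ptr<TrainData>& data)
{
    cv::Ptr<RTrees> rf = RTrees::create();
    rf->setMaxDepth(10);
    rf->setCalculateVarImportance(true);
    rf->setTermCriteria(cv::TermCriteria(cv::TermCriteria::COUNT, 100, 0.0));
    rf->train(data);
    cv::Mat importance = rf->getVarImportance(); // per-variable importance scores
    return rf;
}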
@@ -149,9 +149,8 @@ int CV_SLMLTest::validate_test_results( int testCaseIdx )
 }

 TEST(ML_NaiveBayes, save_load) { CV_SLMLTest test( CV_NBAYES ); test.safe_run(); }
-//CV_SLMLTest lsmlknearest( CV_KNEAREST, "slknearest" ); // does not support save!
+TEST(ML_KNearest, save_load) { CV_SLMLTest test( CV_KNEAREST ); test.safe_run(); }
 TEST(ML_SVM, save_load) { CV_SLMLTest test( CV_SVM ); test.safe_run(); }
-//CV_SLMLTest lsmlem( CV_EM, "slem" ); // does not support save!
 TEST(ML_ANN, save_load) { CV_SLMLTest test( CV_ANN ); test.safe_run(); }
 TEST(ML_DTree, save_load) { CV_SLMLTest test( CV_DTREE ); test.safe_run(); }
 TEST(ML_Boost, save_load) { CV_SLMLTest test( CV_BOOST ); test.safe_run(); }
@@ -52,11 +52,6 @@
 @defgroup shape Shape Distance and Matching
 */

-namespace cv
-{
-CV_EXPORTS bool initModule_shape();
-}
-
 #endif

 /* End of file. */
@@ -66,8 +66,6 @@ public:
     {
     }

-    virtual AlgorithmInfo* info() const { return 0; }
-
     //! the main operator
     virtual void estimateTransformation(InputArray transformingShape, InputArray targetShape, std::vector<DMatch> &matches);
     virtual float applyTransformation(InputArray input, OutputArray output=noArray());
@@ -60,8 +60,6 @@ public:
     {
     }

-    virtual AlgorithmInfo* info() const { return 0; }
-
     //! the main operator
     virtual float computeDistance(InputArray contour1, InputArray contour2);

@@ -62,8 +62,6 @@ public:
     {
     }

-    virtual AlgorithmInfo* info() const { return 0; }
-
     //! the main operator
     virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix);

@@ -189,8 +187,6 @@ public:
     {
     }

-    virtual AlgorithmInfo* info() const { return 0; }
-
     //! the main operator
     virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix);

@@ -327,8 +323,6 @@ public:
     {
     }

-    virtual AlgorithmInfo* info() const { return 0; }
-
     //! the main operator
     virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix);

@@ -445,8 +439,6 @@ public:
     {
     }

-    virtual AlgorithmInfo* info() const { return 0; }
-
     //! the main operator
     virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix);

@@ -79,8 +79,6 @@ public:
     {
     }

-    virtual AlgorithmInfo* info() const { return 0; }
-
     //! the main operator
     virtual float computeDistance(InputArray contour1, InputArray contour2);

@@ -68,8 +68,6 @@ public:
     {
     }

-    virtual AlgorithmInfo* info() const { return 0; }
-
     //! the main operators
     virtual void estimateTransformation(InputArray transformingShape, InputArray targetShape, std::vector<DMatch> &matches);
     virtual float applyTransformation(InputArray inPts, OutputArray output=noArray());
@@ -44,6 +44,7 @@
 #define __OPENCV_SUPERRES_HPP__

 #include "opencv2/core.hpp"
+#include "opencv2/superres/optical_flow.hpp"

 /**
 @defgroup superres Super Resolution
@@ -62,8 +63,6 @@ namespace cv
 //! @addtogroup superres
 //! @{

-        CV_EXPORTS bool initModule_superres();
-
         class CV_EXPORTS FrameSource
         {
         public:
@@ -105,6 +104,36 @@ namespace cv
             */
             virtual void collectGarbage();

+            //! @brief Scale factor
+            CV_PURE_PROPERTY(int, Scale)
+
+            //! @brief Iterations count
+            CV_PURE_PROPERTY(int, Iterations)
+
+            //! @brief Asymptotic value of steepest descent method
+            CV_PURE_PROPERTY(double, Tau)
+
+            //! @brief Weight parameter to balance data term and smoothness term
+            CV_PURE_PROPERTY(double, Labmda)
+
+            //! @brief Parameter of spacial distribution in Bilateral-TV
+            CV_PURE_PROPERTY(double, Alpha)
+
+            //! @brief Kernel size of Bilateral-TV filter
+            CV_PURE_PROPERTY(int, KernelSize)
+
+            //! @brief Gaussian blur kernel size
+            CV_PURE_PROPERTY(int, BlurKernelSize)
+
+            //! @brief Gaussian blur sigma
+            CV_PURE_PROPERTY(double, BlurSigma)
+
+            //! @brief Radius of the temporal search area
+            CV_PURE_PROPERTY(int, TemporalAreaRadius)
+
+            //! @brief Dense optical flow algorithm
+            CV_PURE_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow)
+
         protected:
             SuperResolution();

@@ -139,7 +168,6 @@ namespace cv
         */
         CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1();
         CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_CUDA();
-        CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_OCL();

 //! @} superres

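Each CV_PURE_PROPERTY(T, Name) line above declares a getName()/setName(T) pair on the interface, so client code migrates from string-keyed Algorithm::set() to typed calls. A sketch (values illustrative):

#include "opencv2/superres.hpp"
using namespace cv::superres;

void superresSketch()
{
    cv::Ptr<SuperResolution> sr = createSuperResolution_BTVL1();
    sr->setScale(4);                           // was sr->set("scale", 4)
    sr->setIterations(180);
    sr->setTemporalAreaRadius(4);
    sr->setOpticalFlow(createOptFlow_Farneback());
}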
@@ -60,20 +60,68 @@ namespace cv
             virtual void collectGarbage() = 0;
         };

-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback();
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_CUDA();
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_OCL();
-
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();
-
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1();
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_CUDA();
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_OCL();
-
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Brox_CUDA();
-
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_CUDA();
-        CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_OCL();
+        class CV_EXPORTS FarnebackOpticalFlow : public virtual DenseOpticalFlowExt
+        {
+        public:
+            CV_PURE_PROPERTY(double, PyrScale)
+            CV_PURE_PROPERTY(int, LevelsNumber)
+            CV_PURE_PROPERTY(int, WindowSize)
+            CV_PURE_PROPERTY(int, Iterations)
+            CV_PURE_PROPERTY(int, PolyN)
+            CV_PURE_PROPERTY(double, PolySigma)
+            CV_PURE_PROPERTY(int, Flags)
+        };
+        CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback();
+        CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback_CUDA();
+
+        // CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();
+
+        class CV_EXPORTS DualTVL1OpticalFlow : public virtual DenseOpticalFlowExt
+        {
+        public:
+            CV_PURE_PROPERTY(double, Tau)
+            CV_PURE_PROPERTY(double, Lambda)
+            CV_PURE_PROPERTY(double, Theta)
+            CV_PURE_PROPERTY(int, ScalesNumber)
+            CV_PURE_PROPERTY(int, WarpingsNumber)
+            CV_PURE_PROPERTY(double, Epsilon)
+            CV_PURE_PROPERTY(int, Iterations)
+            CV_PURE_PROPERTY(bool, UseInitialFlow)
+        };
+        CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();
+        CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1_CUDA();
+
+
+        class CV_EXPORTS BroxOpticalFlow : public virtual DenseOpticalFlowExt
+        {
+        public:
+            //! @brief Flow smoothness
+            CV_PURE_PROPERTY(double, Alpha)
+            //! @brief Gradient constancy importance
+            CV_PURE_PROPERTY(double, Gamma)
+            //! @brief Pyramid scale factor
+            CV_PURE_PROPERTY(double, ScaleFactor)
+            //! @brief Number of lagged non-linearity iterations (inner loop)
+            CV_PURE_PROPERTY(int, InnerIterations)
+            //! @brief Number of warping iterations (number of pyramid levels)
+            CV_PURE_PROPERTY(int, OuterIterations)
+            //! @brief Number of linear system solver iterations
+            CV_PURE_PROPERTY(int, SolverIterations)
+        };
+        CV_EXPORTS Ptr<BroxOpticalFlow> createOptFlow_Brox_CUDA();
+
+
+        class PyrLKOpticalFlow : public virtual DenseOpticalFlowExt
+        {
+        public:
+            CV_PURE_PROPERTY(int, WindowSize)
+            CV_PURE_PROPERTY(int, MaxLevel)
+            CV_PURE_PROPERTY(int, Iterations)
+        };
+        CV_EXPORTS Ptr<PyrLKOpticalFlow> createOptFlow_PyrLK_CUDA();

 //! @}

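Because the factories now return these typed interfaces, flow parameters get compile-time checking; a sketch (values illustrative):

#include "opencv2/superres/optical_flow.hpp"
using namespace cv::superres;

void flowSketch()
{
    cv::Ptr<DualTVL1OpticalFlow> flow = createOptFlow_DualTVL1();
    flow->setTau(0.25);
    flow->setScalesNumber(5);
    flow->setUseInitialFlow(false);
    // flow can then be handed to SuperResolution::setOpticalFlow(flow).
}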
@@ -138,10 +138,10 @@ PERF_TEST_P(Size_MatType, SuperResolution_BTVL1,
     {
         Ptr<SuperResolution> superRes = createSuperResolution_BTVL1_CUDA();

-        superRes->set("scale", scale);
-        superRes->set("iterations", iterations);
-        superRes->set("temporalAreaRadius", temporalAreaRadius);
-        superRes->set("opticalFlow", opticalFlow);
+        superRes->setScale(scale);
+        superRes->setIterations(iterations);
+        superRes->setTemporalAreaRadius(temporalAreaRadius);
+        superRes->setOpticalFlow(opticalFlow);

         superRes->setInput(makePtr<OneFrameSource_CUDA>(GpuMat(frame)));

@@ -156,10 +156,10 @@ PERF_TEST_P(Size_MatType, SuperResolution_BTVL1,
     {
         Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();

-        superRes->set("scale", scale);
-        superRes->set("iterations", iterations);
-        superRes->set("temporalAreaRadius", temporalAreaRadius);
-        superRes->set("opticalFlow", opticalFlow);
+        superRes->setScale(scale);
+        superRes->setIterations(iterations);
+        superRes->setTemporalAreaRadius(temporalAreaRadius);
+        superRes->setOpticalFlow(opticalFlow);

         superRes->setInput(makePtr<OneFrameSource_CPU>(frame));

@@ -198,10 +198,10 @@ OCL_PERF_TEST_P(SuperResolution_BTVL1 ,BTVL1,
     Ptr<DenseOpticalFlowExt> opticalFlow(new ZeroOpticalFlow);
     Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();

-    superRes->set("scale", scale);
-    superRes->set("iterations", iterations);
-    superRes->set("temporalAreaRadius", temporalAreaRadius);
-    superRes->set("opticalFlow", opticalFlow);
+    superRes->setScale(scale);
+    superRes->setIterations(iterations);
+    superRes->setTemporalAreaRadius(temporalAreaRadius);
+    superRes->setOpticalFlow(opticalFlow);

     superRes->setInput(makePtr<OneFrameSource_CPU>(frame));

@@ -460,7 +460,7 @@ namespace
         func(_src, _dst, btvKernelSize, btvWeights);
     }

-    class BTVL1_Base
+    class BTVL1_Base : public cv::superres::SuperResolution
     {
     public:
         BTVL1_Base();
@@ -470,6 +470,17 @@ namespace

         void collectGarbage();

+        CV_IMPL_PROPERTY(int, Scale, scale_)
+        CV_IMPL_PROPERTY(int, Iterations, iterations_)
+        CV_IMPL_PROPERTY(double, Tau, tau_)
+        CV_IMPL_PROPERTY(double, Labmda, lambda_)
+        CV_IMPL_PROPERTY(double, Alpha, alpha_)
+        CV_IMPL_PROPERTY(int, KernelSize, btvKernelSize_)
+        CV_IMPL_PROPERTY(int, BlurKernelSize, blurKernelSize_)
+        CV_IMPL_PROPERTY(double, BlurSigma, blurSigma_)
+        CV_IMPL_PROPERTY(int, TemporalAreaRadius, temporalAreaRadius_)
+        CV_IMPL_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow, opticalFlow_)
+
     protected:
         int scale_;
         int iterations_;
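CV_IMPL_PROPERTY wires those interface properties straight to member fields. The real macro lives in OpenCV's private headers; a sketch of the assumed expansion (the _SKETCH name is ours, for illustration only):

// Assumed shape of the expansion, for orientation only:
#define CV_IMPL_PROPERTY_SKETCH(type, name, member) \
    type get##name() const { return member; }       \
    void set##name(type val) { member = val; }
// e.g. CV_IMPL_PROPERTY(int, Scale, scale_) would yield getScale()/setScale(int),
// implementing the pure virtuals declared by CV_PURE_PROPERTY(int, Scale).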
@@ -479,7 +490,8 @@ namespace
         int btvKernelSize_;
         int blurKernelSize_;
         double blurSigma_;
-        Ptr<DenseOpticalFlowExt> opticalFlow_;
+        int temporalAreaRadius_; // not used in some implementations
+        Ptr<cv::superres::DenseOpticalFlowExt> opticalFlow_;

     private:
         bool ocl_process(InputArrayOfArrays src, OutputArray dst, InputArrayOfArrays forwardMotions,
@@ -539,6 +551,7 @@ namespace
         btvKernelSize_ = 7;
         blurKernelSize_ = 5;
         blurSigma_ = 0.0;
+        temporalAreaRadius_ = 0;
         opticalFlow_ = createOptFlow_Farneback();

         curBlurKernelSize_ = -1;
@@ -781,12 +794,9 @@ namespace

     ////////////////////////////////////////////////////////////////////

-    class BTVL1 :
-        public SuperResolution, private BTVL1_Base
+    class BTVL1 : public BTVL1_Base
     {
     public:
-        AlgorithmInfo* info() const;
-
         BTVL1();

         void collectGarbage();
@@ -799,8 +809,6 @@ namespace
         bool ocl_processImpl(Ptr<FrameSource>& frameSource, OutputArray output);

     private:
-        int temporalAreaRadius_;
-
         void readNextFrame(Ptr<FrameSource>& frameSource);
         bool ocl_readNextFrame(Ptr<FrameSource>& frameSource);

@@ -841,18 +849,6 @@ namespace
 #endif
     };

-    CV_INIT_ALGORITHM(BTVL1, "SuperResolution.BTVL1",
-                      obj.info()->addParam(obj, "scale", obj.scale_, false, 0, 0, "Scale factor.");
-                      obj.info()->addParam(obj, "iterations", obj.iterations_, false, 0, 0, "Iteration count.");
-                      obj.info()->addParam(obj, "tau", obj.tau_, false, 0, 0, "Asymptotic value of steepest descent method.");
-                      obj.info()->addParam(obj, "lambda", obj.lambda_, false, 0, 0, "Weight parameter to balance data term and smoothness term.");
-                      obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Parameter of spacial distribution in Bilateral-TV.");
-                      obj.info()->addParam(obj, "btvKernelSize", obj.btvKernelSize_, false, 0, 0, "Kernel size of Bilateral-TV filter.");
-                      obj.info()->addParam(obj, "blurKernelSize", obj.blurKernelSize_, false, 0, 0, "Gaussian blur kernel size.");
-                      obj.info()->addParam(obj, "blurSigma", obj.blurSigma_, false, 0, 0, "Gaussian blur sigma.");
-                      obj.info()->addParam(obj, "temporalAreaRadius", obj.temporalAreaRadius_, false, 0, 0, "Radius of the temporal search area.");
-                      obj.info()->addParam<DenseOpticalFlowExt>(obj, "opticalFlow", obj.opticalFlow_, false, 0, 0, "Dense optical flow algorithm."))
-
     BTVL1::BTVL1()
     {
         temporalAreaRadius_ = 4;
@@ -1101,7 +1097,7 @@ namespace
         }
     }

-Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1()
+Ptr<cv::superres::SuperResolution> cv::superres::createSuperResolution_BTVL1()
 {
     return makePtr<BTVL1>();
 }
@@ -207,7 +207,7 @@ namespace
         funcs[src.channels()](src, dst, ksize);
     }

-    class BTVL1_CUDA_Base
+    class BTVL1_CUDA_Base : public cv::superres::SuperResolution
     {
     public:
         BTVL1_CUDA_Base();
@@ -218,6 +218,17 @@ namespace

         void collectGarbage();

+        CV_IMPL_PROPERTY(int, Scale, scale_)
+        CV_IMPL_PROPERTY(int, Iterations, iterations_)
+        CV_IMPL_PROPERTY(double, Tau, tau_)
+        CV_IMPL_PROPERTY(double, Labmda, lambda_)
+        CV_IMPL_PROPERTY(double, Alpha, alpha_)
+        CV_IMPL_PROPERTY(int, KernelSize, btvKernelSize_)
+        CV_IMPL_PROPERTY(int, BlurKernelSize, blurKernelSize_)
+        CV_IMPL_PROPERTY(double, BlurSigma, blurSigma_)
+        CV_IMPL_PROPERTY(int, TemporalAreaRadius, temporalAreaRadius_)
+        CV_IMPL_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow, opticalFlow_)
+
     protected:
         int scale_;
         int iterations_;
@@ -227,7 +238,8 @@ namespace
         int btvKernelSize_;
         int blurKernelSize_;
         double blurSigma_;
-        Ptr<DenseOpticalFlowExt> opticalFlow_;
+        int temporalAreaRadius_;
+        Ptr<cv::superres::DenseOpticalFlowExt> opticalFlow_;

     private:
         std::vector<Ptr<cuda::Filter> > filters_;
@@ -272,6 +284,7 @@ namespace
 #else
         opticalFlow_ = createOptFlow_Farneback();
 #endif
+        temporalAreaRadius_ = 0;

         curBlurKernelSize_ = -1;
         curBlurSigma_ = -1.0;
@@ -401,11 +414,9 @@ namespace

     ////////////////////////////////////////////////////////////

-    class BTVL1_CUDA : public SuperResolution, private BTVL1_CUDA_Base
+    class BTVL1_CUDA : public BTVL1_CUDA_Base
     {
     public:
-        AlgorithmInfo* info() const;
-
         BTVL1_CUDA();

         void collectGarbage();
@@ -415,8 +426,6 @@ namespace
         void processImpl(Ptr<FrameSource>& frameSource, OutputArray output);

     private:
-        int temporalAreaRadius_;
-
         void readNextFrame(Ptr<FrameSource>& frameSource);
         void processFrame(int idx);

@@ -438,18 +447,6 @@ namespace
         GpuMat finalOutput_;
     };

-    CV_INIT_ALGORITHM(BTVL1_CUDA, "SuperResolution.BTVL1_CUDA",
-                      obj.info()->addParam(obj, "scale", obj.scale_, false, 0, 0, "Scale factor.");
-                      obj.info()->addParam(obj, "iterations", obj.iterations_, false, 0, 0, "Iteration count.");
-                      obj.info()->addParam(obj, "tau", obj.tau_, false, 0, 0, "Asymptotic value of steepest descent method.");
-                      obj.info()->addParam(obj, "lambda", obj.lambda_, false, 0, 0, "Weight parameter to balance data term and smoothness term.");
-                      obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Parameter of spacial distribution in Bilateral-TV.");
-                      obj.info()->addParam(obj, "btvKernelSize", obj.btvKernelSize_, false, 0, 0, "Kernel size of Bilateral-TV filter.");
-                      obj.info()->addParam(obj, "blurKernelSize", obj.blurKernelSize_, false, 0, 0, "Gaussian blur kernel size.");
-                      obj.info()->addParam(obj, "blurSigma", obj.blurSigma_, false, 0, 0, "Gaussian blur sigma.");
-                      obj.info()->addParam(obj, "temporalAreaRadius", obj.temporalAreaRadius_, false, 0, 0, "Radius of the temporal search area.");
-                      obj.info()->addParam<DenseOpticalFlowExt>(obj, "opticalFlow", obj.opticalFlow_, false, 0, 0, "Dense optical flow algorithm."));
-
     BTVL1_CUDA::BTVL1_CUDA()
     {
         temporalAreaRadius_ = 4;
@@ -53,7 +53,7 @@ using namespace cv::superres::detail;

 namespace
 {
-    class CpuOpticalFlow : public DenseOpticalFlowExt
+    class CpuOpticalFlow : public virtual cv::superres::DenseOpticalFlowExt
     {
     public:
         explicit CpuOpticalFlow(int work_type);
@@ -173,12 +173,20 @@ namespace

 namespace
 {
-    class Farneback : public CpuOpticalFlow
+    class Farneback : public CpuOpticalFlow, public cv::superres::FarnebackOpticalFlow
     {
     public:
-        AlgorithmInfo* info() const;
-
         Farneback();
+        void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
+        void collectGarbage();

+        CV_IMPL_PROPERTY(double, PyrScale, pyrScale_)
+        CV_IMPL_PROPERTY(int, LevelsNumber, numLevels_)
+        CV_IMPL_PROPERTY(int, WindowSize, winSize_)
+        CV_IMPL_PROPERTY(int, Iterations, numIters_)
+        CV_IMPL_PROPERTY(int, PolyN, polyN_)
+        CV_IMPL_PROPERTY(double, PolySigma, polySigma_)
+        CV_IMPL_PROPERTY(int, Flags, flags_)
+
     protected:
         void impl(InputArray input0, InputArray input1, OutputArray dst);
@@ -193,15 +201,6 @@ namespace
         int flags_;
     };

-    CV_INIT_ALGORITHM(Farneback, "DenseOpticalFlowExt.Farneback",
-                      obj.info()->addParam(obj, "pyrScale", obj.pyrScale_);
-                      obj.info()->addParam(obj, "numLevels", obj.numLevels_);
-                      obj.info()->addParam(obj, "winSize", obj.winSize_);
-                      obj.info()->addParam(obj, "numIters", obj.numIters_);
-                      obj.info()->addParam(obj, "polyN", obj.polyN_);
-                      obj.info()->addParam(obj, "polySigma", obj.polySigma_);
-                      obj.info()->addParam(obj, "flags", obj.flags_))
-
     Farneback::Farneback() : CpuOpticalFlow(CV_8UC1)
     {
         pyrScale_ = 0.5;
@@ -213,6 +212,16 @@ namespace
         flags_ = 0;
     }

+    void Farneback::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
+    {
+        CpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
+    }
+
+    void Farneback::collectGarbage()
+    {
+        CpuOpticalFlow::collectGarbage();
+    }
+
     void Farneback::impl(InputArray input0, InputArray input1, OutputArray dst)
     {
         calcOpticalFlowFarneback(input0, input1, (InputOutputArray)dst, pyrScale_,
@@ -221,7 +230,7 @@ namespace
     }
 }

-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback()
+Ptr<cv::superres::FarnebackOpticalFlow> cv::superres::createOptFlow_Farneback()
 {
     return makePtr<Farneback>();
 }
@@ -319,65 +328,41 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Simple()

 namespace
 {
-    class DualTVL1 : public CpuOpticalFlow
+    class DualTVL1 : public CpuOpticalFlow, public virtual cv::superres::DualTVL1OpticalFlow
     {
     public:
-        AlgorithmInfo* info() const;
-
         DualTVL1();
+        void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
         void collectGarbage();

+        CV_WRAP_SAME_PROPERTY(double, Tau, (*alg_))
+        CV_WRAP_SAME_PROPERTY(double, Lambda, (*alg_))
+        CV_WRAP_SAME_PROPERTY(double, Theta, (*alg_))
+        CV_WRAP_SAME_PROPERTY(int, ScalesNumber, (*alg_))
+        CV_WRAP_SAME_PROPERTY(int, WarpingsNumber, (*alg_))
+        CV_WRAP_SAME_PROPERTY(double, Epsilon, (*alg_))
+        CV_WRAP_PROPERTY(int, Iterations, OuterIterations, (*alg_))
+        CV_WRAP_SAME_PROPERTY(bool, UseInitialFlow, (*alg_))
+
     protected:
         void impl(InputArray input0, InputArray input1, OutputArray dst);

     private:
-        double tau_;
-        double lambda_;
-        double theta_;
-        int nscales_;
-        int warps_;
-        double epsilon_;
-        int iterations_;
-        bool useInitialFlow_;
-
-        Ptr<cv::DenseOpticalFlow> alg_;
+        Ptr<cv::DualTVL1OpticalFlow> alg_;
     };

-    CV_INIT_ALGORITHM(DualTVL1, "DenseOpticalFlowExt.DualTVL1",
-                      obj.info()->addParam(obj, "tau", obj.tau_);
-                      obj.info()->addParam(obj, "lambda", obj.lambda_);
-                      obj.info()->addParam(obj, "theta", obj.theta_);
-                      obj.info()->addParam(obj, "nscales", obj.nscales_);
-                      obj.info()->addParam(obj, "warps", obj.warps_);
-                      obj.info()->addParam(obj, "epsilon", obj.epsilon_);
-                      obj.info()->addParam(obj, "iterations", obj.iterations_);
-                      obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow_))
-
     DualTVL1::DualTVL1() : CpuOpticalFlow(CV_8UC1)
     {
         alg_ = cv::createOptFlow_DualTVL1();
-        tau_ = alg_->getDouble("tau");
-        lambda_ = alg_->getDouble("lambda");
-        theta_ = alg_->getDouble("theta");
-        nscales_ = alg_->getInt("nscales");
-        warps_ = alg_->getInt("warps");
-        epsilon_ = alg_->getDouble("epsilon");
-        iterations_ = alg_->getInt("iterations");
-        useInitialFlow_ = alg_->getBool("useInitialFlow");
+    }
+
+    void DualTVL1::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
+    {
+        CpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
     }

     void DualTVL1::impl(InputArray input0, InputArray input1, OutputArray dst)
     {
-        alg_->set("tau", tau_);
-        alg_->set("lambda", lambda_);
-        alg_->set("theta", theta_);
-        alg_->set("nscales", nscales_);
-        alg_->set("warps", warps_);
-        alg_->set("epsilon", epsilon_);
-        alg_->set("iterations", iterations_);
-        alg_->set("useInitialFlow", useInitialFlow_);
-
         alg_->calc(input0, input1, (InputOutputArray)dst);
     }

@@ -388,7 +373,7 @@ namespace
     }
 }

-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1()
+Ptr<cv::superres::DualTVL1OpticalFlow> cv::superres::createOptFlow_DualTVL1()
 {
     return makePtr<DualTVL1>();
 }
@ -398,35 +383,35 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1()
|
|||||||
|
|
||||||
#ifndef HAVE_OPENCV_CUDAOPTFLOW
|
#ifndef HAVE_OPENCV_CUDAOPTFLOW
|
||||||
|
|
||||||
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_CUDA()
|
Ptr<cv::superres::FarnebackOpticalFlow> cv::superres::createOptFlow_Farneback_CUDA()
|
||||||
{
|
{
|
||||||
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
|
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
|
||||||
return Ptr<DenseOpticalFlowExt>();
|
return Ptr<cv::superres::FarnebackOpticalFlow>();
|
||||||
}
|
}
|
||||||
|
|
||||||
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_CUDA()
|
Ptr<cv::superres::DualTVL1OpticalFlow> cv::superres::createOptFlow_DualTVL1_CUDA()
|
||||||
{
|
{
|
||||||
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
|
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
|
||||||
return Ptr<DenseOpticalFlowExt>();
|
return Ptr<cv::superres::DualTVL1OpticalFlow>();
|
||||||
}
|
}
|
||||||
|
|
||||||
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_CUDA()
|
Ptr<cv::superres::BroxOpticalFlow> cv::superres::createOptFlow_Brox_CUDA()
|
||||||
{
|
{
|
||||||
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
|
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
|
||||||
return Ptr<DenseOpticalFlowExt>();
|
return Ptr<cv::superres::BroxOpticalFlow>();
|
||||||
}
|
}
|
||||||
|
|
||||||
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_CUDA()
|
Ptr<cv::superres::PyrLKOpticalFlow> cv::superres::createOptFlow_PyrLK_CUDA()
|
||||||
{
|
{
|
||||||
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
|
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
|
||||||
return Ptr<DenseOpticalFlowExt>();
|
return Ptr<cv::superres::PyrLKOpticalFlow>();
|
||||||
}
|
}
|
||||||
|
|
||||||
#else // HAVE_OPENCV_CUDAOPTFLOW
|
#else // HAVE_OPENCV_CUDAOPTFLOW
|
||||||
|
|
||||||
namespace
|
namespace
|
||||||
{
|
{
|
||||||
class GpuOpticalFlow : public DenseOpticalFlowExt
|
class GpuOpticalFlow : public virtual cv::superres::DenseOpticalFlowExt
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
explicit GpuOpticalFlow(int work_type);
|
explicit GpuOpticalFlow(int work_type);
|
||||||
@ -494,15 +479,20 @@ namespace
|
|||||||
|
|
||||||
namespace
|
namespace
|
||||||
{
|
{
|
||||||
class Brox_CUDA : public GpuOpticalFlow
|
class Brox_CUDA : public GpuOpticalFlow, public virtual cv::superres::BroxOpticalFlow
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
AlgorithmInfo* info() const;
|
|
||||||
|
|
||||||
Brox_CUDA();
|
Brox_CUDA();
|
||||||
|
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
|
||||||
void collectGarbage();
|
void collectGarbage();
|
||||||
|
|
||||||
|
CV_IMPL_PROPERTY(double, Alpha, alpha_)
|
||||||
|
CV_IMPL_PROPERTY(double, Gamma, gamma_)
|
||||||
|
CV_IMPL_PROPERTY(double, ScaleFactor, scaleFactor_)
|
||||||
|
CV_IMPL_PROPERTY(int, InnerIterations, innerIterations_)
|
||||||
|
CV_IMPL_PROPERTY(int, OuterIterations, outerIterations_)
|
||||||
|
CV_IMPL_PROPERTY(int, SolverIterations, solverIterations_)
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
|
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
|
||||||
|
|
||||||
@ -517,14 +507,6 @@ namespace
|
|||||||
Ptr<cuda::BroxOpticalFlow> alg_;
|
Ptr<cuda::BroxOpticalFlow> alg_;
|
||||||
};
|
};
|
||||||
|
|
||||||
CV_INIT_ALGORITHM(Brox_CUDA, "DenseOpticalFlowExt.Brox_CUDA",
|
|
||||||
obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Flow smoothness");
|
|
||||||
obj.info()->addParam(obj, "gamma", obj.gamma_, false, 0, 0, "Gradient constancy importance");
|
|
||||||
obj.info()->addParam(obj, "scaleFactor", obj.scaleFactor_, false, 0, 0, "Pyramid scale factor");
|
|
||||||
obj.info()->addParam(obj, "innerIterations", obj.innerIterations_, false, 0, 0, "Number of lagged non-linearity iterations (inner loop)");
|
|
||||||
obj.info()->addParam(obj, "outerIterations", obj.outerIterations_, false, 0, 0, "Number of warping iterations (number of pyramid levels)");
|
|
||||||
obj.info()->addParam(obj, "solverIterations", obj.solverIterations_, false, 0, 0, "Number of linear system solver iterations"))
|
|
||||||
|
|
||||||
Brox_CUDA::Brox_CUDA() : GpuOpticalFlow(CV_32FC1)
|
Brox_CUDA::Brox_CUDA() : GpuOpticalFlow(CV_32FC1)
|
||||||
{
|
{
|
||||||
alg_ = cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10);
|
alg_ = cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10);
|
||||||
@ -537,6 +519,11 @@ namespace
|
|||||||
solverIterations_ = alg_->getSolverIterations();
|
solverIterations_ = alg_->getSolverIterations();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void Brox_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
|
||||||
|
{
|
||||||
|
GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
|
||||||
|
}
|
||||||
|
|
||||||
void Brox_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
|
void Brox_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
|
||||||
{
|
{
|
||||||
alg_->setFlowSmoothness(alpha_);
|
alg_->setFlowSmoothness(alpha_);
|
||||||
@ -563,7 +550,7 @@ namespace
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_CUDA()
|
Ptr<cv::superres::BroxOpticalFlow> cv::superres::createOptFlow_Brox_CUDA()
|
||||||
{
|
{
|
||||||
return makePtr<Brox_CUDA>();
|
return makePtr<Brox_CUDA>();
|
||||||
}
|
}
|
||||||
@ -573,15 +560,17 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_CUDA()
|
|||||||
|
|
||||||
namespace
|
namespace
|
||||||
{
|
{
|
||||||
class PyrLK_CUDA : public GpuOpticalFlow
|
class PyrLK_CUDA : public GpuOpticalFlow, public cv::superres::PyrLKOpticalFlow
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
AlgorithmInfo* info() const;
|
|
||||||
|
|
||||||
PyrLK_CUDA();
|
PyrLK_CUDA();
|
||||||
|
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
|
||||||
void collectGarbage();
|
void collectGarbage();
|
||||||
|
|
||||||
|
CV_IMPL_PROPERTY(int, WindowSize, winSize_)
|
||||||
|
CV_IMPL_PROPERTY(int, MaxLevel, maxLevel_)
|
||||||
|
CV_IMPL_PROPERTY(int, Iterations, iterations_)
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
|
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
|
||||||
|
|
||||||
@ -593,11 +582,6 @@ namespace
|
|||||||
Ptr<cuda::DensePyrLKOpticalFlow> alg_;
|
Ptr<cuda::DensePyrLKOpticalFlow> alg_;
|
||||||
};
|
};
|
||||||
|
|
||||||
CV_INIT_ALGORITHM(PyrLK_CUDA, "DenseOpticalFlowExt.PyrLK_CUDA",
|
|
||||||
obj.info()->addParam(obj, "winSize", obj.winSize_);
|
|
||||||
obj.info()->addParam(obj, "maxLevel", obj.maxLevel_);
|
|
||||||
obj.info()->addParam(obj, "iterations", obj.iterations_))
|
|
||||||
|
|
||||||
PyrLK_CUDA::PyrLK_CUDA() : GpuOpticalFlow(CV_8UC1)
|
PyrLK_CUDA::PyrLK_CUDA() : GpuOpticalFlow(CV_8UC1)
|
||||||
{
|
{
|
||||||
alg_ = cuda::DensePyrLKOpticalFlow::create();
|
alg_ = cuda::DensePyrLKOpticalFlow::create();
|
||||||
@ -607,6 +591,11 @@ namespace
|
|||||||
iterations_ = alg_->getNumIters();
|
iterations_ = alg_->getNumIters();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void PyrLK_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
|
||||||
|
{
|
||||||
|
GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
|
||||||
|
}
|
||||||
|
|
||||||
void PyrLK_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
|
void PyrLK_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
|
||||||
{
|
{
|
||||||
         alg_->setWinSize(Size(winSize_, winSize_));
@@ -630,7 +619,7 @@ namespace
     }
 }
 
-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_CUDA()
+Ptr<cv::superres::PyrLKOpticalFlow> cv::superres::createOptFlow_PyrLK_CUDA()
 {
     return makePtr<PyrLK_CUDA>();
 }
@@ -640,15 +629,21 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_CUDA()
 
 namespace
 {
-    class Farneback_CUDA : public GpuOpticalFlow
+    class Farneback_CUDA : public GpuOpticalFlow, public cv::superres::FarnebackOpticalFlow
     {
     public:
-        AlgorithmInfo* info() const;
 
         Farneback_CUDA();
+        void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
         void collectGarbage();
 
+        CV_IMPL_PROPERTY(double, PyrScale, pyrScale_)
+        CV_IMPL_PROPERTY(int, LevelsNumber, numLevels_)
+        CV_IMPL_PROPERTY(int, WindowSize, winSize_)
+        CV_IMPL_PROPERTY(int, Iterations, numIters_)
+        CV_IMPL_PROPERTY(int, PolyN, polyN_)
+        CV_IMPL_PROPERTY(double, PolySigma, polySigma_)
+        CV_IMPL_PROPERTY(int, Flags, flags_)
 
     protected:
         void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
 
@@ -664,15 +659,6 @@ namespace
         Ptr<cuda::FarnebackOpticalFlow> alg_;
     };
 
-    CV_INIT_ALGORITHM(Farneback_CUDA, "DenseOpticalFlowExt.Farneback_CUDA",
-                      obj.info()->addParam(obj, "pyrScale", obj.pyrScale_);
-                      obj.info()->addParam(obj, "numLevels", obj.numLevels_);
-                      obj.info()->addParam(obj, "winSize", obj.winSize_);
-                      obj.info()->addParam(obj, "numIters", obj.numIters_);
-                      obj.info()->addParam(obj, "polyN", obj.polyN_);
-                      obj.info()->addParam(obj, "polySigma", obj.polySigma_);
-                      obj.info()->addParam(obj, "flags", obj.flags_))
-
     Farneback_CUDA::Farneback_CUDA() : GpuOpticalFlow(CV_8UC1)
     {
         alg_ = cuda::FarnebackOpticalFlow::create();
@@ -686,6 +672,11 @@ namespace
         flags_ = alg_->getFlags();
     }
 
+    void Farneback_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
+    {
+        GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
+    }
+
     void Farneback_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
     {
         alg_->setPyrScale(pyrScale_);
@@ -713,7 +704,7 @@ namespace
     }
 }
 
-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_CUDA()
+Ptr<cv::superres::FarnebackOpticalFlow> cv::superres::createOptFlow_Farneback_CUDA()
 {
     return makePtr<Farneback_CUDA>();
 }
@@ -723,15 +714,22 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_CUDA()
 
 namespace
 {
-    class DualTVL1_CUDA : public GpuOpticalFlow
+    class DualTVL1_CUDA : public GpuOpticalFlow, public cv::superres::DualTVL1OpticalFlow
     {
     public:
-        AlgorithmInfo* info() const;
 
         DualTVL1_CUDA();
+        void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
         void collectGarbage();
 
+        CV_IMPL_PROPERTY(double, Tau, tau_)
+        CV_IMPL_PROPERTY(double, Lambda, lambda_)
+        CV_IMPL_PROPERTY(double, Theta, theta_)
+        CV_IMPL_PROPERTY(int, ScalesNumber, nscales_)
+        CV_IMPL_PROPERTY(int, WarpingsNumber, warps_)
+        CV_IMPL_PROPERTY(double, Epsilon, epsilon_)
+        CV_IMPL_PROPERTY(int, Iterations, iterations_)
+        CV_IMPL_PROPERTY(bool, UseInitialFlow, useInitialFlow_)
 
     protected:
         void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
 
@@ -748,16 +746,6 @@ namespace
         Ptr<cuda::OpticalFlowDual_TVL1> alg_;
     };
 
-    CV_INIT_ALGORITHM(DualTVL1_CUDA, "DenseOpticalFlowExt.DualTVL1_CUDA",
-                      obj.info()->addParam(obj, "tau", obj.tau_);
-                      obj.info()->addParam(obj, "lambda", obj.lambda_);
-                      obj.info()->addParam(obj, "theta", obj.theta_);
-                      obj.info()->addParam(obj, "nscales", obj.nscales_);
-                      obj.info()->addParam(obj, "warps", obj.warps_);
-                      obj.info()->addParam(obj, "epsilon", obj.epsilon_);
-                      obj.info()->addParam(obj, "iterations", obj.iterations_);
-                      obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow_))
-
     DualTVL1_CUDA::DualTVL1_CUDA() : GpuOpticalFlow(CV_8UC1)
    {
         alg_ = cuda::OpticalFlowDual_TVL1::create();
@@ -772,6 +760,11 @@ namespace
         useInitialFlow_ = alg_->getUseInitialFlow();
     }
 
+    void DualTVL1_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
+    {
+        GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
+    }
+
     void DualTVL1_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
     {
         alg_->setTau(tau_);
@@ -800,7 +793,7 @@ namespace
     }
 }
 
-Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_CUDA()
+Ptr<cv::superres::DualTVL1OpticalFlow> cv::superres::createOptFlow_DualTVL1_CUDA()
 {
     return makePtr<DualTVL1_CUDA>();
 }
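The hunks above drop the CV_INIT_ALGORITHM string-parameter tables and instead expose each field through CV_IMPL_PROPERTY. A minimal sketch of what such a property macro can expand to follows; the macro name IMPL_PROPERTY and the class are illustrative, and the real OpenCV macro may differ in detail:

    // Sketch: a macro that stamps out an inline getter/setter pair.
    #include <iostream>

    #define IMPL_PROPERTY(type, name, member)          \
        type get##name() const { return member; }      \
        void set##name(type val) { member = val; }

    class FlowParams
    {
    public:
        FlowParams() : pyrScale_(0.5), winSize_(13) {}

        IMPL_PROPERTY(double, PyrScale, pyrScale_)  // generates getPyrScale/setPyrScale
        IMPL_PROPERTY(int, WindowSize, winSize_)    // generates getWindowSize/setWindowSize

    private:
        double pyrScale_;
        int winSize_;
    };

    int main()
    {
        FlowParams p;
        p.setWindowSize(21);
        std::cout << p.getPyrScale() << " " << p.getWindowSize() << std::endl; // 0.5 21
    }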
@@ -45,11 +45,6 @@
 using namespace cv;
 using namespace cv::superres;
 
-bool cv::superres::initModule_superres()
-{
-    return !createSuperResolution_BTVL1().empty();
-}
-
 cv::superres::SuperResolution::SuperResolution()
 {
     frameSource_ = createFrameSource_Empty();
@@ -222,11 +222,11 @@ void SuperResolution::RunTest(cv::Ptr<cv::superres::SuperResolution> superRes)
 
     ASSERT_FALSE( superRes.empty() );
 
-    const int btvKernelSize = superRes->getInt("btvKernelSize");
+    const int btvKernelSize = superRes->getKernelSize();
 
-    superRes->set("scale", scale);
-    superRes->set("iterations", iterations);
-    superRes->set("temporalAreaRadius", temporalAreaRadius);
+    superRes->setScale(scale);
+    superRes->setIterations(iterations);
+    superRes->setTemporalAreaRadius(temporalAreaRadius);
 
     cv::Ptr<cv::superres::FrameSource> goldSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale));
     cv::Ptr<cv::superres::FrameSource> lowResSource(new DegradeFrameSource(
@@ -380,6 +380,21 @@ public:
 };
 
 
+class CV_EXPORTS_W DenseOpticalFlow : public Algorithm
+{
+public:
+    /** @brief Calculates an optical flow.
+
+    @param I0 first 8-bit single-channel input image.
+    @param I1 second input image of the same size and the same type as prev.
+    @param flow computed flow image that has the same size as prev and type CV_32FC2.
+     */
+    CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0;
+    /** @brief Releases all inner buffers.
+    */
+    CV_WRAP virtual void collectGarbage() = 0;
+};
+
 /** @brief "Dual TV L1" Optical Flow Algorithm.
 
 The class implements the "Dual TV L1" optical flow algorithm described in @cite Zach2007 and
@@ -422,24 +437,38 @@ constructing the class instance:
 C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
 Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
  */
-class CV_EXPORTS_W DenseOpticalFlow : public Algorithm
+class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow
 {
 public:
-    /** @brief Calculates an optical flow.
-
-    @param I0 first 8-bit single-channel input image.
-    @param I1 second input image of the same size and the same type as prev.
-    @param flow computed flow image that has the same size as prev and type CV_32FC2.
-     */
-    CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0;
-    /** @brief Releases all inner buffers.
-    */
-    CV_WRAP virtual void collectGarbage() = 0;
+    //! @brief Time step of the numerical scheme
+    CV_PURE_PROPERTY(double, Tau)
+    //! @brief Weight parameter for the data term, attachment parameter
+    CV_PURE_PROPERTY(double, Lambda)
+    //! @brief Weight parameter for (u - v)^2, tightness parameter
+    CV_PURE_PROPERTY(double, Theta)
+    //! @brief coefficient for additional illumination variation term
+    CV_PURE_PROPERTY(double, Gamma)
+    //! @brief Number of scales used to create the pyramid of images
+    CV_PURE_PROPERTY(int, ScalesNumber)
+    //! @brief Number of warpings per scale
+    CV_PURE_PROPERTY(int, WarpingsNumber)
+    //! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time
+    CV_PURE_PROPERTY(double, Epsilon)
+    //! @brief Inner iterations (between outlier filtering) used in the numerical scheme
+    CV_PURE_PROPERTY(int, InnerIterations)
+    //! @brief Outer iterations (number of inner loops) used in the numerical scheme
+    CV_PURE_PROPERTY(int, OuterIterations)
+    //! @brief Use initial flow
+    CV_PURE_PROPERTY(bool, UseInitialFlow)
+    //! @brief Step between scales (<1)
+    CV_PURE_PROPERTY(double, ScaleStep)
+    //! @brief Median filter kernel size (1 = no filter) (3 or 5)
+    CV_PURE_PROPERTY(int, MedianFiltering)
 };
 
 /** @brief Creates instance of cv::DenseOpticalFlow
 */
-CV_EXPORTS_W Ptr<DenseOpticalFlow> createOptFlow_DualTVL1();
+CV_EXPORTS_W Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();
 
 //! @} video_track
 
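CV_PURE_PROPERTY, used in the new DualTVL1OpticalFlow interface above, is the declaration-side counterpart of CV_IMPL_PROPERTY: it emits a pure-virtual getter/setter pair that concrete classes later satisfy. A hedged sketch of the idea (the macro and types here are illustrative, not the OpenCV definitions):

    // Sketch: declaration-side property macro producing pure virtuals.
    #define PURE_PROPERTY(type, name)          \
        virtual type get##name() const = 0;    \
        virtual void set##name(type val) = 0;

    struct DenseFlowIface
    {
        PURE_PROPERTY(double, Tau)        // declares getTau()/setTau() as pure virtuals
        PURE_PROPERTY(int, ScalesNumber)  // declares getScalesNumber()/setScalesNumber()
        virtual ~DenseFlowIface() {}
    };

A concrete implementation then pairs each declaration with an IMPL-style macro over a member variable, exactly as the OpticalFlowDual_TVL1 hunk further down does with CV_IMPL_PROPERTY.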
@@ -87,11 +87,11 @@ OCL_PERF_TEST_P(OpticalFlowDualTVL1Fixture, OpticalFlowDualTVL1,
         declare.in(uFrame0, uFrame1, WARMUP_READ).out(uFlow, WARMUP_READ);
 
         //create algorithm
-        cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
+        cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
 
         //set parameters
-        alg->set("scaleStep", scaleStep);
-        alg->setInt("medianFiltering", medianFiltering);
+        alg->setScaleStep(scaleStep);
+        alg->setMedianFiltering(medianFiltering);
 
         if (useInitFlow)
         {
@@ -100,7 +100,7 @@ OCL_PERF_TEST_P(OpticalFlowDualTVL1Fixture, OpticalFlowDualTVL1,
         }
 
         //set flag to use initial flow
-        alg->setBool("useInitialFlow", useInitFlow);
+        alg->setUseInitialFlow(useInitFlow);
         OCL_TEST_CYCLE()
             alg->calc(uFrame0, uFrame1, uFlow);
 
@@ -109,4 +109,4 @@ OCL_PERF_TEST_P(OpticalFlowDualTVL1Fixture, OpticalFlowDualTVL1,
 }
 } // namespace cvtest::ocl
 
 #endif // HAVE_OPENCL
@@ -160,8 +160,6 @@ public:
         nNextLongUpdate = Scalar::all(0);
     }
 
-    virtual AlgorithmInfo* info() const { return 0; }
-
     virtual int getHistory() const { return history; }
     virtual void setHistory(int _nframes) { history = _nframes; }
 
@@ -230,8 +230,6 @@ public:
         }
     }
 
-    virtual AlgorithmInfo* info() const { return 0; }
-
     virtual int getHistory() const { return history; }
     virtual void setHistory(int _nframes) { history = _nframes; }
 
@@ -86,7 +86,7 @@ using namespace cv;
 
 namespace {
 
-class OpticalFlowDual_TVL1 : public DenseOpticalFlow
+class OpticalFlowDual_TVL1 : public DualTVL1OpticalFlow
 {
 public:
     OpticalFlowDual_TVL1();
@@ -94,7 +94,18 @@ public:
     void calc(InputArray I0, InputArray I1, InputOutputArray flow);
     void collectGarbage();
 
-    AlgorithmInfo* info() const;
+    CV_IMPL_PROPERTY(double, Tau, tau)
+    CV_IMPL_PROPERTY(double, Lambda, lambda)
+    CV_IMPL_PROPERTY(double, Theta, theta)
+    CV_IMPL_PROPERTY(double, Gamma, gamma)
+    CV_IMPL_PROPERTY(int, ScalesNumber, nscales)
+    CV_IMPL_PROPERTY(int, WarpingsNumber, warps)
+    CV_IMPL_PROPERTY(double, Epsilon, epsilon)
+    CV_IMPL_PROPERTY(int, InnerIterations, innerIterations)
+    CV_IMPL_PROPERTY(int, OuterIterations, outerIterations)
+    CV_IMPL_PROPERTY(bool, UseInitialFlow, useInitialFlow)
+    CV_IMPL_PROPERTY(double, ScaleStep, scaleStep)
+    CV_IMPL_PROPERTY(int, MedianFiltering, medianFiltering)
 
 protected:
     double tau;
@@ -1416,35 +1427,9 @@ void OpticalFlowDual_TVL1::collectGarbage()
     dum.norm_buf.release();
 }
 
-
-CV_INIT_ALGORITHM(OpticalFlowDual_TVL1, "DenseOpticalFlow.DualTVL1",
-                  obj.info()->addParam(obj, "tau", obj.tau, false, 0, 0,
-                                       "Time step of the numerical scheme");
-                  obj.info()->addParam(obj, "lambda", obj.lambda, false, 0, 0,
-                                       "Weight parameter for the data term, attachment parameter");
-                  obj.info()->addParam(obj, "theta", obj.theta, false, 0, 0,
-                                       "Weight parameter for (u - v)^2, tightness parameter");
-                  obj.info()->addParam(obj, "nscales", obj.nscales, false, 0, 0,
-                                       "Number of scales used to create the pyramid of images");
-                  obj.info()->addParam(obj, "warps", obj.warps, false, 0, 0,
-                                       "Number of warpings per scale");
-                  obj.info()->addParam(obj, "medianFiltering", obj.medianFiltering, false, 0, 0,
-                                       "Median filter kernel size (1 = no filter) (3 or 5)");
-                  obj.info()->addParam(obj, "scaleStep", obj.scaleStep, false, 0, 0,
-                                       "Step between scales (<1)");
-                  obj.info()->addParam(obj, "epsilon", obj.epsilon, false, 0, 0,
-                                       "Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time");
-                  obj.info()->addParam(obj, "innerIterations", obj.innerIterations, false, 0, 0,
-                                       "inner iterations (between outlier filtering) used in the numerical scheme");
-                  obj.info()->addParam(obj, "outerIterations", obj.outerIterations, false, 0, 0,
-                                       "outer iterations (number of inner loops) used in the numerical scheme");
-                  obj.info()->addParam(obj, "gamma", obj.gamma, false, 0, 0,
-                                       "coefficient for additional illumination variation term");
-                  obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow))
-
 } // namespace
 
-Ptr<DenseOpticalFlow> cv::createOptFlow_DualTVL1()
+Ptr<DualTVL1OpticalFlow> cv::createOptFlow_DualTVL1()
 {
     return makePtr<OpticalFlowDual_TVL1>();
 }
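With the CV_INIT_ALGORITHM registration block gone, callers move from run-time string lookup to the typed interface declared earlier in this patch. A short usage sketch, using only calls visible in this diff:

    #include <opencv2/video/tracking.hpp>
    #include <iostream>

    int main()
    {
        // 2.4 style (deleted above): alg->set("tau", 0.25) resolved the name in
        // the AlgorithmInfo table at run time; a typo compiled fine, failed later.
        // 3.0 style: the parameters are ordinary virtual methods on the interface.
        cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
        alg->setTau(0.25);
        alg->setMedianFiltering(5);
        std::cout << "epsilon = " << alg->getEpsilon() << std::endl;

        cv::Mat f0(64, 64, CV_8UC1, cv::Scalar(0)), f1 = f0.clone(), flow;
        alg->calc(f0, f1, flow); // flow comes back as CV_32FC2
        return 0;
    }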
@@ -82,11 +82,11 @@ OCL_TEST_P(OpticalFlowTVL1, Mat)
     cv::Mat flow; cv::UMat uflow;
 
     //create algorithm
-    cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
+    cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
 
     //set parameters
-    alg->set("scaleStep", scaleStep);
-    alg->setInt("medianFiltering", medianFiltering);
+    alg->setScaleStep(scaleStep);
+    alg->setMedianFiltering(medianFiltering);
 
     //create initial flow as result of algorithm calculation
     if (useInitFlow)
@@ -96,7 +96,7 @@ OCL_TEST_P(OpticalFlowTVL1, Mat)
     }
 
     //set flag to use initial flow as it is ready to use
-    alg->setBool("useInitialFlow", useInitFlow);
+    alg->setUseInitialFlow(useInitFlow);
 
     OCL_OFF(alg->calc(frame0, frame1, flow));
     OCL_ON(alg->calc(frame0, frame1, uflow));
@@ -114,4 +114,4 @@ OCL_INSTANTIATE_TEST_CASE_P(Video, OpticalFlowTVL1,
 
 } } // namespace cvtest::ocl
 
 #endif // HAVE_OPENCL
@@ -36,9 +36,11 @@ int main( int /*argc*/, char** /*argv*/ )
     samples = samples.reshape(1, 0);
 
     // cluster the data
-    Ptr<EM> em_model = EM::train( samples, noArray(), labels, noArray(),
-                                  EM::Params(N, EM::COV_MAT_SPHERICAL,
-                                             TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1)));
+    Ptr<EM> em_model = EM::create();
+    em_model->setClustersNumber(N);
+    em_model->setCovarianceMatrixType(EM::COV_MAT_SPHERICAL);
+    em_model->setTermCriteria(TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1));
+    em_model->trainEM( samples, noArray(), labels, noArray() );
 
     // classify every image pixel
     for( i = 0; i < img.rows; i++ )
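Once trainEM has run, new samples are classified through the model's prediction interface. A sketch, assuming the 3.0 cv::ml::EM API in which predict2 returns a Vec2d holding the likelihood and the index of the most probable mixture component:

    #include <opencv2/ml.hpp>
    using namespace cv;
    using namespace cv::ml;

    int main()
    {
        // Two tiny 2-D clusters as training data.
        float pts[6][2] = { {0,0}, {1,0}, {0,1}, {10,10}, {11,10}, {10,11} };
        Mat samples(6, 2, CV_32FC1, pts);

        Ptr<EM> em = EM::create();
        em->setClustersNumber(2);
        em->setCovarianceMatrixType(EM::COV_MAT_SPHERICAL);
        em->trainEM(samples, noArray(), noArray(), noArray());

        // predict2 returns {likelihood, index of the most probable component}.
        Mat query = (Mat_<float>(1, 2) << 10.5f, 10.2f);
        Vec2d res = em->predict2(query, noArray());
        int label = cvRound(res[1]);  // expected: the cluster around (10,10)
        return label;
    }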
@@ -178,8 +178,23 @@ build_rtrees_classifier( const string& data_filename,
     {
         // create classifier by using <data> and <responses>
         cout << "Training the classifier ...\n";
+        // Params( int maxDepth, int minSampleCount,
+        //         double regressionAccuracy, bool useSurrogates,
+        //         int maxCategories, const Mat& priors,
+        //         bool calcVarImportance, int nactiveVars,
+        //         TermCriteria termCrit );
         Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
-        model = StatModel::train<RTrees>(tdata, RTrees::Params(10,10,0,false,15,Mat(),true,4,TC(100,0.01f)));
+        model = RTrees::create();
+        model->setMaxDepth(10);
+        model->setMinSampleCount(10);
+        model->setRegressionAccuracy(0);
+        model->setUseSurrogates(false);
+        model->setMaxCategories(15);
+        model->setPriors(Mat());
+        model->setCalculateVarImportance(true);
+        model->setActiveVarCount(4);
+        model->setTermCriteria(TC(100,0.01f));
+        model->train(tdata);
         cout << endl;
     }
 
@@ -269,7 +284,14 @@ build_boost_classifier( const string& data_filename,
         priors[1] = 26;
 
         cout << "Training the classifier (may take a few minutes)...\n";
-        model = StatModel::train<Boost>(tdata, Boost::Params(Boost::GENTLE, 100, 0.95, 5, false, Mat(priors) ));
+        model = Boost::create();
+        model->setBoostType(Boost::GENTLE);
+        model->setWeakCount(100);
+        model->setWeightTrimRate(0.95);
+        model->setMaxDepth(5);
+        model->setUseSurrogates(false);
+        model->setPriors(Mat(priors));
+        model->train(tdata);
         cout << endl;
     }
 
@@ -374,11 +396,11 @@ build_mlp_classifier( const string& data_filename,
     Mat layer_sizes( 1, nlayers, CV_32S, layer_sz );
 
 #if 1
-    int method = ANN_MLP::Params::BACKPROP;
+    int method = ANN_MLP::BACKPROP;
     double method_param = 0.001;
     int max_iter = 300;
 #else
-    int method = ANN_MLP::Params::RPROP;
+    int method = ANN_MLP::RPROP;
     double method_param = 0.1;
     int max_iter = 1000;
 #endif
@@ -386,7 +408,12 @@ build_mlp_classifier( const string& data_filename,
     Ptr<TrainData> tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses);
 
     cout << "Training the classifier (may take a few minutes)...\n";
-    model = StatModel::train<ANN_MLP>(tdata, ANN_MLP::Params(layer_sizes, ANN_MLP::SIGMOID_SYM, 0, 0, TC(max_iter,0), method, method_param));
+    model = ANN_MLP::create();
+    model->setLayerSizes(layer_sizes);
+    model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
+    model->setTermCriteria(TC(max_iter,0));
+    model->setTrainMethod(method, method_param);
+    model->train(tdata);
     cout << endl;
     }
 
@@ -403,7 +430,6 @@ build_knearest_classifier( const string& data_filename, int K )
     if( !ok )
         return ok;
 
-    Ptr<KNearest> model;
 
     int nsamples_all = data.rows;
     int ntrain_samples = (int)(nsamples_all*0.8);
@@ -411,7 +437,10 @@ build_knearest_classifier( const string& data_filename, int K )
     // create classifier by using <data> and <responses>
     cout << "Training the classifier ...\n";
     Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
-    model = StatModel::train<KNearest>(tdata, KNearest::Params(K, true));
+    Ptr<KNearest> model = KNearest::create();
+    model->setDefaultK(K);
+    model->setIsClassifier(true);
+    model->train(tdata);
     cout << endl;
 
     test_and_save_classifier(model, data, responses, ntrain_samples, 0, string());
@@ -435,7 +464,8 @@ build_nbayes_classifier( const string& data_filename )
     // create classifier by using <data> and <responses>
     cout << "Training the classifier ...\n";
     Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
-    model = StatModel::train<NormalBayesClassifier>(tdata, NormalBayesClassifier::Params());
+    model = NormalBayesClassifier::create();
+    model->train(tdata);
     cout << endl;
 
     test_and_save_classifier(model, data, responses, ntrain_samples, 0, string());
@@ -471,13 +501,11 @@ build_svm_classifier( const string& data_filename,
     // create classifier by using <data> and <responses>
     cout << "Training the classifier ...\n";
     Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
-
-    SVM::Params params;
-    params.svmType = SVM::C_SVC;
-    params.kernelType = SVM::LINEAR;
-    params.C = 1;
-
-    model = StatModel::train<SVM>(tdata, params);
+    model = SVM::create();
+    model->setType(SVM::C_SVC);
+    model->setKernel(SVM::LINEAR);
+    model->setC(1);
+    model->train(tdata);
     cout << endl;
     }
 
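Every classifier in this sample follows the same rewrite: construct with create(), configure with typed setters, then call train. The common shape, condensed into one helper (mirrors the SVM hunk above; names are only illustrative):

    #include <opencv2/ml.hpp>
    using namespace cv;
    using namespace cv::ml;

    // 2.4:  model = StatModel::train<SVM>(tdata, params);
    // 3.0:  model = SVM::create(); model->set...(...); model->train(tdata);
    static Ptr<SVM> make_linear_svm(const Ptr<TrainData>& tdata)
    {
        Ptr<SVM> model = SVM::create();
        model->setType(SVM::C_SVC);
        model->setKernel(SVM::LINEAR);
        model->setC(1);
        model->train(tdata);
        return model;
    }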
@@ -132,20 +132,16 @@ int main()
     showImage(data_train, 28, "train data");
     showImage(data_test, 28, "test data");
 
 
     // simple case with batch gradient
-    LogisticRegression::Params params = LogisticRegression::Params(
-        0.001, 10, LogisticRegression::BATCH, LogisticRegression::REG_L2, 1, 1);
-    // simple case with mini-batch gradient
-    // LogisticRegression::Params params = LogisticRegression::Params(
-    //     0.001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1);
-
-    // mini-batch gradient with higher accuracy
-    // LogisticRegression::Params params = LogisticRegression::Params(
-    //     0.000001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1);
-
     cout << "training...";
-    Ptr<StatModel> lr1 = LogisticRegression::create(params);
+    //! [init]
+    Ptr<LogisticRegression> lr1 = LogisticRegression::create();
+    lr1->setLearningRate(0.001);
+    lr1->setIterations(10);
+    lr1->setRegularization(LogisticRegression::REG_L2);
+    lr1->setTrainMethod(LogisticRegression::BATCH);
+    lr1->setMiniBatchSize(1);
+    //! [init]
     lr1->train(data_train, ROW_SAMPLE, labels_train);
     cout << "done!" << endl;
 
@@ -102,7 +102,7 @@ static void predict_and_paint(const Ptr<StatModel>& model, Mat& dst)
 static void find_decision_boundary_NBC()
 {
     // learn classifier
-    Ptr<NormalBayesClassifier> normalBayesClassifier = StatModel::train<NormalBayesClassifier>(prepare_train_data(), NormalBayesClassifier::Params());
+    Ptr<NormalBayesClassifier> normalBayesClassifier = StatModel::train<NormalBayesClassifier>(prepare_train_data());
 
     predict_and_paint(normalBayesClassifier, imgDst);
 }
@@ -112,15 +112,29 @@ static void find_decision_boundary_NBC()
 #if _KNN_
 static void find_decision_boundary_KNN( int K )
 {
-    Ptr<KNearest> knn = StatModel::train<KNearest>(prepare_train_data(), KNearest::Params(K, true));
+    Ptr<KNearest> knn = KNearest::create();
+    knn->setDefaultK(K);
+    knn->setIsClassifier(true);
+    knn->train(prepare_train_data());
     predict_and_paint(knn, imgDst);
 }
 #endif
 
 #if _SVM_
-static void find_decision_boundary_SVM( SVM::Params params )
+static void find_decision_boundary_SVM( double C )
 {
-    Ptr<SVM> svm = StatModel::train<SVM>(prepare_train_data(), params);
+    Ptr<SVM> svm = SVM::create();
+    svm->setType(SVM::C_SVC);
+    svm->setKernel(SVM::POLY); //SVM::LINEAR;
+    svm->setDegree(0.5);
+    svm->setGamma(1);
+    svm->setCoef0(1);
+    svm->setNu(0.5);
+    svm->setP(0);
+    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01));
+    svm->setC(C);
+    svm->train(prepare_train_data());
     predict_and_paint(svm, imgDst);
 
     Mat sv = svm->getSupportVectors();
@@ -135,16 +149,14 @@ static void find_decision_boundary_SVM( SVM::Params params )
 #if _DT_
 static void find_decision_boundary_DT()
 {
-    DTrees::Params params;
-    params.maxDepth = 8;
-    params.minSampleCount = 2;
-    params.useSurrogates = false;
-    params.CVFolds = 0; // the number of cross-validation folds
-    params.use1SERule = false;
-    params.truncatePrunedTree = false;
-
-    Ptr<DTrees> dtree = StatModel::train<DTrees>(prepare_train_data(), params);
-
+    Ptr<DTrees> dtree = DTrees::create();
+    dtree->setMaxDepth(8);
+    dtree->setMinSampleCount(2);
+    dtree->setUseSurrogates(false);
+    dtree->setCVFolds(0); // the number of cross-validation folds
+    dtree->setUse1SERule(false);
+    dtree->setTruncatePrunedTree(false);
+    dtree->train(prepare_train_data());
     predict_and_paint(dtree, imgDst);
 }
 #endif
@@ -152,15 +164,14 @@ static void find_decision_boundary_DT()
 #if _BT_
 static void find_decision_boundary_BT()
 {
-    Boost::Params params( Boost::DISCRETE, // boost_type
-                          100, // weak_count
-                          0.95, // weight_trim_rate
-                          2, // max_depth
-                          false, //use_surrogates
-                          Mat() // priors
-                          );
-
-    Ptr<Boost> boost = StatModel::train<Boost>(prepare_train_data(), params);
+    Ptr<Boost> boost = Boost::create();
+    boost->setBoostType(Boost::DISCRETE);
+    boost->setWeakCount(100);
+    boost->setWeightTrimRate(0.95);
+    boost->setMaxDepth(2);
+    boost->setUseSurrogates(false);
+    boost->setPriors(Mat());
+    boost->train(prepare_train_data());
     predict_and_paint(boost, imgDst);
 }
 
@@ -185,18 +196,17 @@ static void find_decision_boundary_GBT()
 #if _RF_
 static void find_decision_boundary_RF()
 {
-    RTrees::Params  params( 4, // max_depth,
-                            2, // min_sample_count,
-                            0.f, // regression_accuracy,
-                            false, // use_surrogates,
-                            16, // max_categories,
-                            Mat(), // priors,
-                            false, // calc_var_importance,
-                            1, // nactive_vars,
-                            TermCriteria(TermCriteria::MAX_ITER, 5, 0) // max_num_of_trees_in_the_forest,
-                            );
-
-    Ptr<RTrees> rtrees = StatModel::train<RTrees>(prepare_train_data(), params);
+    Ptr<RTrees> rtrees = RTrees::create();
+    rtrees->setMaxDepth(4);
+    rtrees->setMinSampleCount(2);
+    rtrees->setRegressionAccuracy(0.f);
+    rtrees->setUseSurrogates(false);
+    rtrees->setMaxCategories(16);
+    rtrees->setPriors(Mat());
+    rtrees->setCalculateVarImportance(false);
+    rtrees->setActiveVarCount(1);
+    rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 5, 0));
+    rtrees->train(prepare_train_data());
     predict_and_paint(rtrees, imgDst);
 }
 
@@ -205,9 +215,6 @@ static void find_decision_boundary_RF()
 #if _ANN_
 static void find_decision_boundary_ANN( const Mat& layer_sizes )
 {
-    ANN_MLP::Params params(layer_sizes, ANN_MLP::SIGMOID_SYM, 1, 1, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON),
-                           ANN_MLP::Params::BACKPROP, 0.001);
-
     Mat trainClasses = Mat::zeros( (int)trainedPoints.size(), (int)classColors.size(), CV_32FC1 );
     for( int i = 0; i < trainClasses.rows; i++ )
     {
@@ -217,7 +224,12 @@ static void find_decision_boundary_ANN( const Mat& layer_sizes )
     Mat samples = prepare_train_samples(trainedPoints);
     Ptr<TrainData> tdata = TrainData::create(samples, ROW_SAMPLE, trainClasses);
 
-    Ptr<ANN_MLP> ann = StatModel::train<ANN_MLP>(tdata, params);
+    Ptr<ANN_MLP> ann = ANN_MLP::create();
+    ann->setLayerSizes(layer_sizes);
+    ann->setActivationFunction(ANN_MLP::SIGMOID_SYM, 1, 1);
+    ann->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON));
+    ann->setTrainMethod(ANN_MLP::BACKPROP, 0.001);
+    ann->train(tdata);
     predict_and_paint(ann, imgDst);
 }
 #endif
@@ -247,8 +259,11 @@ static void find_decision_boundary_EM()
         // learn models
         if( !modelSamples.empty() )
         {
-            em_models[i] = EM::train(modelSamples, noArray(), noArray(), noArray(),
-                                     EM::Params(componentCount, EM::COV_MAT_DIAGONAL));
+            Ptr<EM> em = EM::create();
+            em->setClustersNumber(componentCount);
+            em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
+            em->trainEM(modelSamples, noArray(), noArray(), noArray());
+            em_models[i] = em;
         }
     }
 
@@ -332,33 +347,20 @@ int main()
     imshow( "NormalBayesClassifier", imgDst );
 #endif
 #if _KNN_
-    int K = 3;
-    find_decision_boundary_KNN( K );
+    find_decision_boundary_KNN( 3 );
     imshow( "kNN", imgDst );
 
-    K = 15;
-    find_decision_boundary_KNN( K );
+    find_decision_boundary_KNN( 15 );
     imshow( "kNN2", imgDst );
 #endif
 
 #if _SVM_
     //(1)-(2)separable and not sets
-    SVM::Params params;
-    params.svmType = SVM::C_SVC;
-    params.kernelType = SVM::POLY; //CvSVM::LINEAR;
-    params.degree = 0.5;
-    params.gamma = 1;
-    params.coef0 = 1;
-    params.C = 1;
-    params.nu = 0.5;
-    params.p = 0;
-    params.termCrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01);
 
-    find_decision_boundary_SVM( params );
+    find_decision_boundary_SVM( 1 );
     imshow( "classificationSVM1", imgDst );
 
-    params.C = 10;
-    find_decision_boundary_SVM( params );
+    find_decision_boundary_SVM( 10 );
     imshow( "classificationSVM2", imgDst );
 #endif
 
@@ -141,7 +141,7 @@ Mat get_hogdescriptor_visu(const Mat& color_origImg, vector<float>& descriptorVa
 
     int cellSize        = 8;
     int gradientBinSize = 9;
-    float radRangeForOneBin = (float)(CV_PI/(float)gradientBinSize); // dividing 180° into 9 bins, how large (in rad) is one bin?
+    float radRangeForOneBin = (float)(CV_PI/(float)gradientBinSize); // dividing 180 into 9 bins, how large (in rad) is one bin?
 
     // prepare data structure: 9 orientation / gradient strenghts for each cell
     int cells_in_x_dir = DIMX / cellSize;
@@ -313,23 +313,23 @@ void compute_hog( const vector< Mat > & img_lst, vector< Mat > & gradient_lst, c
 
 void train_svm( const vector< Mat > & gradient_lst, const vector< int > & labels )
 {
-    /* Default values to train SVM */
-    SVM::Params params;
-    params.coef0 = 0.0;
-    params.degree = 3;
-    params.termCrit.epsilon = 1e-3;
-    params.gamma = 0;
-    params.kernelType = SVM::LINEAR;
-    params.nu = 0.5;
-    params.p = 0.1; // for EPSILON_SVR, epsilon in loss function?
-    params.C = 0.01; // From paper, soft classifier
-    params.svmType = SVM::EPS_SVR; // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task
-
     Mat train_data;
     convert_to_ml( gradient_lst, train_data );
 
     clog << "Start training...";
-    Ptr<SVM> svm = StatModel::train<SVM>(train_data, ROW_SAMPLE, Mat(labels), params);
+    Ptr<SVM> svm = SVM::create();
+    /* Default values to train SVM */
+    svm->setCoef0(0.0);
+    svm->setDegree(3);
+    svm->setTermCriteria(TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, 1e-3 ));
+    svm->setGamma(0);
+    svm->setKernel(SVM::LINEAR);
+    svm->setNu(0.5);
+    svm->setP(0.1); // for EPSILON_SVR, epsilon in loss function?
+    svm->setC(0.01); // From paper, soft classifier
+    svm->setType(SVM::EPS_SVR); // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task
+    svm->train(train_data, ROW_SAMPLE, Mat(labels));
     clog << "...[done]" << endl;
 
     svm->save( "my_people_detector.yml" );
@@ -73,18 +73,42 @@ int main(int argc, char** argv)
     data->setTrainTestSplitRatio(train_test_split_ratio);
 
     printf("======DTREE=====\n");
-    Ptr<DTrees> dtree = DTrees::create(DTrees::Params( 10, 2, 0, false, 16, 0, false, false, Mat() ));
+    Ptr<DTrees> dtree = DTrees::create();
+    dtree->setMaxDepth(10);
+    dtree->setMinSampleCount(2);
+    dtree->setRegressionAccuracy(0);
+    dtree->setUseSurrogates(false);
+    dtree->setMaxCategories(16);
+    dtree->setCVFolds(0);
+    dtree->setUse1SERule(false);
+    dtree->setTruncatePrunedTree(false);
+    dtree->setPriors(Mat());
     train_and_print_errs(dtree, data);
 
     if( (int)data->getClassLabels().total() <= 2 ) // regression or 2-class classification problem
     {
         printf("======BOOST=====\n");
-        Ptr<Boost> boost = Boost::create(Boost::Params(Boost::GENTLE, 100, 0.95, 2, false, Mat()));
+        Ptr<Boost> boost = Boost::create();
+        boost->setBoostType(Boost::GENTLE);
+        boost->setWeakCount(100);
+        boost->setWeightTrimRate(0.95);
+        boost->setMaxDepth(2);
+        boost->setUseSurrogates(false);
+        boost->setPriors(Mat());
         train_and_print_errs(boost, data);
     }
 
     printf("======RTREES=====\n");
-    Ptr<RTrees> rtrees = RTrees::create(RTrees::Params(10, 2, 0, false, 16, Mat(), false, 0, TermCriteria(TermCriteria::MAX_ITER, 100, 0)));
+    Ptr<RTrees> rtrees = RTrees::create();
+    rtrees->setMaxDepth(10);
+    rtrees->setMinSampleCount(2);
+    rtrees->setRegressionAccuracy(0);
+    rtrees->setUseSurrogates(false);
+    rtrees->setMaxCategories(16);
+    rtrees->setPriors(Mat());
+    rtrees->setCalculateVarImportance(false);
+    rtrees->setActiveVarCount(0);
+    rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 0));
     train_and_print_errs(rtrees, data);
 
     return 0;
@@ -138,7 +138,7 @@ int main(int argc, char **argv)
 
     Stats stats, akaze_stats, orb_stats;
     Ptr<AKAZE> akaze = AKAZE::create();
-    akaze->set("threshold", akaze_thresh);
+    akaze->setThreshold(akaze_thresh);
     Ptr<ORB> orb = ORB::create();
     orb->setMaxFeatures(stats.keypoints);
     Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
@@ -163,7 +163,7 @@ int main(int argc, char **argv)
         akaze_draw_stats = stats;
     }
 
-    orb_tracker.getDetector()->set("nFeatures", stats.keypoints);
+    orb->setMaxFeatures(stats.keypoints);
     orb_res = orb_tracker.process(frame, stats);
     orb_stats += stats;
     if(update_stats) {
@@ -14,23 +14,30 @@ int main(int, char**)
     Mat image = Mat::zeros(height, width, CV_8UC3);
 
     // Set up training data
+    //! [setup1]
     int labels[4] = {1, -1, -1, -1};
-    Mat labelsMat(4, 1, CV_32SC1, labels);
-
     float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
+    //! [setup1]
+    //! [setup2]
     Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
+    Mat labelsMat(4, 1, CV_32SC1, labels);
+    //! [setup2]
 
-    // Set up SVM's parameters
-    SVM::Params params;
-    params.svmType    = SVM::C_SVC;
-    params.kernelType = SVM::LINEAR;
-    params.termCrit   = TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6);
-
     // Train the SVM
-    Ptr<SVM> svm = StatModel::train<SVM>(trainingDataMat, ROW_SAMPLE, labelsMat, params);
+    //! [init]
+    Ptr<SVM> svm = SVM::create();
+    svm->setType(SVM::C_SVC);
+    svm->setKernel(SVM::LINEAR);
+    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
+    //! [init]
+    //! [train]
+    svm->train(trainingDataMat, ROW_SAMPLE, labelsMat);
+    //! [train]
 
-    Vec3b green(0,255,0), blue (255,0,0);
     // Show the decision regions given by the SVM
+    //! [show]
+    Vec3b green(0,255,0), blue (255,0,0);
     for (int i = 0; i < image.rows; ++i)
         for (int j = 0; j < image.cols; ++j)
         {
@@ -42,16 +49,20 @@ int main(int, char**)
             else if (response == -1)
                 image.at<Vec3b>(i,j)  = blue;
         }
+    //! [show]
 
     // Show the training data
+    //! [show_data]
     int thickness = -1;
     int lineType = 8;
     circle( image, Point(501,  10), 5, Scalar(  0,   0,   0), thickness, lineType );
     circle( image, Point(255,  10), 5, Scalar(255, 255, 255), thickness, lineType );
     circle( image, Point(501, 255), 5, Scalar(255, 255, 255), thickness, lineType );
     circle( image, Point( 10, 501), 5, Scalar(255, 255, 255), thickness, lineType );
+    //! [show_data]
 
     // Show support vectors
+    //! [show_vectors]
     thickness = 2;
     lineType  = 8;
     Mat sv = svm->getSupportVectors();
@@ -61,6 +72,7 @@ int main(int, char**)
         const float* v = sv.ptr<float>(i);
         circle( image,  Point( (int) v[0], (int) v[1]),   6,  Scalar(128, 128, 128), thickness, lineType);
     }
+    //! [show_vectors]
 
     imwrite("result.png", image);        // save the image
 
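The //! [init], //! [train], //! [show] pairs added above are Doxygen snippet markers: they delimit regions that a documentation page can embed by tag, so the tutorial text stays in sync with the compiled sample. A tutorial source would pull the marked region with something like the following (the path is illustrative, not taken from this diff):

    @snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp init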
@@ -39,6 +39,7 @@ int main()
     // Set up the linearly separable part of the training data
     int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES);
 
+    //! [setup1]
     // Generate random points for the class 1
     Mat trainClass = trainData.rowRange(0, nLinearSamples);
     // The x coordinate of the points is in [0, 0.4)
@@ -56,9 +57,10 @@ int main()
     // The y coordinate of the points is in [0, 1)
     c = trainClass.colRange(1,2);
     rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
+    //! [setup1]
 
     //------------------ Set up the non-linearly separable part of the training data ---------------
+    //! [setup2]
     // Generate random points for the classes 1 and 2
     trainClass = trainData.rowRange( nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
     // The x coordinate of the points is in [0.4, 0.6)
@@ -67,24 +69,28 @@ int main()
     // The y coordinate of the points is in [0, 1)
     c = trainClass.colRange(1,2);
     rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
+    //! [setup2]
     //------------------------- Set up the labels for the classes ---------------------------------
     labels.rowRange(                0,   NTRAINING_SAMPLES).setTo(1);  // Class 1
     labels.rowRange(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES).setTo(2);  // Class 2
 
     //------------------------ 2. Set up the support vector machines parameters --------------------
-    SVM::Params params;
-    params.svmType    = SVM::C_SVC;
-    params.C          = 0.1;
-    params.kernelType = SVM::LINEAR;
-    params.termCrit   = TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6);
-
     //------------------------ 3. Train the svm ----------------------------------------------------
     cout << "Starting training process" << endl;
-    Ptr<SVM> svm = StatModel::train<SVM>(trainData, ROW_SAMPLE, labels, params);
+    //! [init]
+    Ptr<SVM> svm = SVM::create();
+    svm->setType(SVM::C_SVC);
+    svm->setC(0.1);
+    svm->setKernel(SVM::LINEAR);
+    svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6));
+    //! [init]
+    //! [train]
+    svm->train(trainData, ROW_SAMPLE, labels);
+    //! [train]
     cout << "Finished training process" << endl;
 
     //------------------------ 4. Show the decision regions ----------------------------------------
+    //! [show]
     Vec3b green(0,100,0), blue (100,0,0);
     for (int i = 0; i < I.rows; ++i)
         for (int j = 0; j < I.cols; ++j)
@@ -95,8 +101,10 @@ int main()
             if      (response == 1) I.at<Vec3b>(j, i) = green;
             else if (response == 2) I.at<Vec3b>(j, i) = blue;
         }
+    //! [show]
 
     //----------------------- 5. Show the training data --------------------------------------------
+    //! [show_data]
     int thick = -1;
     int lineType = 8;
     float px, py;
@@ -114,8 +122,10 @@ int main()
         py = trainData.at<float>(i,1);
         circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick, lineType);
     }
+    //! [show_data]
 
     //------------------------- 6. Show support vectors --------------------------------------------
+    //! [show_vectors]
     thick = 2;
     lineType  = 8;
     Mat sv = svm->getSupportVectors();
@@ -125,6 +135,7 @@ int main()
         const float* v = sv.ptr<float>(i);
         circle( I,  Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType);
     }
+    //! [show_vectors]
 
     imwrite("result.png", I);                      // save the Image
     imshow("SVM for Non-Linear Training Data", I); // show it to the user
@@ -26,32 +26,32 @@ using namespace cv::superres;
         cout << tm.getTimeSec() << " sec" << endl; \
     }
 
-static Ptr<DenseOpticalFlowExt> createOptFlow(const string& name, bool useGpu)
+static Ptr<cv::superres::DenseOpticalFlowExt> createOptFlow(const string& name, bool useGpu)
 {
     if (name == "farneback")
     {
         if (useGpu)
-            return createOptFlow_Farneback_CUDA();
+            return cv::superres::createOptFlow_Farneback_CUDA();
         else
-            return createOptFlow_Farneback();
+            return cv::superres::createOptFlow_Farneback();
     }
     /*else if (name == "simple")
         return createOptFlow_Simple();*/
     else if (name == "tvl1")
     {
         if (useGpu)
-            return createOptFlow_DualTVL1_CUDA();
+            return cv::superres::createOptFlow_DualTVL1_CUDA();
         else
-            return createOptFlow_DualTVL1();
+            return cv::superres::createOptFlow_DualTVL1();
     }
     else if (name == "brox")
-        return createOptFlow_Brox_CUDA();
+        return cv::superres::createOptFlow_Brox_CUDA();
     else if (name == "pyrlk")
-        return createOptFlow_PyrLK_CUDA();
+        return cv::superres::createOptFlow_PyrLK_CUDA();
     else
         cerr << "Incorrect Optical Flow algorithm - " << name << endl;
 
-    return Ptr<DenseOpticalFlowExt>();
+    return Ptr<cv::superres::DenseOpticalFlowExt>();
 }
 
 int main(int argc, const char* argv[])
@@ -92,15 +92,15 @@ int main(int argc, const char* argv[])
     else
         superRes = createSuperResolution_BTVL1();
 
-    Ptr<DenseOpticalFlowExt> of = createOptFlow(optFlow, useCuda);
+    Ptr<cv::superres::DenseOpticalFlowExt> of = createOptFlow(optFlow, useCuda);
 
     if (of.empty())
         return EXIT_FAILURE;
-    superRes->set("opticalFlow", of);
+    superRes->setOpticalFlow(of);
 
-    superRes->set("scale", scale);
-    superRes->set("iterations", iterations);
-    superRes->set("temporalAreaRadius", temporalAreaRadius);
+    superRes->setScale(scale);
+    superRes->setIterations(iterations);
+    superRes->setTemporalAreaRadius(temporalAreaRadius);
 
     Ptr<FrameSource> frameSource;
     if (useCuda)
@@ -62,19 +62,17 @@ int main(int argc, char* argv[])
     cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;
 
     // matching descriptors
-    BFMatcher_CUDA matcher(surf.defaultNorm());
-    GpuMat trainIdx, distance;
-    matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);
+    Ptr<cv::cuda::DescriptorMatcher> matcher = cv::cuda::DescriptorMatcher::createBFMatcher(surf.defaultNorm());
+    vector<DMatch> matches;
+    matcher->match(descriptors1GPU, descriptors2GPU, matches);
 
     // downloading results
     vector<KeyPoint> keypoints1, keypoints2;
     vector<float> descriptors1, descriptors2;
-    vector<DMatch> matches;
     surf.downloadKeypoints(keypoints1GPU, keypoints1);
     surf.downloadKeypoints(keypoints2GPU, keypoints2);
     surf.downloadDescriptors(descriptors1GPU, descriptors1);
     surf.downloadDescriptors(descriptors2GPU, descriptors2);
-    BFMatcher_CUDA::matchDownload(trainIdx, distance, matches);
 
     // drawing the results
     Mat img_matches;
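The rewritten matcher hides the explicit trainIdx/distance GpuMats and the manual download step behind the generic DescriptorMatcher interface; the same object also offers k-nearest matching, which is handy for ratio-test filtering. A sketch under the assumption of the OpenCV 3.0 cudafeatures2d API (the helper and threshold are illustrative):

    #include <opencv2/cudafeatures2d.hpp>
    #include <vector>
    using namespace cv;

    // Ratio-test matching with the new interface; the descriptors are assumed
    // to already live on the GPU, e.g. produced by SURF as in the sample above.
    static std::vector<DMatch> ratioMatch(const cuda::GpuMat& d1, const cuda::GpuMat& d2)
    {
        Ptr<cuda::DescriptorMatcher> matcher =
            cuda::DescriptorMatcher::createBFMatcher(NORM_L2);

        std::vector<std::vector<DMatch> > knn;
        matcher->knnMatch(d1, d2, knn, 2);  // two nearest neighbours per query

        std::vector<DMatch> good;
        for (size_t i = 0; i < knn.size(); ++i)
            if (knn[i].size() == 2 && knn[i][0].distance < 0.8f * knn[i][1].distance)
                good.push_back(knn[i][0]);
        return good;
    }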