Updated ml module interfaces and documentation
(mirror of https://github.com/opencv/opencv.git; commit 79e8f0680c, parent da383e65e2)
@@ -244,7 +244,10 @@ PREDEFINED = __cplusplus=1 \
CV_DEFAULT(x)=" = x" \
CV_NEON=1 \
FLANN_DEPRECATED= \
"CV_PURE_PROPERTY(type, name)= /**\@{*/ virtual type get##name() const = 0; virtual void set##name(type _##name) = 0; /**\@}*/"
"CV_PURE_PROPERTY(type, name)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
"CV_IMPL_PROPERTY(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
"CV_IMPL_PROPERTY_S(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(const type & val);" \
"CV_IMPL_PROPERTY_RO(type, name, x)= virtual type get##name() const;"
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
TAGFILES =
@@ -1,8 +1,6 @@
Introduction to Support Vector Machines {#tutorial_introduction_to_svm}
=======================================

@todo update this tutorial

Goal
----

@@ -31,13 +29,11 @@ understand that this is done only because our intuition is better built from exa
to imagine. However, the same concepts apply to tasks where the examples to classify lie in a space
whose dimension is higher than two.

In the above picture you can see that there exist multiple
lines that offer a solution to the problem. Is any of them better than the others? We can
intuitively define a criterion to estimate the worth of the lines:

- A line is bad if it passes too close to the points because it will be noise sensitive and it will
not generalize correctly. Therefore, our goal should be to find the line passing as far as
possible from all points.
In the above picture you can see that there exist multiple lines that offer a solution to the
problem. Is any of them better than the others? We can intuitively define a criterion to estimate
the worth of the lines: <em> A line is bad if it passes too close to the points because it will be
noise sensitive and it will not generalize correctly. </em> Therefore, our goal should be to find
the line passing as far as possible from all points.

Then, the operation of the SVM algorithm is based on finding the hyperplane that gives the largest
minimum distance to the training examples. Twice, this distance receives the important name of
@@ -57,7 +53,7 @@ where \f$\beta\f$ is known as the *weight vector* and \f$\beta_{0}\f$ as the *bi

@sa A more in-depth description of this and of hyperplanes can be found in section 4.5 (*Separating
Hyperplanes*) of the book: *Elements of Statistical Learning* by T. Hastie, R. Tibshirani and J. H.
Friedman.
Friedman (@cite HTF01).

The optimal hyperplane can be represented in an infinite number of different ways by
scaling of \f$\beta\f$ and \f$\beta_{0}\f$. As a matter of convention, among all the possible
@@ -107,17 +103,14 @@ Explanation

The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
two different classes; one of the classes consists of one point and the other of three points.
@code{.cpp}
float labels[4] = {1.0, -1.0, -1.0, -1.0};
float trainingData[4][2] = {{501, 10}, {255, 10}, {501, 255}, {10, 501}};
@endcode

@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup1

The function @ref cv::ml::SVM::train that will be used afterwards requires the training data to be
stored as @ref cv::Mat objects of floats. Therefore, we create these objects from the arrays
defined above:
@code{.cpp}
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
Mat labelsMat(4, 1, CV_32FC1, labels);
@endcode

@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup2

-# **Set up SVM's parameters**

@@ -126,42 +119,35 @@ Explanation
used in a wide variety of problems (e.g. problems with non-linearly separable data, an SVM using
a kernel function to raise the dimensionality of the examples, etc). As a consequence of this,
we have to define some parameters before training the SVM. These parameters are stored in an
object of the class @ref cv::ml::SVM::Params .
@code{.cpp}
ml::SVM::Params params;
params.svmType    = ml::SVM::C_SVC;
params.kernelType = ml::SVM::LINEAR;
params.termCrit   = TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6);
@endcode
- *Type of SVM*. We choose here the type **ml::SVM::C_SVC** that can be used for n-class
classification (n \f$\geq\f$ 2). This parameter is defined in the attribute
*ml::SVM::Params.svmType*.
object of the class @ref cv::ml::SVM.

The important feature of the SVM type **CvSVM::C_SVC** is that it deals with imperfect separation of classes (i.e. when the training data is non-linearly separable). This feature is not important here since the data is linearly separable, and we chose this SVM type only for being the most commonly used.
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp init

Here:
- *Type of SVM*. We choose here the type @ref cv::ml::SVM::C_SVC "C_SVC" that can be used for
n-class classification (n \f$\geq\f$ 2). The important feature of this type is that it deals
with imperfect separation of classes (i.e. when the training data is non-linearly separable).
This feature is not important here since the data is linearly separable, and we chose this SVM
type only for being the most commonly used.

- *Type of SVM kernel*. We have not talked about kernel functions since they are not
interesting for the training data we are dealing with. Nevertheless, let's explain briefly
now the main idea behind a kernel function. It is a mapping done to the training data to
improve its resemblance to a linearly separable set of data. This mapping consists of
increasing the dimensionality of the data and is done efficiently using a kernel function.
We choose here the type **ml::SVM::LINEAR** which means that no mapping is done. This
parameter is defined in the attribute *ml::SVMParams.kernel_type*.
interesting for the training data we are dealing with. Nevertheless, let's explain briefly now
the main idea behind a kernel function. It is a mapping done to the training data to improve
its resemblance to a linearly separable set of data. This mapping consists of increasing the
dimensionality of the data and is done efficiently using a kernel function. We choose here the
type @ref cv::ml::SVM::LINEAR "LINEAR" which means that no mapping is done. This parameter is
defined using cv::ml::SVM::setKernel.

- *Termination criteria of the algorithm*. The SVM training procedure is implemented solving a
constrained quadratic optimization problem in an **iterative** fashion. Here we specify a
maximum number of iterations and a tolerance error so we allow the algorithm to finish in
fewer steps even if the optimal hyperplane has not been computed yet. This
parameter is defined in a structure @ref cv::cvTermCriteria .
parameter is defined in a structure @ref cv::TermCriteria .

-# **Train the SVM**
We call the method @ref cv::ml::SVM::train to build the SVM model.

We call the method
[CvSVM::train](http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#cvsvm-train)
to build the SVM model.
@code{.cpp}
CvSVM SVM;
SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp train
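For orientation, a minimal sketch of the same setup and training step through the setter-based
interface this commit moves towards (assuming the OpenCV 3.x-style `cv::ml` methods `create`,
`setType`, `setKernel`, `setTermCriteria` and `train`; in that interface the class labels are
expected as a CV_32S matrix):
@code{.cpp}
Ptr<ml::SVM> svm = ml::SVM::create();
svm->setType(ml::SVM::C_SVC);       // n-class classification
svm->setKernel(ml::SVM::LINEAR);    // no kernel mapping
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
svm->train(trainingDataMat, ml::ROW_SAMPLE, labelsMat);
@endcode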

-# **Regions classified by the SVM**

@@ -170,22 +156,8 @@ Explanation
by the SVM. In other words, an image is traversed interpreting its pixels as points of the
Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in
green if it is the class with label 1 and in blue if it is the class with label -1.
@code{.cpp}
Vec3b green(0,255,0), blue(255,0,0);

for (int i = 0; i < image.rows; ++i)
    for (int j = 0; j < image.cols; ++j)
    {
        Mat sampleMat = (Mat_<float>(1,2) << i,j);
        float response = SVM.predict(sampleMat);

        if (response == 1)
            image.at<Vec3b>(j, i) = green;
        else if (response == -1)
            image.at<Vec3b>(j, i) = blue;
    }
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show
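With the refactored interface the model is held through a `Ptr`, so the classification of one
pixel reads as follows (a hedged sketch; `svm` is the pointer trained above):
@code{.cpp}
Mat sampleMat = (Mat_<float>(1, 2) << i, j);
float response = svm->predict(sampleMat);
if (response == 1)
    image.at<Vec3b>(j, i) = green;
@endcode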

-# **Support vectors**

@@ -193,15 +165,8 @@ Explanation
The method @ref cv::ml::SVM::getSupportVectors obtains all of the support
vectors. We have used this method here to find the training examples that are
support vectors and highlight them.
@code{.cpp}
int c = SVM.get_support_vector_count();

for (int i = 0; i < c; ++i)
{
    const float* v = SVM.get_support_vector(i); // get and then highlight with grayscale
    circle(image, Point((int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
}
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show_vectors
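In the refactored API the support vectors come back in a single matrix rather than through a
count/pointer pair. A minimal sketch, assuming `getSupportVectors` returns one support vector
per row:
@code{.cpp}
Mat sv = svm->getSupportVectors();
for (int i = 0; i < sv.rows; ++i)
{
    const float* v = sv.ptr<float>(i);
    circle(image, Point((int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
}
@endcode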

Results
-------
@@ -1,8 +1,6 @@
Support Vector Machines for Non-Linearly Separable Data {#tutorial_non_linear_svms}
=======================================================

@todo update this tutorial

Goal
----

@@ -10,21 +8,20 @@ In this tutorial you will learn how to:

- Define the optimization problem for SVMs when it is not possible to separate linearly the
training data.
- How to configure the parameters in @ref cv::ml::SVM::Params to adapt your SVM for this class of
problems.
- How to configure the parameters to adapt your SVM for this class of problems.

Motivation
----------

Why is it interesting to extend the SVM optimization problem in order to handle non-linearly separable
training data? Most of the applications in which SVMs are used in computer vision require a more
powerful tool than a simple linear classifier. This stems from the fact that in these tasks **the
training data can rarely be separated using a hyperplane**.
powerful tool than a simple linear classifier. This stems from the fact that in these tasks __the
training data can rarely be separated using a hyperplane__.

Consider one of these tasks, for example, face detection. The training data in this case is composed
of a set of images that are faces and another set of images that are non-faces (*every other thing
in the world except faces*). This training data is too complex for us to find a representation
of each sample (*feature vector*) that could make the whole set of faces linearly separable from the
of a set of images that are faces and another set of images that are non-faces (_every other thing
in the world except faces_). This training data is too complex for us to find a representation
of each sample (_feature vector_) that could make the whole set of faces linearly separable from the
whole set of non-faces.

Extension of the Optimization Problem
@@ -32,13 +29,13 @@ Extension of the Optimization Problem

Remember that using SVMs we obtain a separating hyperplane. Therefore, since the training data is
now non-linearly separable, we must admit that the hyperplane found will misclassify some of the
samples. This *misclassification* is a new variable in the optimization that must be taken into
samples. This _misclassification_ is a new variable in the optimization that must be taken into
account. The new model has to include both the old requirement of finding the hyperplane that gives
the biggest margin and the new one of generalizing the training data correctly by not allowing too
many classification errors.

We start here from the formulation of the optimization problem of finding the hyperplane which
maximizes the **margin** (this is explained in the previous tutorial @ref tutorial_introduction_to_svm):
maximizes the __margin__ (this is explained in the previous tutorial @ref tutorial_introduction_to_svm):

\f[\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \text{ } \forall i\f]

@@ -50,8 +47,8 @@ constant times the number of misclassification errors in the training data, i.e.

However, this one is not a very good solution since, among some other reasons, we do not distinguish
between samples that are misclassified with a small distance to their appropriate decision region and
samples that are not. Therefore, a better solution will take into account the *distance of the
misclassified samples to their correct decision regions*, i.e.:
samples that are not. Therefore, a better solution will take into account the _distance of the
misclassified samples to their correct decision regions_, i.e.:

\f[\min ||\beta||^{2} + C \text{(distance of misclassified samples to their correct regions)}\f]

@@ -68,7 +65,7 @@ distances of the rest of the samples are zero since they lay already in their co
region.

The red and blue lines that appear on the picture are the margins to each one of the
decision regions. It is very **important** to realize that each of the \f$\xi_{i}\f$ goes from a
decision regions. It is very __important__ to realize that each of the \f$\xi_{i}\f$ goes from a
misclassified training sample to the margin of its appropriate region.

Finally, the new formulation for the optimization problem is:
@@ -79,26 +76,25 @@ How should the parameter C be chosen? It is obvious that the answer to this ques
the training data is distributed. Although there is no general answer, it is useful to take into
account these rules:

- Large values of C give solutions with *fewer misclassification errors* but a *smaller margin*.
- Large values of C give solutions with _fewer misclassification errors_ but a _smaller margin_.
Consider that in this case it is expensive to make misclassification errors. Since the aim of
the optimization is to minimize the argument, few misclassification errors are allowed.
- Small values of C give solutions with a *bigger margin* and *more classification errors*. In this
- Small values of C give solutions with a _bigger margin_ and _more classification errors_. In this
case the minimization does not weigh the sum term as much, so it focuses more on
finding a hyperplane with a big margin (a short sketch of setting C follows this list).
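In the setter-based interface this commit introduces, C is configured with a single call. A
minimal sketch of the two regimes described above (the values are illustrative only):
@code{.cpp}
svm->setC(0.1);    // small C: bigger margin, misclassifications tolerated
// svm->setC(100.); // large C: fewer misclassifications, smaller margin
@endcode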

Source Code
-----------

You may also find the source code and the video file in the
`samples/cpp/tutorial_code/gpu/non_linear_svms/non_linear_svms` folder of the OpenCV source library
or [download it from here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
You may also find the source code in the `samples/cpp/tutorial_code/ml/non_linear_svms` folder of the OpenCV source library or
[download it from here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).

@includelineno cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp

Explanation
-----------

-# **Set up the training data**
-# __Set up the training data__

The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
two different classes. To make the exercise more appealing, the training data is generated
@@ -107,136 +103,67 @@ Explanation
We have divided the generation of the training data into two main parts.

In the first part we generate data for both classes that is linearly separable.
@code{.cpp}
// Generate random points for the class 1
Mat trainClass = trainData.rowRange(0, nLinearSamples);
// The x coordinate of the points is in [0, 0.4)
Mat c = trainClass.colRange(0, 1);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(0.4 * WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1, 2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup1

// Generate random points for the class 2
trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES);
// The x coordinate of the points is in [0.6, 1]
c = trainClass.colRange(0, 1);
rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1, 2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
@endcode
In the second part we create data for both classes that is non-linearly separable, data that
overlaps.
@code{.cpp}
// Generate random points for the classes 1 and 2
trainClass = trainData.rowRange(nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
// The x coordinate of the points is in [0.4, 0.6)
c = trainClass.colRange(0, 1);
rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1, 2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
@endcode
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup2
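The pattern used throughout this generation code is `cv::RNG::fill` on a single-column view of
the training matrix. A self-contained sketch of that call (the `WIDTH` constant and the sample
count mirror the tutorial code and are otherwise arbitrary):
@code{.cpp}
// Fill one coordinate column with uniformly distributed random values.
RNG rng(100);
Mat coords(NTRAINING_SAMPLES, 1, CV_32FC1);
rng.fill(coords, RNG::UNIFORM, Scalar(0.4 * WIDTH), Scalar(0.6 * WIDTH));
@endcode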

-# **Set up SVM's parameters**
-# __Set up SVM's parameters__

@sa
In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the attributes of the
class @ref cv::ml::SVM::Params that we configure here before training the SVM.
@note In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the
attributes of the class @ref cv::ml::SVM that we configure here before training the SVM.

@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp init

@code{.cpp}
CvSVMParams params;
params.svm_type    = SVM::C_SVC;
params.C           = 0.1;
params.kernel_type = SVM::LINEAR;
params.term_crit   = TermCriteria(TermCriteria::ITER, (int)1e7, 1e-6);
@endcode
There are just two differences between the configuration we do here and the one that was done in
the previous tutorial (tutorial_introduction_to_svm) that we use as reference.
the previous tutorial (@ref tutorial_introduction_to_svm) that we use as reference.

- *CvSVM::C_SVC*. We chose here a small value of this parameter in order not to punish too much
the misclassification errors in the optimization. The idea of doing this stems from the will
of obtaining a solution close to the one intuitively expected. However, we recommend getting a
- _C_. We chose here a small value of this parameter in order not to punish too much the
misclassification errors in the optimization. The idea of doing this stems from the will of
obtaining a solution close to the one intuitively expected. However, we recommend getting a
better insight into the problem by making adjustments to this parameter.

@note Here there are just very few points in the overlapping region between classes; by giving a smaller value to **FRAC_LINEAR_SEP** the density of points can be increased and the impact of the parameter **CvSVM::C_SVC** explored more deeply.
@note In this case there are just very few points in the overlapping region between classes.
By giving a smaller value to __FRAC_LINEAR_SEP__ the density of points can be increased and the
impact of the parameter _C_ explored more deeply.

- *Termination Criteria of the algorithm*. The maximum number of iterations has to be
- _Termination Criteria of the algorithm_. The maximum number of iterations has to be
increased considerably in order to solve correctly a problem with non-linearly separable
training data. In particular, we have increased this value by five orders of magnitude.

-# **Train the SVM**
-# __Train the SVM__

We call the method @ref cv::ml::SVM::train to build the SVM model. Watch out that the training
process may take quite a long time. Have patience when you run the program.
@code{.cpp}
CvSVM svm;
svm.train(trainData, labels, Mat(), Mat(), params);
@endcode

-# **Show the Decision Regions**
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp train

-# __Show the Decision Regions__

The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In
this example we have used this method in order to color the space depending on the prediction done
by the SVM. In other words, an image is traversed interpreting its pixels as points of the
Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in
dark green if it is the class with label 1 and in dark blue if it is the class with label 2.
@code{.cpp}
Vec3b green(0,100,0), blue(100,0,0);
for (int i = 0; i < I.rows; ++i)
    for (int j = 0; j < I.cols; ++j)
    {
        Mat sampleMat = (Mat_<float>(1,2) << i, j);
        float response = svm.predict(sampleMat);

        if      (response == 1) I.at<Vec3b>(j, i) = green;
        else if (response == 2) I.at<Vec3b>(j, i) = blue;
    }
@endcode
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show

-# **Show the training data**
-# __Show the training data__

The method @ref cv::circle is used to show the samples that compose the training data. The samples
of the class labeled with 1 are shown in light green, and the samples of the class labeled with 2
in light blue.
@code{.cpp}
int thick = -1;
int lineType = 8;
float px, py;
// Class 1
for (int i = 0; i < NTRAINING_SAMPLES; ++i)
{
    px = trainData.at<float>(i,0);
    py = trainData.at<float>(i,1);
    circle(I, Point((int) px, (int) py), 3, Scalar(0, 255, 0), thick, lineType);
}
// Class 2
for (int i = NTRAINING_SAMPLES; i < 2*NTRAINING_SAMPLES; ++i)
{
    px = trainData.at<float>(i,0);
    py = trainData.at<float>(i,1);
    circle(I, Point((int) px, (int) py), 3, Scalar(255, 0, 0), thick, lineType);
}
@endcode

-# **Support vectors**
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_data

-# __Support vectors__

We use here a couple of methods to obtain information about the support vectors. The method
@ref cv::ml::SVM::getSupportVectors obtains all support vectors.
We have used this method here to find the training examples that are
support vectors and highlight them.
@code{.cpp}
thick = 2;
lineType = 8;
int x = svm.get_support_vector_count();
@ref cv::ml::SVM::getSupportVectors obtains all support vectors. We have used this method here
to find the training examples that are support vectors and highlight them.

for (int i = 0; i < x; ++i)
{
    const float* v = svm.get_support_vector(i);
    circle(I, Point((int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType);
}
@endcode
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_vectors

Results
-------
@@ -2802,43 +2802,36 @@ public:

#define CV_PURE_PROPERTY(type, name) \
    CV_WRAP virtual type get##name() const = 0; \
    CV_WRAP virtual void set##name(type _##name) = 0;
    CV_WRAP virtual void set##name(type val) = 0;

#define CV_PURE_PROPERTY_S(type, name) \
    CV_WRAP virtual type get##name() const = 0; \
    CV_WRAP virtual void set##name(const type & _##name) = 0;
    CV_WRAP virtual void set##name(const type & val) = 0;

#define CV_PURE_PROPERTY_RO(type, name) \
    CV_WRAP virtual type get##name() const = 0;

// basic property implementation

#define CV_IMPL_PROPERTY(type, name, member) \
    type get##name() const \
    { \
        return member; \
    } \
    void set##name(type val) \
    { \
        member = val; \
    }

#define CV_IMPL_PROPERTY_S(type, name, member) \
    type get##name() const \
    { \
        return member; \
    } \
    void set##name(const type &val) \
    { \
        member = val; \
    }

#define CV_IMPL_PROPERTY_RO(type, name, member) \
    type get##name() const \
    { \
        return member; \
    }
    inline type get##name() const { return member; }

#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \
    CV_IMPL_PROPERTY_RO(r_type, name, member) \
    inline void set##name(w_type val) { member = val; }

#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \
    r_type get##name() const { return internal_obj.get##internal_name(); } \
    void set##name(w_type val) { internal_obj.set##internal_name(val); }

#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)
#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member)

#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj)
#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj)

#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)
#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj)
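// Example expansion (hypothetical member m_maxIter, for illustration only):
//     CV_IMPL_PROPERTY(int, MaxIter, m_maxIter)
// goes through CV_HELP_IMPL_PROPERTY and produces the inline pair
//     inline int getMaxIter() const { return m_maxIter; }
//     inline void setMaxIter(int val) { m_maxIter = val; }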

struct Param {
    enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7,
@@ -449,40 +449,33 @@ classes 0 and 1, one can determine that the given data instance belongs to class
\geq 0.5\f$ or class 0 if \f$h_\theta(x) < 0.5\f$ .

In Logistic Regression, choosing the right parameters is of utmost importance for reducing the
training error and ensuring high training accuracy. cv::ml::LogisticRegression::Params is the
structure that defines parameters that are required to train a Logistic Regression classifier.
training error and ensuring high training accuracy:

The learning rate is determined by cv::ml::LogisticRegression::Params.alpha. It determines how fast
we approach the solution. It is a positive real number.
- The learning rate can be set with the @ref cv::ml::LogisticRegression::setLearningRate "setLearningRate"
method. It determines how fast we approach the solution. It is a positive real number.

Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported in
LogisticRegression. It is important that we mention the number of iterations these optimization
algorithms have to run. The number of iterations is specified by
cv::ml::LogisticRegression::Params.num_iters. The number of iterations can be thought of as the number of
steps taken, and the learning rate specifies if it is a long step or a short step. These two parameters
define how fast we arrive at a possible solution.
- Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported
in LogisticRegression. It is important that we mention the number of iterations these optimization
algorithms have to run. The number of iterations can be set with @ref
cv::ml::LogisticRegression::setIterations "setIterations". This parameter can be thought of
as the number of steps taken, and the learning rate specifies if it is a long step or a short step. This
and the previous parameter define how fast we arrive at a possible solution.

In order to compensate for overfitting, regularization is performed, which can be enabled by setting
cv::ml::LogisticRegression::Params.regularized to a positive integer (greater than zero). One can
specify what kind of regularization has to be performed by setting
cv::ml::LogisticRegression::Params.norm to REG_L1 or REG_L2 values.
- In order to compensate for overfitting, regularization is performed, which can be enabled with
@ref cv::ml::LogisticRegression::setRegularization "setRegularization". One can specify what
kind of regularization has to be performed by passing one of the @ref
cv::ml::LogisticRegression::RegKinds "regularization kinds" to this method.

LogisticRegression provides a choice of 2 training methods, Batch Gradient Descent or Mini-Batch
Gradient Descent. To specify this, set cv::ml::LogisticRegression::Params::train_method to
either BATCH or MINI_BATCH. If the training method is set to MINI_BATCH, the size of the mini batch has
to be set to a positive integer using cv::ml::LogisticRegression::Params::mini_batch_size.
- The logistic regression implementation provides a choice of 2 training methods, Batch Gradient
Descent or Mini-Batch Gradient Descent. To specify this, call @ref
cv::ml::LogisticRegression::setTrainMethod "setTrainMethod" with either @ref
cv::ml::LogisticRegression::BATCH "LogisticRegression::BATCH" or @ref
cv::ml::LogisticRegression::MINI_BATCH "LogisticRegression::MINI_BATCH". If the training method is
set to @ref cv::ml::LogisticRegression::MINI_BATCH "MINI_BATCH", the size of the mini batch has
to be set to a positive integer with @ref cv::ml::LogisticRegression::setMiniBatchSize
"setMiniBatchSize".

A sample set of training parameters for the Logistic Regression classifier can be initialized as
follows:
@code{.cpp}
using namespace cv::ml;
LogisticRegression::Params params;
params.alpha = 0.5;
params.num_iters = 10000;
params.norm = LogisticRegression::REG_L2;
params.regularized = 1;
params.train_method = LogisticRegression::MINI_BATCH;
params.mini_batch_size = 10;
@endcode
A sample set of training parameters for the Logistic Regression classifier can be initialized as follows:
@snippet samples/cpp/logistic_regression.cpp init

@sa cv::ml::LogisticRegression
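For comparison, a minimal sketch of the same configuration through the setter methods described
above (assuming `LogisticRegression::create()` returns a model with default settings):
@code{.cpp}
using namespace cv::ml;
Ptr<LogisticRegression> lr = LogisticRegression::create();
lr->setLearningRate(0.5);
lr->setIterations(10000);
lr->setRegularization(LogisticRegression::REG_L2);
lr->setTrainMethod(LogisticRegression::MINI_BATCH);
lr->setMiniBatchSize(10);
@endcode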

(File diff suppressed because it is too large.)
@@ -42,84 +42,57 @@

namespace cv { namespace ml {

ANN_MLP::Params::Params()
struct AnnParams
{
    layerSizes = Mat();
    activateFunc = SIGMOID_SYM;
    fparam1 = fparam2 = 0;
    termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 );
    trainMethod = RPROP;
    bpDWScale = bpMomentScale = 0.1;
    rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5;
    rpDWMin = FLT_EPSILON; rpDWMax = 50.;
}
    AnnParams()
    {
        termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 );
        trainMethod = ANN_MLP::RPROP;
        bpDWScale = bpMomentScale = 0.1;
        rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5;
        rpDWMin = FLT_EPSILON; rpDWMax = 50.;
    }

    TermCriteria termCrit;
    int trainMethod;

ANN_MLP::Params::Params( const Mat& _layerSizes, int _activateFunc, double _fparam1, double _fparam2,
                         TermCriteria _termCrit, int _trainMethod, double _param1, double _param2 )
    double bpDWScale;
    double bpMomentScale;

    double rpDW0;
    double rpDWPlus;
    double rpDWMinus;
    double rpDWMin;
    double rpDWMax;
};

template <typename T>
inline T inBounds(T val, T min_val, T max_val)
{
    layerSizes = _layerSizes;
    activateFunc = _activateFunc;
    fparam1 = _fparam1;
    fparam2 = _fparam2;
    termCrit = _termCrit;
    trainMethod = _trainMethod;
    bpDWScale = bpMomentScale = 0.1;
    rpDW0 = 1.; rpDWPlus = 1.2; rpDWMinus = 0.5;
    rpDWMin = FLT_EPSILON; rpDWMax = 50.;

    if( trainMethod == RPROP )
    {
        rpDW0 = _param1;
        if( rpDW0 < FLT_EPSILON )
            rpDW0 = 1.;
        rpDWMin = _param2;
        rpDWMin = std::max( rpDWMin, 0. );
    }
    else if( trainMethod == BACKPROP )
    {
        bpDWScale = _param1;
        if( bpDWScale <= 0 )
            bpDWScale = 0.1;
        bpDWScale = std::max( bpDWScale, 1e-3 );
        bpDWScale = std::min( bpDWScale, 1. );
        bpMomentScale = _param2;
        if( bpMomentScale < 0 )
            bpMomentScale = 0.1;
        bpMomentScale = std::min( bpMomentScale, 1. );
    }
    else
        trainMethod = RPROP;
    return std::min(std::max(val, min_val), max_val);
}


class ANN_MLPImpl : public ANN_MLP
{
public:
    ANN_MLPImpl()
    {
        clear();
    }

    ANN_MLPImpl( const Params& p )
    {
        clear();
        setParams(p);
        setActivationFunction( SIGMOID_SYM, 0, 0 );
        setLayerSizes(Mat());
        setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON);
    }

    virtual ~ANN_MLPImpl() {}

    void setParams(const Params& p)
    {
        params = p;
        create( params.layerSizes );
        set_activ_func( params.activateFunc, params.fparam1, params.fparam2 );
    }

    Params getParams() const
    {
        return params;
    }
    CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.termCrit)
    CV_IMPL_PROPERTY(double, BackpropWeightScale, params.bpDWScale)
    CV_IMPL_PROPERTY(double, BackpropMomentumScale, params.bpMomentScale)
    CV_IMPL_PROPERTY(double, RpropDW0, params.rpDW0)
    CV_IMPL_PROPERTY(double, RpropDWPlus, params.rpDWPlus)
    CV_IMPL_PROPERTY(double, RpropDWMinus, params.rpDWMinus)
    CV_IMPL_PROPERTY(double, RpropDWMin, params.rpDWMin)
    CV_IMPL_PROPERTY(double, RpropDWMax, params.rpDWMax)

    void clear()
    {
@@ -132,7 +105,35 @@ public:

    int layer_count() const { return (int)layer_sizes.size(); }

    void set_activ_func( int _activ_func, double _f_param1, double _f_param2 )
    void setTrainMethod(int method, double param1, double param2)
    {
        if (method != ANN_MLP::RPROP && method != ANN_MLP::BACKPROP)
            method = ANN_MLP::RPROP;
        params.trainMethod = method;
        if( method == ANN_MLP::RPROP )
        {
            if( param1 < FLT_EPSILON )
                param1 = 1.;
            params.rpDW0 = param1;
            params.rpDWMin = std::max( param2, 0. );
        }
        else if( method == ANN_MLP::BACKPROP )
        {
            if( param1 <= 0 )
                param1 = 0.1;
            params.bpDWScale = inBounds<double>(param1, 1e-3, 1.);
            if( param2 < 0 )
                param2 = 0.1;
            params.bpMomentScale = std::min( param2, 1. );
        }
    }

    int getTrainMethod() const
    {
        return params.trainMethod;
    }

    void setActivationFunction(int _activ_func, double _f_param1, double _f_param2 )
    {
        if( _activ_func < 0 || _activ_func > GAUSSIAN )
            CV_Error( CV_StsOutOfRange, "Unknown activation function" );
@@ -201,7 +202,12 @@ public:
        }
    }

    void create( InputArray _layer_sizes )
    Mat getLayerSizes() const
    {
        return Mat_<int>(layer_sizes, true);
    }

    void setLayerSizes( InputArray _layer_sizes )
    {
        clear();

@@ -700,7 +706,7 @@ public:
        termcrit.maxCount = std::max((params.termCrit.type & CV_TERMCRIT_ITER ? params.termCrit.maxCount : MAX_ITER), 1);
        termcrit.epsilon = std::max((params.termCrit.type & CV_TERMCRIT_EPS ? params.termCrit.epsilon : DEFAULT_EPSILON), DBL_EPSILON);

        int iter = params.trainMethod == Params::BACKPROP ?
        int iter = params.trainMethod == ANN_MLP::BACKPROP ?
            train_backprop( inputs, outputs, sw, termcrit ) :
            train_rprop( inputs, outputs, sw, termcrit );

@@ -1113,13 +1119,13 @@ public:
        fs << "min_val" << min_val << "max_val" << max_val << "min_val1" << min_val1 << "max_val1" << max_val1;

        fs << "training_params" << "{";
        if( params.trainMethod == Params::BACKPROP )
        if( params.trainMethod == ANN_MLP::BACKPROP )
        {
            fs << "train_method" << "BACKPROP";
            fs << "dw_scale" << params.bpDWScale;
            fs << "moment_scale" << params.bpMomentScale;
        }
        else if( params.trainMethod == Params::RPROP )
        else if( params.trainMethod == ANN_MLP::RPROP )
        {
            fs << "train_method" << "RPROP";
            fs << "dw0" << params.rpDW0;
@@ -1186,7 +1192,7 @@ public:
        f_param1 = (double)fn["f_param1"];
        f_param2 = (double)fn["f_param2"];

        set_activ_func( activ_func, f_param1, f_param2 );
        setActivationFunction( activ_func, f_param1, f_param2 );

        min_val = (double)fn["min_val"];
        max_val = (double)fn["max_val"];
@@ -1194,7 +1200,7 @@ public:
        max_val1 = (double)fn["max_val1"];

        FileNode tpn = fn["training_params"];
        params = Params();
        params = AnnParams();

        if( !tpn.empty() )
        {
@@ -1202,13 +1208,13 @@ public:

            if( tmethod_name == "BACKPROP" )
            {
                params.trainMethod = Params::BACKPROP;
                params.trainMethod = ANN_MLP::BACKPROP;
                params.bpDWScale = (double)tpn["dw_scale"];
                params.bpMomentScale = (double)tpn["moment_scale"];
            }
            else if( tmethod_name == "RPROP" )
            {
                params.trainMethod = Params::RPROP;
                params.trainMethod = ANN_MLP::RPROP;
                params.rpDW0 = (double)tpn["dw0"];
                params.rpDWPlus = (double)tpn["dw_plus"];
                params.rpDWMinus = (double)tpn["dw_minus"];
@@ -1244,7 +1250,7 @@ public:

        vector<int> _layer_sizes;
        readVectorOrMat(fn["layer_sizes"], _layer_sizes);
        create( _layer_sizes );
        setLayerSizes( _layer_sizes );

        int i, l_count = layer_count();
        read_params(fn);
@@ -1267,11 +1273,6 @@ public:
        trained = true;
    }

    Mat getLayerSizes() const
    {
        return Mat_<int>(layer_sizes, true);
    }

    Mat getWeights(int layerIdx) const
    {
        CV_Assert( 0 <= layerIdx && layerIdx < (int)weights.size() );
@@ -1304,17 +1305,16 @@ public:
    double min_val, max_val, min_val1, max_val1;
    int activ_func;
    int max_lsize, max_buf_sz;
    Params params;
    AnnParams params;
    RNG rng;
    Mutex mtx;
    bool trained;
};


Ptr<ANN_MLP> ANN_MLP::create(const ANN_MLP::Params& params)
Ptr<ANN_MLP> ANN_MLP::create()
{
    Ptr<ANN_MLPImpl> ann = makePtr<ANN_MLPImpl>(params);
    return ann;
    return makePtr<ANN_MLPImpl>();
}

}}
@@ -54,48 +54,33 @@ log_ratio( double val )
}


Boost::Params::Params()
BoostTreeParams::BoostTreeParams()
{
    boostType = Boost::REAL;
    weakCount = 100;
    weightTrimRate = 0.95;
    CVFolds = 0;
    maxDepth = 1;
}


Boost::Params::Params( int _boostType, int _weak_count,
                       double _weightTrimRate, int _maxDepth,
                       bool _use_surrogates, const Mat& _priors )
BoostTreeParams::BoostTreeParams( int _boostType, int _weak_count,
                                  double _weightTrimRate)
{
    boostType = _boostType;
    weakCount = _weak_count;
    weightTrimRate = _weightTrimRate;
    CVFolds = 0;
    maxDepth = _maxDepth;
    useSurrogates = _use_surrogates;
    priors = _priors;
}


class DTreesImplForBoost : public DTreesImpl
{
public:
    DTreesImplForBoost() {}
    DTreesImplForBoost()
    {
        params.setCVFolds(0);
        params.setMaxDepth(1);
    }
    virtual ~DTreesImplForBoost() {}

    bool isClassifier() const { return true; }

    void setBParams(const Boost::Params& p)
    {
        bparams = p;
    }

    Boost::Params getBParams() const
    {
        return bparams;
    }

    void clear()
    {
        DTreesImpl::clear();
@@ -199,10 +184,6 @@ public:

    bool train( const Ptr<TrainData>& trainData, int flags )
    {
        Params dp(bparams.maxDepth, bparams.minSampleCount, bparams.regressionAccuracy,
                  bparams.useSurrogates, bparams.maxCategories, 0,
                  false, false, bparams.priors);
        setDParams(dp);
        startTraining(trainData, flags);
        int treeidx, ntrees = bparams.weakCount >= 0 ? bparams.weakCount : 10000;
        vector<int> sidx = w->sidx;
@@ -426,12 +407,6 @@ public:
    void readParams( const FileNode& fn )
    {
        DTreesImpl::readParams(fn);
        bparams.maxDepth = params0.maxDepth;
        bparams.minSampleCount = params0.minSampleCount;
        bparams.regressionAccuracy = params0.regressionAccuracy;
        bparams.useSurrogates = params0.useSurrogates;
        bparams.maxCategories = params0.maxCategories;
        bparams.priors = params0.priors;

        FileNode tparams_node = fn["training_params"];
        // check for old layout
@@ -465,7 +440,7 @@ public:
        }
    }

    Boost::Params bparams;
    BoostTreeParams bparams;
    vector<double> sumResult;
};

@@ -476,6 +451,20 @@ public:
    BoostImpl() {}
    virtual ~BoostImpl() {}

    CV_IMPL_PROPERTY(int, BoostType, impl.bparams.boostType)
    CV_IMPL_PROPERTY(int, WeakCount, impl.bparams.weakCount)
    CV_IMPL_PROPERTY(double, WeightTrimRate, impl.bparams.weightTrimRate)

    CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params)
    CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params)
    CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params)
    CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params)
    CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params)
    CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params)
    CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params)
    CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params)
    CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params)

    String getDefaultModelName() const { return "opencv_ml_boost"; }

    bool train( const Ptr<TrainData>& trainData, int flags )
@@ -498,9 +487,6 @@ public:
        impl.read(fn);
    }

    void setBParams(const Params& p) { impl.setBParams(p); }
    Params getBParams() const { return impl.getBParams(); }

    int getVarCount() const { return impl.getVarCount(); }

    bool isTrained() const { return impl.isTrained(); }
@@ -515,11 +501,9 @@ public:
};


Ptr<Boost> Boost::create(const Params& params)
Ptr<Boost> Boost::create()
{
    Ptr<BoostImpl> p = makePtr<BoostImpl>();
    p->setBParams(params);
    return p;
    return makePtr<BoostImpl>();
}

}}
@@ -48,37 +48,49 @@ namespace ml

const double minEigenValue = DBL_EPSILON;

EM::Params::Params(int _nclusters, int _covMatType, const TermCriteria& _termCrit)
{
    nclusters = _nclusters;
    covMatType = _covMatType;
    termCrit = _termCrit;
}

class CV_EXPORTS EMImpl : public EM
{
public:
    EMImpl(const Params& _params)

    int nclusters;
    int covMatType;
    TermCriteria termCrit;

    CV_IMPL_PROPERTY_S(TermCriteria, TermCriteria, termCrit)

    void setClustersNumber(int val)
    {
        setParams(_params);
        nclusters = val;
        CV_Assert(nclusters > 1);
    }

    int getClustersNumber() const
    {
        return nclusters;
    }

    void setCovarianceMatrixType(int val)
    {
        covMatType = val;
        CV_Assert(covMatType == COV_MAT_SPHERICAL ||
                  covMatType == COV_MAT_DIAGONAL ||
                  covMatType == COV_MAT_GENERIC);
    }

    int getCovarianceMatrixType() const
    {
        return covMatType;
    }

    EMImpl()
    {
        nclusters = DEFAULT_NCLUSTERS;
        covMatType = EM::COV_MAT_DIAGONAL;
        termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, 1e-6);
    }

    virtual ~EMImpl() {}

    void setParams(const Params& _params)
    {
        params = _params;
        CV_Assert(params.nclusters > 1);
        CV_Assert(params.covMatType == COV_MAT_SPHERICAL ||
                  params.covMatType == COV_MAT_DIAGONAL ||
                  params.covMatType == COV_MAT_GENERIC);
    }

    Params getParams() const
    {
        return params;
    }

    void clear()
    {
        trainSamples.release();
@@ -100,10 +112,10 @@ public:
    bool train(const Ptr<TrainData>& data, int)
    {
        Mat samples = data->getTrainSamples(), labels;
        return train_(samples, labels, noArray(), noArray());
        return trainEM(samples, labels, noArray(), noArray());
    }

    bool train_(InputArray samples,
    bool trainEM(InputArray samples,
                OutputArray logLikelihoods,
                OutputArray labels,
                OutputArray probs)
@@ -157,7 +169,7 @@ public:
        {
            if( _outputs.fixedType() )
                ptype = _outputs.type();
            _outputs.create(samples.rows, params.nclusters, ptype);
            _outputs.create(samples.rows, nclusters, ptype);
        }
        else
            nsamples = std::min(nsamples, 1);
@@ -193,7 +205,7 @@ public:
        {
            if( _probs.fixedType() )
                ptype = _probs.type();
            _probs.create(1, params.nclusters, ptype);
            _probs.create(1, nclusters, ptype);
            probs = _probs.getMat();
        }

@@ -311,7 +323,6 @@ public:
                   const std::vector<Mat>* covs0,
                   const Mat* weights0)
    {
        int nclusters = params.nclusters, covMatType = params.covMatType;
        clear();

        checkTrainData(startStep, samples, nclusters, covMatType, probs0, means0, covs0, weights0);
@@ -350,7 +361,6 @@ public:

    void decomposeCovs()
    {
        int nclusters = params.nclusters, covMatType = params.covMatType;
        CV_Assert(!covs.empty());
        covsEigenValues.resize(nclusters);
        if(covMatType == COV_MAT_GENERIC)
@@ -383,7 +393,6 @@ public:

    void clusterTrainSamples()
    {
        int nclusters = params.nclusters;
        int nsamples = trainSamples.rows;

        // Cluster samples, compute/update means
@@ -443,7 +452,6 @@ public:

    void computeLogWeightDivDet()
    {
        int nclusters = params.nclusters;
        CV_Assert(!covsEigenValues.empty());

        Mat logWeights;
@@ -458,7 +466,7 @@ public:
            double logDetCov = 0.;
            const int evalCount = static_cast<int>(covsEigenValues[clusterIndex].total());
            for(int di = 0; di < evalCount; di++)
                logDetCov += std::log(covsEigenValues[clusterIndex].at<double>(params.covMatType != COV_MAT_SPHERICAL ? di : 0));
                logDetCov += std::log(covsEigenValues[clusterIndex].at<double>(covMatType != COV_MAT_SPHERICAL ? di : 0));

            logWeightDivDet.at<double>(clusterIndex) = logWeights.at<double>(clusterIndex) - 0.5 * logDetCov;
        }
@@ -466,7 +474,6 @@ public:

    bool doTrain(int startStep, OutputArray logLikelihoods, OutputArray labels, OutputArray probs)
    {
        int nclusters = params.nclusters;
        int dim = trainSamples.cols;
        // Precompute the empty initial train data in the cases of START_E_STEP and START_AUTO_STEP
        if(startStep != START_M_STEP)
@@ -488,9 +495,9 @@ public:
            mStep();

        double trainLogLikelihood, prevTrainLogLikelihood = 0.;
        int maxIters = (params.termCrit.type & TermCriteria::MAX_ITER) ?
            params.termCrit.maxCount : DEFAULT_MAX_ITERS;
        double epsilon = (params.termCrit.type & TermCriteria::EPS) ? params.termCrit.epsilon : 0.;
        int maxIters = (termCrit.type & TermCriteria::MAX_ITER) ?
            termCrit.maxCount : DEFAULT_MAX_ITERS;
        double epsilon = (termCrit.type & TermCriteria::EPS) ? termCrit.epsilon : 0.;

        for(int iter = 0; ; iter++)
        {
@@ -521,12 +528,12 @@ public:
            covs.resize(nclusters);
            for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
            {
                if(params.covMatType == COV_MAT_SPHERICAL)
                if(covMatType == COV_MAT_SPHERICAL)
                {
                    covs[clusterIndex].create(dim, dim, CV_64FC1);
                    setIdentity(covs[clusterIndex], Scalar(covsEigenValues[clusterIndex].at<double>(0)));
                }
                else if(params.covMatType == COV_MAT_DIAGONAL)
                else if(covMatType == COV_MAT_DIAGONAL)
                {
                    covs[clusterIndex] = Mat::diag(covsEigenValues[clusterIndex]);
                }
@@ -555,7 +562,6 @@ public:
        // see Alex Smola's blog http://blog.smola.org/page/2 for
        // details on the log-sum-exp trick
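        // In short: log(sum_i exp(x_i)) = m + log(sum_i exp(x_i - m)) with m = max_i x_i,
        // so every exponential is <= 1 and the sum cannot overflow even when the
        // per-cluster log-likelihoods have large magnitude.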

        int nclusters = params.nclusters, covMatType = params.covMatType;
        int stype = sample.type();
        CV_Assert(!means.empty());
        CV_Assert((stype == CV_32F || stype == CV_64F) && (ptype == CV_32F || ptype == CV_64F));
@@ -621,7 +627,7 @@ public:
    void eStep()
    {
        // Compute probs_ik from means_k, covs_k and weights_k.
        trainProbs.create(trainSamples.rows, params.nclusters, CV_64FC1);
        trainProbs.create(trainSamples.rows, nclusters, CV_64FC1);
        trainLabels.create(trainSamples.rows, 1, CV_32SC1);
        trainLogLikelihoods.create(trainSamples.rows, 1, CV_64FC1);

@@ -642,8 +648,6 @@ public:
    void mStep()
    {
        // Update means_k, covs_k and weights_k from probs_ik
        int nclusters = params.nclusters;
        int covMatType = params.covMatType;
        int dim = trainSamples.cols;

        // Update weights
@@ -755,12 +759,12 @@ public:

    void write_params(FileStorage& fs) const
    {
        fs << "nclusters" << params.nclusters;
        fs << "cov_mat_type" << (params.covMatType == COV_MAT_SPHERICAL ? String("spherical") :
                                 params.covMatType == COV_MAT_DIAGONAL ? String("diagonal") :
                                 params.covMatType == COV_MAT_GENERIC ? String("generic") :
                                 format("unknown_%d", params.covMatType));
        writeTermCrit(fs, params.termCrit);
        fs << "nclusters" << nclusters;
        fs << "cov_mat_type" << (covMatType == COV_MAT_SPHERICAL ? String("spherical") :
                                 covMatType == COV_MAT_DIAGONAL ? String("diagonal") :
                                 covMatType == COV_MAT_GENERIC ? String("generic") :
                                 format("unknown_%d", covMatType));
        writeTermCrit(fs, termCrit);
    }

    void write(FileStorage& fs) const
@@ -781,15 +785,13 @@ public:

    void read_params(const FileNode& fn)
    {
        Params _params;
        _params.nclusters = (int)fn["nclusters"];
        nclusters = (int)fn["nclusters"];
        String s = (String)fn["cov_mat_type"];
        _params.covMatType = s == "spherical" ? COV_MAT_SPHERICAL :
        covMatType = s == "spherical" ? COV_MAT_SPHERICAL :
                     s == "diagonal" ? COV_MAT_DIAGONAL :
                     s == "generic" ? COV_MAT_GENERIC : -1;
        CV_Assert(_params.covMatType >= 0);
        _params.termCrit = readTermCrit(fn);
        setParams(_params);
        CV_Assert(covMatType >= 0);
        termCrit = readTermCrit(fn);
    }

    void read(const FileNode& fn)
@@ -820,8 +822,6 @@ public:
        std::copy(covs.begin(), covs.end(), _covs.begin());
    }

    Params params;

    // all inner matrices have type CV_64FC1
    Mat trainSamples;
    Mat trainProbs;
@@ -838,41 +838,9 @@ public:
    Mat logWeightDivDet;
};


Ptr<EM> EM::train(InputArray samples, OutputArray logLikelihoods,
                  OutputArray labels, OutputArray probs,
                  const EM::Params& params)
Ptr<EM> EM::create()
{
    Ptr<EMImpl> em = makePtr<EMImpl>(params);
    if(!em->train_(samples, logLikelihoods, labels, probs))
        em.release();
    return em;
}

Ptr<EM> EM::train_startWithE(InputArray samples, InputArray means0,
                             InputArray covs0, InputArray weights0,
                             OutputArray logLikelihoods, OutputArray labels,
                             OutputArray probs, const EM::Params& params)
{
    Ptr<EMImpl> em = makePtr<EMImpl>(params);
    if(!em->trainE(samples, means0, covs0, weights0, logLikelihoods, labels, probs))
        em.release();
    return em;
}

Ptr<EM> EM::train_startWithM(InputArray samples, InputArray probs0,
                             OutputArray logLikelihoods, OutputArray labels,
                             OutputArray probs, const EM::Params& params)
{
    Ptr<EMImpl> em = makePtr<EMImpl>(params);
    if(!em->trainM(samples, probs0, logLikelihoods, labels, probs))
        em.release();
    return em;
}

Ptr<EM> EM::create(const Params& params)
{
    return makePtr<EMImpl>(params);
    return makePtr<EMImpl>();
}
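// Usage sketch (assumes the refactored setter-based interface defined above):
//   Ptr<EM> em = EM::create();
//   em->setClustersNumber(5);
//   em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
//   em->trainEM(samples, noArray(), labels, noArray());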
|
||||
|
||||
}
|
||||
|
@ -50,46 +50,33 @@
namespace cv {
namespace ml {

KNearest::Params::Params(int k, bool isclassifier_, int Emax_, int algorithmType_) :
defaultK(k),
isclassifier(isclassifier_),
Emax(Emax_),
algorithmType(algorithmType_)
{
}
const String NAME_BRUTE_FORCE = "opencv_ml_knn";
const String NAME_KDTREE = "opencv_ml_knn_kd";

class KNearestImpl : public KNearest
class Impl
{
public:
KNearestImpl(const Params& p)
Impl()
{
params = p;
defaultK = 10;
isclassifier = true;
Emax = INT_MAX;
}

virtual ~KNearestImpl() {}

Params getParams() const { return params; }
void setParams(const Params& p) { params = p; }

bool isClassifier() const { return params.isclassifier; }
bool isTrained() const { return !samples.empty(); }

String getDefaultModelName() const { return "opencv_ml_knn"; }

void clear()
{
samples.release();
responses.release();
}

int getVarCount() const { return samples.cols; }
virtual ~Impl() {}
virtual String getModelName() const = 0;
virtual int getType() const = 0;
virtual float findNearest( InputArray _samples, int k,
OutputArray _results,
OutputArray _neighborResponses,
OutputArray _dists ) const = 0;

bool train( const Ptr<TrainData>& data, int flags )
{
Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
Mat new_responses;
data->getTrainResponses().convertTo(new_responses, CV_32F);
bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty();
bool update = (flags & ml::KNearest::UPDATE_MODEL) != 0 && !samples.empty();

CV_Assert( new_samples.type() == CV_32F );

@ -106,9 +93,53 @@ public:
samples.push_back(new_samples);
responses.push_back(new_responses);

doTrain(samples);

return true;
}

virtual void doTrain(InputArray points) { (void)points; }

void clear()
{
samples.release();
responses.release();
}

void read( const FileNode& fn )
{
clear();
isclassifier = (int)fn["is_classifier"] != 0;
defaultK = (int)fn["default_k"];

fn["samples"] >> samples;
fn["responses"] >> responses;
}

void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)isclassifier;
fs << "default_k" << defaultK;

fs << "samples" << samples;
fs << "responses" << responses;
}

public:
int defaultK;
bool isclassifier;
int Emax;

Mat samples;
Mat responses;
};

class BruteForceImpl : public Impl
{
public:
String getModelName() const { return NAME_BRUTE_FORCE; }
int getType() const { return ml::KNearest::BRUTE_FORCE; }

void findNearestCore( const Mat& _samples, int k0, const Range& range,
Mat* results, Mat* neighbor_responses,
Mat* dists, float* presult ) const
@ -199,7 +230,7 @@ public:

if( results || testidx+range.start == 0 )
{
if( !params.isclassifier || k == 1 )
if( !isclassifier || k == 1 )
{
float s = 0.f;
for( j = 0; j < k; j++ )
@ -251,7 +282,7 @@ public:

struct findKNearestInvoker : public ParallelLoopBody
{
findKNearestInvoker(const KNearestImpl* _p, int _k, const Mat& __samples,
findKNearestInvoker(const BruteForceImpl* _p, int _k, const Mat& __samples,
Mat* __results, Mat* __neighbor_responses, Mat* __dists, float* _presult)
{
p = _p;
@ -273,7 +304,7 @@ public:
}
}

const KNearestImpl* p;
const BruteForceImpl* p;
int k;
const Mat* _samples;
Mat* _results;
@ -324,88 +355,18 @@ public:
//invoker(Range(0, testcount));
return result;
}

float predict(InputArray inputs, OutputArray outputs, int) const
{
return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() );
}

void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)params.isclassifier;
fs << "default_k" << params.defaultK;

fs << "samples" << samples;
fs << "responses" << responses;
}

void read( const FileNode& fn )
{
clear();
params.isclassifier = (int)fn["is_classifier"] != 0;
params.defaultK = (int)fn["default_k"];

fn["samples"] >> samples;
fn["responses"] >> responses;
}

Mat samples;
Mat responses;
Params params;
};


class KNearestKDTreeImpl : public KNearest
class KDTreeImpl : public Impl
{
public:
KNearestKDTreeImpl(const Params& p)
String getModelName() const { return NAME_KDTREE; }
int getType() const { return ml::KNearest::KDTREE; }

void doTrain(InputArray points)
{
params = p;
}

virtual ~KNearestKDTreeImpl() {}

Params getParams() const { return params; }
void setParams(const Params& p) { params = p; }

bool isClassifier() const { return params.isclassifier; }
bool isTrained() const { return !samples.empty(); }

String getDefaultModelName() const { return "opencv_ml_knn_kd"; }

void clear()
{
samples.release();
responses.release();
}

int getVarCount() const { return samples.cols; }

bool train( const Ptr<TrainData>& data, int flags )
{
Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
Mat new_responses;
data->getTrainResponses().convertTo(new_responses, CV_32F);
bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty();

CV_Assert( new_samples.type() == CV_32F );

if( !update )
{
clear();
}
else
{
CV_Assert( new_samples.cols == samples.cols &&
new_responses.cols == responses.cols );
}

samples.push_back(new_samples);
responses.push_back(new_responses);

tr.build(samples);

return true;
tr.build(points);
}

float findNearest( InputArray _samples, int k,
@ -460,51 +421,97 @@ public:
{
_d = d.row(i);
}
tr.findNearest(test_samples.row(i), k, params.Emax, _res, _nr, _d, noArray());
tr.findNearest(test_samples.row(i), k, Emax, _res, _nr, _d, noArray());
}

return result; // currently always 0
}

float predict(InputArray inputs, OutputArray outputs, int) const
KDTree tr;
};

//================================================================

class KNearestImpl : public KNearest
{
CV_IMPL_PROPERTY(int, DefaultK, impl->defaultK)
CV_IMPL_PROPERTY(bool, IsClassifier, impl->isclassifier)
CV_IMPL_PROPERTY(int, Emax, impl->Emax)

public:
int getAlgorithmType() const
{
return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() );
return impl->getType();
}
void setAlgorithmType(int val)
{
if (val != BRUTE_FORCE && val != KDTREE)
val = BRUTE_FORCE;
initImpl(val);
}

public:
KNearestImpl()
{
initImpl(BRUTE_FORCE);
}
~KNearestImpl()
{
}

bool isClassifier() const { return impl->isclassifier; }
bool isTrained() const { return !impl->samples.empty(); }

int getVarCount() const { return impl->samples.cols; }

void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)params.isclassifier;
fs << "default_k" << params.defaultK;

fs << "samples" << samples;
fs << "responses" << responses;
impl->write(fs);
}

void read( const FileNode& fn )
{
clear();
params.isclassifier = (int)fn["is_classifier"] != 0;
params.defaultK = (int)fn["default_k"];

fn["samples"] >> samples;
fn["responses"] >> responses;
int algorithmType = BRUTE_FORCE;
if (fn.name() == NAME_KDTREE)
algorithmType = KDTREE;
initImpl(algorithmType);
impl->read(fn);
}

KDTree tr;
float findNearest( InputArray samples, int k,
OutputArray results,
OutputArray neighborResponses=noArray(),
OutputArray dist=noArray() ) const
{
return impl->findNearest(samples, k, results, neighborResponses, dist);
}

Mat samples;
Mat responses;
Params params;
float predict(InputArray inputs, OutputArray outputs, int) const
{
return impl->findNearest( inputs, impl->defaultK, outputs, noArray(), noArray() );
}

bool train( const Ptr<TrainData>& data, int flags )
{
return impl->train(data, flags);
}

String getDefaultModelName() const { return impl->getModelName(); }

protected:
void initImpl(int algorithmType)
{
if (algorithmType != KDTREE)
impl = makePtr<BruteForceImpl>();
else
impl = makePtr<KDTreeImpl>();
}
Ptr<Impl> impl;
};

Ptr<KNearest> KNearest::create(const Params& p)
Ptr<KNearest> KNearest::create()
{
if (KDTREE==p.algorithmType)
{
return makePtr<KNearestKDTreeImpl>(p);
}

return makePtr<KNearestImpl>(p);
return makePtr<KNearestImpl>();
}

}
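With the Params struct gone, the pimpl-based KNearest above is configured through its properties; switching the algorithm type swaps the BruteForceImpl/KDTreeImpl backend via initImpl(). A short sketch under those assumptions (train/test matrices are illustrative CV_32F data):

Ptr<KNearest> knn = KNearest::create();   // defaults to BRUTE_FORCE, defaultK = 10
knn->setDefaultK(5);
knn->setIsClassifier(true);
knn->setAlgorithmType(KNearest::KDTREE);  // initImpl() replaces the backend
knn->train(trainSamples, ROW_SAMPLE, trainLabels);
Mat results, neighborResponses, dists;
knn->findNearest(testSamples, 5, results, neighborResponses, dists);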
@ -60,31 +60,41 @@ using namespace std;
namespace cv {
namespace ml {

LogisticRegression::Params::Params(double learning_rate,
int iters,
int method,
int normlization,
int reg,
int batch_size)
class LrParams
{
alpha = learning_rate;
num_iters = iters;
norm = normlization;
regularized = reg;
train_method = method;
mini_batch_size = batch_size;
term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
}
public:
LrParams()
{
alpha = 0.001;
num_iters = 1000;
norm = LogisticRegression::REG_L2;
train_method = LogisticRegression::BATCH;
mini_batch_size = 1;
term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
}

double alpha; //!< learning rate.
int num_iters; //!< number of iterations.
int norm;
int train_method;
int mini_batch_size;
TermCriteria term_crit;
};

class LogisticRegressionImpl : public LogisticRegression
{
public:
LogisticRegressionImpl(const Params& pms)
: params(pms)
{
}

LogisticRegressionImpl() { }
virtual ~LogisticRegressionImpl() {}

CV_IMPL_PROPERTY(double, LearningRate, params.alpha)
CV_IMPL_PROPERTY(int, Iterations, params.num_iters)
CV_IMPL_PROPERTY(int, Regularization, params.norm)
CV_IMPL_PROPERTY(int, TrainMethod, params.train_method)
CV_IMPL_PROPERTY(int, MiniBatchSize, params.mini_batch_size)
CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.term_crit)

virtual bool train( const Ptr<TrainData>& trainData, int=0 );
virtual float predict(InputArray samples, OutputArray results, int) const;
virtual void clear();
@ -103,7 +113,7 @@ protected:
bool set_label_map(const Mat& _labels_i);
Mat remap_labels(const Mat& _labels_i, const map<int, int>& lmap) const;
protected:
Params params;
LrParams params;
Mat learnt_thetas;
map<int, int> forward_mapper;
map<int, int> reverse_mapper;
@ -111,9 +121,9 @@ protected:
Mat labels_n;
};

Ptr<LogisticRegression> LogisticRegression::create(const Params& params)
Ptr<LogisticRegression> LogisticRegression::create()
{
return makePtr<LogisticRegressionImpl>(params);
return makePtr<LogisticRegressionImpl>();
}

bool LogisticRegressionImpl::train(const Ptr<TrainData>& trainData, int)
@ -312,7 +322,7 @@ double LogisticRegressionImpl::compute_cost(const Mat& _data, const Mat& _labels
theta_b = _init_theta(Range(1, n), Range::all());
multiply(theta_b, theta_b, theta_c, 1);

if(this->params.regularized > 0)
if(params.norm != REG_NONE)
{
llambda = 1;
}
@ -367,7 +377,7 @@ Mat LogisticRegressionImpl::compute_batch_gradient(const Mat& _data, const Mat&
m = _data.rows;
n = _data.cols;

if(this->params.regularized > 0)
if(params.norm != REG_NONE)
{
llambda = 1;
}
@ -439,7 +449,7 @@ Mat LogisticRegressionImpl::compute_mini_batch_gradient(const Mat& _data, const
Mat data_d;
Mat labels_l;

if(this->params.regularized > 0)
if(params.norm != REG_NONE)
{
lambda_l = 1;
}
@ -570,7 +580,6 @@ void LogisticRegressionImpl::write(FileStorage& fs) const
fs<<"alpha"<<this->params.alpha;
fs<<"iterations"<<this->params.num_iters;
fs<<"norm"<<this->params.norm;
fs<<"regularized"<<this->params.regularized;
fs<<"train_method"<<this->params.train_method;
if(this->params.train_method == LogisticRegression::MINI_BATCH)
{
@ -592,7 +601,6 @@ void LogisticRegressionImpl::read(const FileNode& fn)
this->params.alpha = (double)fn["alpha"];
this->params.num_iters = (int)fn["iterations"];
this->params.norm = (int)fn["norm"];
this->params.regularized = (int)fn["regularized"];
this->params.train_method = (int)fn["train_method"];

if(this->params.train_method == LogisticRegression::MINI_BATCH)
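Note the semantic change in the hunks above: the old integer regularized flag is dropped, and regularization is now gated purely by the norm value. A two-line sketch, assuming REG_NONE carries the meaning this patch gives it:

Ptr<LogisticRegression> lr = LogisticRegression::create();
lr->setRegularization(LogisticRegression::REG_L2);    // llambda = 1 in the gradient code above
// lr->setRegularization(LogisticRegression::REG_NONE); // would disable regularization entirely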
@ -43,7 +43,6 @@
namespace cv {
namespace ml {

NormalBayesClassifier::Params::Params() {}

class NormalBayesClassifierImpl : public NormalBayesClassifier
{
@ -53,9 +52,6 @@ public:
nallvars = 0;
}

void setParams(const Params&) {}
Params getParams() const { return Params(); }

bool train( const Ptr<TrainData>& trainData, int flags )
{
const float min_variation = FLT_EPSILON;
@ -455,7 +451,7 @@ public:
};


Ptr<NormalBayesClassifier> NormalBayesClassifier::create(const Params&)
Ptr<NormalBayesClassifier> NormalBayesClassifier::create()
{
Ptr<NormalBayesClassifierImpl> p = makePtr<NormalBayesClassifierImpl>();
return p;
@ -120,6 +120,91 @@ namespace ml
return termCrit;
}

struct TreeParams
{
TreeParams();
TreeParams( int maxDepth, int minSampleCount,
double regressionAccuracy, bool useSurrogates,
int maxCategories, int CVFolds,
bool use1SERule, bool truncatePrunedTree,
const Mat& priors );

inline void setMaxCategories(int val)
{
if( val < 2 )
CV_Error( CV_StsOutOfRange, "max_categories should be >= 2" );
maxCategories = std::min(val, 15 );
}
inline void setMaxDepth(int val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange, "max_depth should be >= 0" );
maxDepth = std::min( val, 25 );
}
inline void setMinSampleCount(int val)
{
minSampleCount = std::max(val, 1);
}
inline void setCVFolds(int val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange,
"params.CVFolds should be =0 (the tree is not pruned) "
"or n>0 (tree is pruned using n-fold cross-validation)" );
if( val == 1 )
val = 0;
CVFolds = val;
}
inline void setRegressionAccuracy(float val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );
regressionAccuracy = val;
}

inline int getMaxCategories() const { return maxCategories; }
inline int getMaxDepth() const { return maxDepth; }
inline int getMinSampleCount() const { return minSampleCount; }
inline int getCVFolds() const { return CVFolds; }
inline float getRegressionAccuracy() const { return regressionAccuracy; }

CV_IMPL_PROPERTY(bool, UseSurrogates, useSurrogates)
CV_IMPL_PROPERTY(bool, Use1SERule, use1SERule)
CV_IMPL_PROPERTY(bool, TruncatePrunedTree, truncatePrunedTree)
CV_IMPL_PROPERTY_S(cv::Mat, Priors, priors)

public:
bool useSurrogates;
bool use1SERule;
bool truncatePrunedTree;
Mat priors;

protected:
int maxCategories;
int maxDepth;
int minSampleCount;
int CVFolds;
float regressionAccuracy;
};

struct RTreeParams
{
RTreeParams();
RTreeParams(bool calcVarImportance, int nactiveVars, TermCriteria termCrit );
bool calcVarImportance;
int nactiveVars;
TermCriteria termCrit;
};

struct BoostTreeParams
{
BoostTreeParams();
BoostTreeParams(int boostType, int weakCount, double weightTrimRate);
int boostType;
int weakCount;
double weightTrimRate;
};

class DTreesImpl : public DTrees
{
public:
@ -191,6 +276,16 @@ namespace ml
int maxSubsetSize;
};

CV_WRAP_SAME_PROPERTY(int, MaxCategories, params)
CV_WRAP_SAME_PROPERTY(int, MaxDepth, params)
CV_WRAP_SAME_PROPERTY(int, MinSampleCount, params)
CV_WRAP_SAME_PROPERTY(int, CVFolds, params)
CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, params)
CV_WRAP_SAME_PROPERTY(bool, Use1SERule, params)
CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, params)
CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, params)
CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, params)

DTreesImpl();
virtual ~DTreesImpl();
virtual void clear();
@ -202,8 +297,7 @@ namespace ml
int getCatCount(int vi) const { return catOfs[vi][1] - catOfs[vi][0]; }
int getSubsetSize(int vi) const { return (getCatCount(vi) + 31)/32; }

virtual void setDParams(const Params& _params);
virtual Params getDParams() const;
virtual void setDParams(const TreeParams& _params);
virtual void startTraining( const Ptr<TrainData>& trainData, int flags );
virtual void endTraining();
virtual void initCompVarIdx();
@ -250,7 +344,7 @@ namespace ml
virtual const std::vector<Split>& getSplits() const { return splits; }
virtual const std::vector<int>& getSubsets() const { return subsets; }

Params params0, params;
TreeParams params;

vector<int> varIdx;
vector<int> compVarIdx;
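For orientation, the CV_IMPL_PROPERTY family used by TreeParams presumably expands to a plain getter/setter pair over the named member; a hand-expanded sketch (illustrative, not the literal macro output):

// CV_IMPL_PROPERTY(bool, UseSurrogates, useSurrogates) roughly yields:
bool getUseSurrogates() const { return useSurrogates; }
void setUseSurrogates(bool val) { useSurrogates = val; }

// The _S variant takes the value by const reference, for heavier types:
// CV_IMPL_PROPERTY_S(cv::Mat, Priors, priors) roughly yields:
cv::Mat getPriors() const { return priors; }
void setPriors(const cv::Mat& val) { priors = val; }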
@ -48,21 +48,16 @@ namespace ml {
//////////////////////////////////////////////////////////////////////////////////////////
// Random trees //
//////////////////////////////////////////////////////////////////////////////////////////
RTrees::Params::Params()
: DTrees::Params(5, 10, 0.f, false, 10, 0, false, false, Mat())
RTreeParams::RTreeParams()
{
calcVarImportance = false;
nactiveVars = 0;
termCrit = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1);
}

RTrees::Params::Params( int _maxDepth, int _minSampleCount,
double _regressionAccuracy, bool _useSurrogates,
int _maxCategories, const Mat& _priors,
bool _calcVarImportance, int _nactiveVars,
TermCriteria _termCrit )
: DTrees::Params(_maxDepth, _minSampleCount, _regressionAccuracy, _useSurrogates,
_maxCategories, 0, false, false, _priors)
RTreeParams::RTreeParams(bool _calcVarImportance,
int _nactiveVars,
TermCriteria _termCrit )
{
calcVarImportance = _calcVarImportance;
nactiveVars = _nactiveVars;
@ -73,19 +68,20 @@ RTrees::Params::Params( int _maxDepth, int _minSampleCount,
class DTreesImplForRTrees : public DTreesImpl
{
public:
DTreesImplForRTrees() {}
DTreesImplForRTrees()
{
params.setMaxDepth(5);
params.setMinSampleCount(10);
params.setRegressionAccuracy(0.f);
params.useSurrogates = false;
params.setMaxCategories(10);
params.setCVFolds(0);
params.use1SERule = false;
params.truncatePrunedTree = false;
params.priors = Mat();
}
virtual ~DTreesImplForRTrees() {}

void setRParams(const RTrees::Params& p)
{
rparams = p;
}

RTrees::Params getRParams() const
{
return rparams;
}

void clear()
{
DTreesImpl::clear();
@ -129,10 +125,6 @@ public:

bool train( const Ptr<TrainData>& trainData, int flags )
{
Params dp(rparams.maxDepth, rparams.minSampleCount, rparams.regressionAccuracy,
rparams.useSurrogates, rparams.maxCategories, rparams.CVFolds,
rparams.use1SERule, rparams.truncatePrunedTree, rparams.priors);
setDParams(dp);
startTraining(trainData, flags);
int treeidx, ntrees = (rparams.termCrit.type & TermCriteria::COUNT) != 0 ?
rparams.termCrit.maxCount : 10000;
@ -326,12 +318,6 @@ public:
void readParams( const FileNode& fn )
{
DTreesImpl::readParams(fn);
rparams.maxDepth = params0.maxDepth;
rparams.minSampleCount = params0.minSampleCount;
rparams.regressionAccuracy = params0.regressionAccuracy;
rparams.useSurrogates = params0.useSurrogates;
rparams.maxCategories = params0.maxCategories;
rparams.priors = params0.priors;

FileNode tparams_node = fn["training_params"];
rparams.nactiveVars = (int)tparams_node["nactive_vars"];
@ -361,7 +347,7 @@ public:
}
}

RTrees::Params rparams;
RTreeParams rparams;
double oobError;
vector<float> varImportance;
vector<int> allVars, activeVars;
@ -372,6 +358,20 @@ public:
class RTreesImpl : public RTrees
{
public:
CV_IMPL_PROPERTY(bool, CalculateVarImportance, impl.rparams.calcVarImportance)
CV_IMPL_PROPERTY(int, ActiveVarCount, impl.rparams.nactiveVars)
CV_IMPL_PROPERTY_S(TermCriteria, TermCriteria, impl.rparams.termCrit)

CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params)
CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params)
CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params)
CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params)
CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params)
CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params)
CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params)
CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params)
CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params)

RTreesImpl() {}
virtual ~RTreesImpl() {}

@ -397,9 +397,6 @@ public:
impl.read(fn);
}

void setRParams(const Params& p) { impl.setRParams(p); }
Params getRParams() const { return impl.getRParams(); }

Mat getVarImportance() const { return Mat_<float>(impl.varImportance, true); }
int getVarCount() const { return impl.getVarCount(); }

@ -415,11 +412,9 @@ public:
};


Ptr<RTrees> RTrees::create(const Params& params)
Ptr<RTrees> RTrees::create()
{
Ptr<RTreesImpl> p = makePtr<RTreesImpl>();
p->setRParams(params);
return p;
return makePtr<RTreesImpl>();
}

}}
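As the train() hunk above shows, the forest size is driven by the COUNT part of the term criteria, falling back to a 10000-tree cap when absent. A minimal sketch of steering that through the new properties; the values are illustrative:

Ptr<RTrees> forest = RTrees::create();
// Stop after 100 trees, or earlier once the OOB error improvement drops below 0.01.
forest->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, 0.01));
forest->setCalculateVarImportance(true);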
@ -103,54 +103,60 @@ static void checkParamGrid(const ParamGrid& pg)
}

// SVM training parameters
SVM::Params::Params()
struct SvmParams
{
svmType = SVM::C_SVC;
kernelType = SVM::RBF;
degree = 0;
gamma = 1;
coef0 = 0;
C = 1;
nu = 0;
p = 0;
termCrit = TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON );
}
int svmType;
int kernelType;
double gamma;
double coef0;
double degree;
double C;
double nu;
double p;
Mat classWeights;
TermCriteria termCrit;

SvmParams()
{
svmType = SVM::C_SVC;
kernelType = SVM::RBF;
degree = 0;
gamma = 1;
coef0 = 0;
C = 1;
nu = 0;
p = 0;
termCrit = TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON );
}

SVM::Params::Params( int _svmType, int _kernelType,
double _degree, double _gamma, double _coef0,
double _Con, double _nu, double _p,
const Mat& _classWeights, TermCriteria _termCrit )
{
svmType = _svmType;
kernelType = _kernelType;
degree = _degree;
gamma = _gamma;
coef0 = _coef0;
C = _Con;
nu = _nu;
p = _p;
classWeights = _classWeights;
termCrit = _termCrit;
}
SvmParams( int _svmType, int _kernelType,
double _degree, double _gamma, double _coef0,
double _Con, double _nu, double _p,
const Mat& _classWeights, TermCriteria _termCrit )
{
svmType = _svmType;
kernelType = _kernelType;
degree = _degree;
gamma = _gamma;
coef0 = _coef0;
C = _Con;
nu = _nu;
p = _p;
classWeights = _classWeights;
termCrit = _termCrit;
}

};

/////////////////////////////////////// SVM kernel ///////////////////////////////////////
class SVMKernelImpl : public SVM::Kernel
{
public:
SVMKernelImpl()
{
}

SVMKernelImpl( const SVM::Params& _params )
SVMKernelImpl( const SvmParams& _params = SvmParams() )
{
params = _params;
}

virtual ~SVMKernelImpl()
{
}

int getType() const
{
return params.kernelType;
@ -327,7 +333,7 @@ public:
}
}

SVM::Params params;
SvmParams params;
};


@ -1185,7 +1191,7 @@ public:
int cache_size;
int max_cache_size;
Mat samples;
SVM::Params params;
SvmParams params;
vector<KernelRow> lru_cache;
int lru_first;
int lru_last;
@ -1215,6 +1221,7 @@ public:
SVMImpl()
{
clear();
checkParams();
}

~SVMImpl()
@ -1235,33 +1242,69 @@ public:
return sv;
}

void setParams( const Params& _params, const Ptr<Kernel>& _kernel )
CV_IMPL_PROPERTY(int, Type, params.svmType)
CV_IMPL_PROPERTY(double, Gamma, params.gamma)
CV_IMPL_PROPERTY(double, Coef0, params.coef0)
CV_IMPL_PROPERTY(double, Degree, params.degree)
CV_IMPL_PROPERTY(double, C, params.C)
CV_IMPL_PROPERTY(double, Nu, params.nu)
CV_IMPL_PROPERTY(double, P, params.p)
CV_IMPL_PROPERTY_S(cv::Mat, ClassWeights, params.classWeights)
CV_IMPL_PROPERTY_S(cv::TermCriteria, TermCriteria, params.termCrit)

int getKernelType() const
{
params = _params;
return params.kernelType;
}

void setKernel(int kernelType)
{
params.kernelType = kernelType;
if (kernelType != CUSTOM)
kernel = makePtr<SVMKernelImpl>(params);
}

void setCustomKernel(const Ptr<Kernel> &_kernel)
{
params.kernelType = CUSTOM;
kernel = _kernel;
}

void checkParams()
{
int kernelType = params.kernelType;
if (kernelType != CUSTOM)
{
if( kernelType != LINEAR && kernelType != POLY &&
kernelType != SIGMOID && kernelType != RBF &&
kernelType != INTER && kernelType != CHI2)
CV_Error( CV_StsBadArg, "Unknown/unsupported kernel type" );

if( kernelType == LINEAR )
params.gamma = 1;
else if( params.gamma <= 0 )
CV_Error( CV_StsOutOfRange, "gamma parameter of the kernel must be positive" );

if( kernelType != SIGMOID && kernelType != POLY )
params.coef0 = 0;
else if( params.coef0 < 0 )
CV_Error( CV_StsOutOfRange, "The kernel parameter <coef0> must be positive or zero" );

if( kernelType != POLY )
params.degree = 0;
else if( params.degree <= 0 )
CV_Error( CV_StsOutOfRange, "The kernel parameter <degree> must be positive" );

kernel = makePtr<SVMKernelImpl>(params);
}
else
{
if (!kernel)
CV_Error( CV_StsBadArg, "Custom kernel is not set" );
}

int svmType = params.svmType;

if( kernelType != LINEAR && kernelType != POLY &&
kernelType != SIGMOID && kernelType != RBF &&
kernelType != INTER && kernelType != CHI2)
CV_Error( CV_StsBadArg, "Unknown/unsupported kernel type" );

if( kernelType == LINEAR )
params.gamma = 1;
else if( params.gamma <= 0 )
CV_Error( CV_StsOutOfRange, "gamma parameter of the kernel must be positive" );

if( kernelType != SIGMOID && kernelType != POLY )
params.coef0 = 0;
else if( params.coef0 < 0 )
CV_Error( CV_StsOutOfRange, "The kernel parameter <coef0> must be positive or zero" );

if( kernelType != POLY )
params.degree = 0;
else if( params.degree <= 0 )
CV_Error( CV_StsOutOfRange, "The kernel parameter <degree> must be positive" );

if( svmType != C_SVC && svmType != NU_SVC &&
svmType != ONE_CLASS && svmType != EPS_SVR &&
svmType != NU_SVR )
@ -1285,28 +1328,18 @@ public:
if( svmType != C_SVC )
params.classWeights.release();

termCrit = params.termCrit;
if( !(termCrit.type & TermCriteria::EPS) )
termCrit.epsilon = DBL_EPSILON;
termCrit.epsilon = std::max(termCrit.epsilon, DBL_EPSILON);
if( !(termCrit.type & TermCriteria::COUNT) )
termCrit.maxCount = INT_MAX;
termCrit.maxCount = std::max(termCrit.maxCount, 1);

if( _kernel )
kernel = _kernel;
else
kernel = makePtr<SVMKernelImpl>(params);
if( !(params.termCrit.type & TermCriteria::EPS) )
params.termCrit.epsilon = DBL_EPSILON;
params.termCrit.epsilon = std::max(params.termCrit.epsilon, DBL_EPSILON);
if( !(params.termCrit.type & TermCriteria::COUNT) )
params.termCrit.maxCount = INT_MAX;
params.termCrit.maxCount = std::max(params.termCrit.maxCount, 1);
}

Params getParams() const
void setParams( const SvmParams& _params)
{
return params;
}

Ptr<Kernel> getKernel() const
{
return kernel;
params = _params;
checkParams();
}

int getSVCount(int i) const
@ -1335,9 +1368,9 @@ public:
_responses.convertTo(_yf, CV_32F);

bool ok =
svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, termCrit ) :
svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, termCrit ) :
svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, termCrit ) : false;
svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, params.termCrit ) :
svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, params.termCrit ) :
svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, params.termCrit ) : false;

if( !ok )
return false;
@ -1397,7 +1430,7 @@ public:
//check that while cross-validation there were the samples from all the classes
if( class_ranges[class_count] <= 0 )
CV_Error( CV_StsBadArg, "While cross-validation one or more of the classes have "
"been fell out of the sample. Try to enlarge <CvSVMParams::k_fold>" );
"been fell out of the sample. Try to enlarge <Params::k_fold>" );

if( svmType == NU_SVC )
{
@ -1448,10 +1481,10 @@ public:
DecisionFunc df;
bool ok = params.svmType == C_SVC ?
Solver::solve_c_svc( temp_samples, temp_y, Cp, Cn,
kernel, _alpha, sinfo, termCrit ) :
kernel, _alpha, sinfo, params.termCrit ) :
params.svmType == NU_SVC ?
Solver::solve_nu_svc( temp_samples, temp_y, params.nu,
kernel, _alpha, sinfo, termCrit ) :
kernel, _alpha, sinfo, params.termCrit ) :
false;
if( !ok )
return false;
@ -1557,6 +1590,8 @@ public:
{
clear();

checkParams();

int svmType = params.svmType;
Mat samples = data->getTrainSamples();
Mat responses;
@ -1586,6 +1621,8 @@ public:
ParamGrid nu_grid, ParamGrid coef_grid, ParamGrid degree_grid,
bool balanced )
{
checkParams();

int svmType = params.svmType;
RNG rng((uint64)-1);

@ -1708,7 +1745,7 @@ public:
int test_sample_count = (sample_count + k_fold/2)/k_fold;
int train_sample_count = sample_count - test_sample_count;

Params best_params = params;
SvmParams best_params = params;
double min_error = FLT_MAX;

int rtype = responses.type();
@ -1729,7 +1766,7 @@ public:
FOR_IN_GRID(degree, degree_grid)
{
// make sure we updated the kernel and other parameters
setParams(params, Ptr<Kernel>() );
setParams(params);

double error = 0;
for( k = 0; k < k_fold; k++ )
@ -1919,7 +1956,9 @@ public:
kernelType == LINEAR ? "LINEAR" :
kernelType == POLY ? "POLY" :
kernelType == RBF ? "RBF" :
kernelType == SIGMOID ? "SIGMOID" : format("Unknown_%d", kernelType);
kernelType == SIGMOID ? "SIGMOID" :
kernelType == CHI2 ? "CHI2" :
kernelType == INTER ? "INTER" : format("Unknown_%d", kernelType);

fs << "svmType" << svm_type_str;

@ -2036,7 +2075,7 @@ public:

void read_params( const FileNode& fn )
{
Params _params;
SvmParams _params;

// check for old naming
String svm_type_str = (String)(fn["svm_type"].empty() ? fn["svmType"] : fn["svm_type"]);
@ -2059,10 +2098,12 @@ public:
kernel_type_str == "LINEAR" ? LINEAR :
kernel_type_str == "POLY" ? POLY :
kernel_type_str == "RBF" ? RBF :
kernel_type_str == "SIGMOID" ? SIGMOID : -1;
kernel_type_str == "SIGMOID" ? SIGMOID :
kernel_type_str == "CHI2" ? CHI2 :
kernel_type_str == "INTER" ? INTER : CUSTOM;

if( kernelType < 0 )
CV_Error( CV_StsParseError, "Missing of invalid SVM kernel type" );
if( kernelType == CUSTOM )
CV_Error( CV_StsParseError, "Invalid SVM kernel type (or custom kernel)" );

_params.svmType = svmType;
_params.kernelType = kernelType;
@ -2086,7 +2127,7 @@ public:
else
_params.termCrit = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 1000, FLT_EPSILON );

setParams( _params, Ptr<Kernel>() );
setParams( _params );
}

void read( const FileNode& fn )
@ -2154,8 +2195,7 @@ public:
optimize_linear_svm();
}

Params params;
TermCriteria termCrit;
SvmParams params;
Mat class_labels;
int var_count;
Mat sv;
@ -2167,11 +2207,9 @@ public:
};


Ptr<SVM> SVM::create(const Params& params, const Ptr<SVM::Kernel>& kernel)
Ptr<SVM> SVM::create()
{
Ptr<SVMImpl> p = makePtr<SVMImpl>();
p->setParams(params, kernel);
return p;
return makePtr<SVMImpl>();
}

}
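A minimal sketch of driving the refactored SVM through its properties; note that checkParams() now runs in the constructor and in setParams(), and that the CHI2/INTER kernels round-trip through read/write. The data names are illustrative CV_32F matrices:

Ptr<SVM> svm = SVM::create();   // SvmParams defaults: C_SVC with an RBF kernel
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::CHI2);      // rebuilds the built-in kernel; use setCustomKernel() for user kernels
svm->setGamma(0.5);
svm->setC(1.0);
svm->setTermCriteria(TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 1000, FLT_EPSILON));
svm->train(trainSamples, ROW_SAMPLE, trainLabels);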
@ -48,18 +48,7 @@ namespace ml {

using std::vector;

void DTrees::setDParams(const DTrees::Params&)
{
CV_Error(CV_StsNotImplemented, "");
}

DTrees::Params DTrees::getDParams() const
{
CV_Error(CV_StsNotImplemented, "");
return DTrees::Params();
}

DTrees::Params::Params()
TreeParams::TreeParams()
{
maxDepth = INT_MAX;
minSampleCount = 10;
@ -72,11 +61,11 @@ DTrees::Params::Params()
priors = Mat();
}

DTrees::Params::Params( int _maxDepth, int _minSampleCount,
double _regressionAccuracy, bool _useSurrogates,
int _maxCategories, int _CVFolds,
bool _use1SERule, bool _truncatePrunedTree,
const Mat& _priors )
TreeParams::TreeParams(int _maxDepth, int _minSampleCount,
double _regressionAccuracy, bool _useSurrogates,
int _maxCategories, int _CVFolds,
bool _use1SERule, bool _truncatePrunedTree,
const Mat& _priors)
{
maxDepth = _maxDepth;
minSampleCount = _minSampleCount;
@ -248,7 +237,7 @@ const vector<int>& DTreesImpl::getActiveVars()

int DTreesImpl::addTree(const vector<int>& sidx )
{
size_t n = (params.maxDepth > 0 ? (1 << params.maxDepth) : 1024) + w->wnodes.size();
size_t n = (params.getMaxDepth() > 0 ? (1 << params.getMaxDepth()) : 1024) + w->wnodes.size();

w->wnodes.reserve(n);
w->wsplits.reserve(n);
@ -257,7 +246,7 @@ int DTreesImpl::addTree(const vector<int>& sidx )
w->wsplits.clear();
w->wsubsets.clear();

int cv_n = params.CVFolds;
int cv_n = params.getCVFolds();

if( cv_n > 0 )
{
@ -347,34 +336,9 @@ int DTreesImpl::addTree(const vector<int>& sidx )
return root;
}

DTrees::Params DTreesImpl::getDParams() const
void DTreesImpl::setDParams(const TreeParams& _params)
{
return params0;
}

void DTreesImpl::setDParams(const Params& _params)
{
params0 = params = _params;
if( params.maxCategories < 2 )
CV_Error( CV_StsOutOfRange, "params.max_categories should be >= 2" );
params.maxCategories = std::min( params.maxCategories, 15 );

if( params.maxDepth < 0 )
CV_Error( CV_StsOutOfRange, "params.max_depth should be >= 0" );
params.maxDepth = std::min( params.maxDepth, 25 );

params.minSampleCount = std::max(params.minSampleCount, 1);

if( params.CVFolds < 0 )
CV_Error( CV_StsOutOfRange,
"params.CVFolds should be =0 (the tree is not pruned) "
"or n>0 (tree is pruned using n-fold cross-validation)" );

if( params.CVFolds == 1 )
params.CVFolds = 0;

if( params.regressionAccuracy < 0 )
CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );
params = _params;
}

int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
@ -385,7 +349,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )

node.parent = parent;
node.depth = parent >= 0 ? w->wnodes[parent].depth + 1 : 0;
int nfolds = params.CVFolds;
int nfolds = params.getCVFolds();

if( nfolds > 0 )
{
@ -400,7 +364,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )

calcValue( nidx, sidx );

if( n <= params.minSampleCount || node.depth >= params.maxDepth )
if( n <= params.getMinSampleCount() || node.depth >= params.getMaxDepth() )
can_split = false;
else if( _isClassifier )
{
@ -415,7 +379,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
}
else
{
if( sqrt(node.node_risk) < params.regressionAccuracy )
if( sqrt(node.node_risk) < params.getRegressionAccuracy() )
can_split = false;
}

@ -493,7 +457,7 @@ int DTreesImpl::findBestSplit( const vector<int>& _sidx )
void DTreesImpl::calcValue( int nidx, const vector<int>& _sidx )
{
WNode* node = &w->wnodes[nidx];
int i, j, k, n = (int)_sidx.size(), cv_n = params.CVFolds;
int i, j, k, n = (int)_sidx.size(), cv_n = params.getCVFolds();
int m = (int)classLabels.size();

cv::AutoBuffer<double> buf(std::max(m, 3)*(cv_n+1));
@ -841,8 +805,8 @@ DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector<int>& _si
int m = (int)classLabels.size();

int base_size = m*(3 + mi) + mi + 1;
if( m > 2 && mi > params.maxCategories )
base_size += m*std::min(params.maxCategories, n) + mi;
if( m > 2 && mi > params.getMaxCategories() )
base_size += m*std::min(params.getMaxCategories(), n) + mi;
else
base_size += mi;
AutoBuffer<double> buf(base_size + n);
@ -880,9 +844,9 @@ DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector<int>& _si

if( m > 2 )
{
if( mi > params.maxCategories )
if( mi > params.getMaxCategories() )
{
mi = std::min(params.maxCategories, n);
mi = std::min(params.getMaxCategories(), n);
cjk = c_weights + _mi;
cluster_labels = (int*)(cjk + m*mi);
clusterCategories( _cjk, _mi, m, cjk, mi, cluster_labels );
@ -1228,7 +1192,7 @@ int DTreesImpl::pruneCV( int root )
// 2. choose the best tree index (if need, apply 1SE rule).
// 3. store the best index and cut the branches.

int ti, tree_count = 0, j, cv_n = params.CVFolds, n = w->wnodes[root].sample_count;
int ti, tree_count = 0, j, cv_n = params.getCVFolds(), n = w->wnodes[root].sample_count;
// currently, 1SE for regression is not implemented
bool use_1se = params.use1SERule != 0 && _isClassifier;
double min_err = 0, min_err_se = 0;
@ -1294,7 +1258,7 @@ int DTreesImpl::pruneCV( int root )

double DTreesImpl::updateTreeRNC( int root, double T, int fold )
{
int nidx = root, pidx = -1, cv_n = params.CVFolds;
int nidx = root, pidx = -1, cv_n = params.getCVFolds();
double min_alpha = DBL_MAX;

for(;;)
@ -1350,7 +1314,7 @@ double DTreesImpl::updateTreeRNC( int root, double T, int fold )

bool DTreesImpl::cutTree( int root, double T, int fold, double min_alpha )
{
int cv_n = params.CVFolds, nidx = root, pidx = -1;
int cv_n = params.getCVFolds(), nidx = root, pidx = -1;
WNode* node = &w->wnodes[root];
if( node->left < 0 )
return true;
@ -1560,19 +1524,19 @@ float DTreesImpl::predict( InputArray _samples, OutputArray _results, int flags

void DTreesImpl::writeTrainingParams(FileStorage& fs) const
{
fs << "use_surrogates" << (params0.useSurrogates ? 1 : 0);
fs << "max_categories" << params0.maxCategories;
fs << "regression_accuracy" << params0.regressionAccuracy;
fs << "use_surrogates" << (params.useSurrogates ? 1 : 0);
fs << "max_categories" << params.getMaxCategories();
fs << "regression_accuracy" << params.getRegressionAccuracy();

fs << "max_depth" << params0.maxDepth;
fs << "min_sample_count" << params0.minSampleCount;
fs << "cross_validation_folds" << params0.CVFolds;
fs << "max_depth" << params.getMaxDepth();
fs << "min_sample_count" << params.getMinSampleCount();
fs << "cross_validation_folds" << params.getCVFolds();

if( params0.CVFolds > 1 )
fs << "use_1se_rule" << (params0.use1SERule ? 1 : 0);
if( params.getCVFolds() > 1 )
fs << "use_1se_rule" << (params.use1SERule ? 1 : 0);

if( !params0.priors.empty() )
fs << "priors" << params0.priors;
if( !params.priors.empty() )
fs << "priors" << params.priors;
}

void DTreesImpl::writeParams(FileStorage& fs) const
@ -1724,18 +1688,18 @@ void DTreesImpl::readParams( const FileNode& fn )

FileNode tparams_node = fn["training_params"];

params0 = Params();
TreeParams params0 = TreeParams();

if( !tparams_node.empty() ) // training parameters are not necessary
{
params0.useSurrogates = (int)tparams_node["use_surrogates"] != 0;
params0.maxCategories = (int)(tparams_node["max_categories"].empty() ? 16 : tparams_node["max_categories"]);
params0.regressionAccuracy = (float)tparams_node["regression_accuracy"];
params0.maxDepth = (int)tparams_node["max_depth"];
params0.minSampleCount = (int)tparams_node["min_sample_count"];
params0.CVFolds = (int)tparams_node["cross_validation_folds"];
params0.setMaxCategories((int)(tparams_node["max_categories"].empty() ? 16 : tparams_node["max_categories"]));
params0.setRegressionAccuracy((float)tparams_node["regression_accuracy"]);
params0.setMaxDepth((int)tparams_node["max_depth"]);
params0.setMinSampleCount((int)tparams_node["min_sample_count"]);
params0.setCVFolds((int)tparams_node["cross_validation_folds"]);

if( params0.CVFolds > 1 )
if( params0.getCVFolds() > 1 )
{
params.use1SERule = (int)tparams_node["use_1se_rule"] != 0;
}
@ -1964,11 +1928,9 @@ void DTreesImpl::read( const FileNode& fn )
readTree(fnodes);
}

Ptr<DTrees> DTrees::create(const DTrees::Params& params)
Ptr<DTrees> DTrees::create()
{
Ptr<DTreesImpl> p = makePtr<DTreesImpl>();
p->setDParams(params);
return p;
return makePtr<DTreesImpl>();
}

}
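Since validation now lives in the TreeParams setters (see precomp.hpp above), out-of-range values are corrected or rejected at set time rather than at train time. A sketch of the resulting behavior, assuming the wrapper properties forward to those setters:

Ptr<DTrees> dtree = DTrees::create();
dtree->setMaxDepth(100);   // clamped to 25 by TreeParams::setMaxDepth
dtree->setCVFolds(1);      // folded to 0, i.e. no cross-validation pruning
// dtree->setMaxCategories(1); // would raise CV_StsOutOfRange ("max_categories should be >= 2")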
@ -330,7 +330,8 @@ void CV_KNearestTest::run( int /*start_from*/ )
}

// KNearest KDTree implementation
Ptr<KNearest> knearestKdt = KNearest::create(ml::KNearest::Params(10, true, INT_MAX, ml::KNearest::KDTREE));
Ptr<KNearest> knearestKdt = KNearest::create();
knearestKdt->setAlgorithmType(KNearest::KDTREE);
knearestKdt->train(trainData, ml::ROW_SAMPLE, trainLabels);
knearestKdt->findNearest(testData, 4, bestLabels);
if( !calcErr( bestLabels, testLabels, sizes, err, true ) )
@ -394,16 +395,18 @@ int CV_EMTest::runCase( int caseIndex, const EM_Params& params,
cv::Mat labels;
float err;

Ptr<EM> em;
EM::Params emp(params.nclusters, params.covMatType, params.termCrit);
Ptr<EM> em = EM::create();
em->setClustersNumber(params.nclusters);
em->setCovarianceMatrixType(params.covMatType);
em->setTermCriteria(params.termCrit);
if( params.startStep == EM::START_AUTO_STEP )
em = EM::train( trainData, noArray(), labels, noArray(), emp );
em->trainEM( trainData, noArray(), labels, noArray() );
else if( params.startStep == EM::START_E_STEP )
em = EM::train_startWithE( trainData, *params.means, *params.covs,
*params.weights, noArray(), labels, noArray(), emp );
em->trainE( trainData, *params.means, *params.covs,
*params.weights, noArray(), labels, noArray() );
else if( params.startStep == EM::START_M_STEP )
em = EM::train_startWithM( trainData, *params.probs,
noArray(), labels, noArray(), emp );
em->trainM( trainData, *params.probs,
noArray(), labels, noArray() );

// check train error
if( !calcErr( labels, trainLabels, sizes, err , false, false ) )
@ -543,7 +546,9 @@ protected:

Mat labels;

Ptr<EM> em = EM::train(samples, noArray(), labels, noArray(), EM::Params(nclusters));
Ptr<EM> em = EM::create();
em->setClustersNumber(nclusters);
em->trainEM(samples, noArray(), labels, noArray());

Mat firstResult(samples.rows, 1, CV_32SC1);
for( int i = 0; i < samples.rows; i++)
@ -644,8 +649,13 @@ protected:
samples1.push_back(sample);
}
}
Ptr<EM> model0 = EM::train(samples0, noArray(), noArray(), noArray(), EM::Params(3));
Ptr<EM> model1 = EM::train(samples1, noArray(), noArray(), noArray(), EM::Params(3));
Ptr<EM> model0 = EM::create();
model0->setClustersNumber(3);
model0->trainEM(samples0, noArray(), noArray(), noArray());

Ptr<EM> model1 = EM::create();
model1->setClustersNumber(3);
model1->trainEM(samples1, noArray(), noArray(), noArray());

Mat trainConfusionMat(2, 2, CV_32SC1, Scalar(0)),
testConfusionMat(2, 2, CV_32SC1, Scalar(0));
@ -95,16 +95,13 @@ void CV_LRTest::run( int /*start_from*/ )
string dataFileName = ts->get_data_path() + "iris.data";
Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);

LogisticRegression::Params params = LogisticRegression::Params();
params.alpha = 1.0;
params.num_iters = 10001;
params.norm = LogisticRegression::REG_L2;
params.regularized = 1;
params.train_method = LogisticRegression::BATCH;
params.mini_batch_size = 10;

// run LR classifier train classifier
Ptr<LogisticRegression> p = LogisticRegression::create(params);
Ptr<LogisticRegression> p = LogisticRegression::create();
p->setLearningRate(1.0);
p->setIterations(10001);
p->setRegularization(LogisticRegression::REG_L2);
p->setTrainMethod(LogisticRegression::BATCH);
p->setMiniBatchSize(10);
p->train(tdata);

// predict using the same data
@ -157,20 +154,17 @@ void CV_LRTest_SaveLoad::run( int /*start_from*/ )
Mat responses1, responses2;
Mat learnt_mat1, learnt_mat2;

LogisticRegression::Params params1 = LogisticRegression::Params();
params1.alpha = 1.0;
params1.num_iters = 10001;
params1.norm = LogisticRegression::REG_L2;
params1.regularized = 1;
params1.train_method = LogisticRegression::BATCH;
params1.mini_batch_size = 10;

// train and save the classifier
String filename = tempfile(".xml");
try
{
// run LR classifier train classifier
Ptr<LogisticRegression> lr1 = LogisticRegression::create(params1);
Ptr<LogisticRegression> lr1 = LogisticRegression::create();
lr1->setLearningRate(1.0);
lr1->setIterations(10001);
lr1->setRegularization(LogisticRegression::REG_L2);
lr1->setTrainMethod(LogisticRegression::BATCH);
lr1->setMiniBatchSize(10);
lr1->train(tdata);
lr1->predict(tdata->getSamples(), responses1);
learnt_mat1 = lr1->get_learnt_thetas();
@ -73,30 +73,14 @@ int str_to_svm_kernel_type( String& str )
return -1;
}

Ptr<SVM> svm_train_auto( Ptr<TrainData> _data, SVM::Params _params,
int k_fold, ParamGrid C_grid, ParamGrid gamma_grid,
ParamGrid p_grid, ParamGrid nu_grid, ParamGrid coef_grid,
ParamGrid degree_grid )
{
Mat _train_data = _data->getSamples();
Mat _responses = _data->getResponses();
Mat _var_idx = _data->getVarIdx();
Mat _sample_idx = _data->getTrainSampleIdx();

Ptr<SVM> svm = SVM::create(_params);
if( svm->trainAuto( _data, k_fold, C_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid ) )
return svm;
return Ptr<SVM>();
}

// 4. em
// 5. ann
int str_to_ann_train_method( String& str )
{
if( !str.compare("BACKPROP") )
return ANN_MLP::Params::BACKPROP;
return ANN_MLP::BACKPROP;
if( !str.compare("RPROP") )
return ANN_MLP::Params::RPROP;
return ANN_MLP::RPROP;
CV_Error( CV_StsBadArg, "incorrect ann train method string" );
return -1;
}
@ -343,16 +327,16 @@ int CV_MLBaseTest::train( int testCaseIdx )
String svm_type_str, kernel_type_str;
modelParamsNode["svm_type"] >> svm_type_str;
modelParamsNode["kernel_type"] >> kernel_type_str;
SVM::Params params;
params.svmType = str_to_svm_type( svm_type_str );
params.kernelType = str_to_svm_kernel_type( kernel_type_str );
modelParamsNode["degree"] >> params.degree;
modelParamsNode["gamma"] >> params.gamma;
modelParamsNode["coef0"] >> params.coef0;
modelParamsNode["C"] >> params.C;
modelParamsNode["nu"] >> params.nu;
modelParamsNode["p"] >> params.p;
model = SVM::create(params);
Ptr<SVM> m = SVM::create();
m->setType(str_to_svm_type( svm_type_str ));
m->setKernel(str_to_svm_kernel_type( kernel_type_str ));
m->setDegree(modelParamsNode["degree"]);
m->setGamma(modelParamsNode["gamma"]);
m->setCoef0(modelParamsNode["coef0"]);
m->setC(modelParamsNode["C"]);
m->setNu(modelParamsNode["nu"]);
m->setP(modelParamsNode["p"]);
model = m;
}
else if( modelName == CV_EM )
{
@ -371,9 +355,13 @@ int CV_MLBaseTest::train( int testCaseIdx )
data->getVarIdx(), data->getTrainSampleIdx());
int layer_sz[] = { data->getNAllVars(), 100, 100, (int)cls_map.size() };
Mat layer_sizes( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
model = ANN_MLP::create(ANN_MLP::Params(layer_sizes, ANN_MLP::SIGMOID_SYM, 0, 0,
TermCriteria(TermCriteria::COUNT,300,0.01),
str_to_ann_train_method(train_method_str), param1, param2));
Ptr<ANN_MLP> m = ANN_MLP::create();
m->setLayerSizes(layer_sizes);
m->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
m->setTermCriteria(TermCriteria(TermCriteria::COUNT,300,0.01));
m->setTrainMethod(str_to_ann_train_method(train_method_str), param1, param2);
model = m;

}
else if( modelName == CV_DTREE )
{
@ -386,8 +374,18 @@ int CV_MLBaseTest::train( int testCaseIdx )
modelParamsNode["max_categories"] >> MAX_CATEGORIES;
modelParamsNode["cv_folds"] >> CV_FOLDS;
modelParamsNode["is_pruned"] >> IS_PRUNED;
model = DTrees::create(DTrees::Params(MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY, USE_SURROGATE,
MAX_CATEGORIES, CV_FOLDS, false, IS_PRUNED, Mat() ));

Ptr<DTrees> m = DTrees::create();
m->setMaxDepth(MAX_DEPTH);
m->setMinSampleCount(MIN_SAMPLE_COUNT);
m->setRegressionAccuracy(REG_ACCURACY);
m->setUseSurrogates(USE_SURROGATE);
m->setMaxCategories(MAX_CATEGORIES);
m->setCVFolds(CV_FOLDS);
m->setUse1SERule(false);
m->setTruncatePrunedTree(IS_PRUNED);
m->setPriors(Mat());
model = m;
}
else if( modelName == CV_BOOST )
{
@ -401,7 +399,15 @@ int CV_MLBaseTest::train( int testCaseIdx )
modelParamsNode["weight_trim_rate"] >> WEIGHT_TRIM_RATE;
modelParamsNode["max_depth"] >> MAX_DEPTH;
//modelParamsNode["use_surrogate"] >> USE_SURROGATE;
model = Boost::create( Boost::Params(BOOST_TYPE, WEAK_COUNT, WEIGHT_TRIM_RATE, MAX_DEPTH, USE_SURROGATE, Mat()) );

Ptr<Boost> m = Boost::create();
m->setBoostType(BOOST_TYPE);
m->setWeakCount(WEAK_COUNT);
m->setWeightTrimRate(WEIGHT_TRIM_RATE);
m->setMaxDepth(MAX_DEPTH);
m->setUseSurrogates(USE_SURROGATE);
m->setPriors(Mat());
model = m;
}
else if( modelName == CV_RTREES )
{
@ -416,9 +422,18 @@ int CV_MLBaseTest::train( int testCaseIdx )
modelParamsNode["is_pruned"] >> IS_PRUNED;
modelParamsNode["nactive_vars"] >> NACTIVE_VARS;
modelParamsNode["max_trees_num"] >> MAX_TREES_NUM;
model = RTrees::create(RTrees::Params( MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY,
USE_SURROGATE, MAX_CATEGORIES, Mat(), true, // (calc_var_importance == true) <=> RF processes variable importance
NACTIVE_VARS, TermCriteria(TermCriteria::COUNT, MAX_TREES_NUM, OOB_EPS)));

Ptr<RTrees> m = RTrees::create();
m->setMaxDepth(MAX_DEPTH);
m->setMinSampleCount(MIN_SAMPLE_COUNT);
m->setRegressionAccuracy(REG_ACCURACY);
m->setUseSurrogates(USE_SURROGATE);
m->setMaxCategories(MAX_CATEGORIES);
m->setPriors(Mat());
m->setCalculateVarImportance(true);
m->setActiveVarCount(NACTIVE_VARS);
m->setTermCriteria(TermCriteria(TermCriteria::COUNT, MAX_TREES_NUM, OOB_EPS));
model = m;
}

if( !model.empty() )
@ -149,9 +149,8 @@ int CV_SLMLTest::validate_test_results( int testCaseIdx )
}

TEST(ML_NaiveBayes, save_load) { CV_SLMLTest test( CV_NBAYES ); test.safe_run(); }
//CV_SLMLTest lsmlknearest( CV_KNEAREST, "slknearest" ); // does not support save!
TEST(ML_KNearest, save_load) { CV_SLMLTest test( CV_KNEAREST ); test.safe_run(); }
TEST(ML_SVM, save_load) { CV_SLMLTest test( CV_SVM ); test.safe_run(); }
//CV_SLMLTest lsmlem( CV_EM, "slem" ); // does not support save!
TEST(ML_ANN, save_load) { CV_SLMLTest test( CV_ANN ); test.safe_run(); }
TEST(ML_DTree, save_load) { CV_SLMLTest test( CV_DTREE ); test.safe_run(); }
TEST(ML_Boost, save_load) { CV_SLMLTest test( CV_BOOST ); test.safe_run(); }
@ -104,34 +104,34 @@ namespace cv
*/
virtual void collectGarbage();

//! @name Scale factor
//! @brief Scale factor
CV_PURE_PROPERTY(int, Scale)

//! @name Iterations count
//! @brief Iterations count
CV_PURE_PROPERTY(int, Iterations)

//! @name Asymptotic value of steepest descent method
//! @brief Asymptotic value of steepest descent method
CV_PURE_PROPERTY(double, Tau)

//! @name Weight parameter to balance data term and smoothness term
//! @brief Weight parameter to balance data term and smoothness term
CV_PURE_PROPERTY(double, Labmda)

//! @name Parameter of spacial distribution in Bilateral-TV
//! @brief Parameter of spacial distribution in Bilateral-TV
CV_PURE_PROPERTY(double, Alpha)

//! @name Kernel size of Bilateral-TV filter
//! @brief Kernel size of Bilateral-TV filter
CV_PURE_PROPERTY(int, KernelSize)

//! @name Gaussian blur kernel size
//! @brief Gaussian blur kernel size
CV_PURE_PROPERTY(int, BlurKernelSize)

//! @name Gaussian blur sigma
//! @brief Gaussian blur sigma
CV_PURE_PROPERTY(double, BlurSigma)

//! @name Radius of the temporal search area
//! @brief Radius of the temporal search area
CV_PURE_PROPERTY(int, TemporalAreaRadius)

//! @name Dense optical flow algorithm
//! @brief Dense optical flow algorithm
CV_PURE_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow)

protected:
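Only the Doxygen markup changes here (the @name groups become @brief one-liners), but for context, each CV_PURE_PROPERTY(type, name) line declares a pure virtual getter/setter pair on the interface; CV_PURE_PROPERTY(int, Scale), for example, corresponds roughly to:

    virtual int getScale() const = 0;
    virtual void setScale(int val) = 0;

CV_PURE_PROPERTY_S is the same idea with the setter presumably taking its value by const reference, which suits heavier types such as the Ptr<DenseOpticalFlowExt> property above.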
@ -98,17 +98,17 @@ namespace cv
class CV_EXPORTS BroxOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
//! @name Flow smoothness
//! @brief Flow smoothness
CV_PURE_PROPERTY(double, Alpha)
//! @name Gradient constancy importance
//! @brief Gradient constancy importance
CV_PURE_PROPERTY(double, Gamma)
//! @name Pyramid scale factor
//! @brief Pyramid scale factor
CV_PURE_PROPERTY(double, ScaleFactor)
//! @name Number of lagged non-linearity iterations (inner loop)
//! @brief Number of lagged non-linearity iterations (inner loop)
CV_PURE_PROPERTY(int, InnerIterations)
//! @name Number of warping iterations (number of pyramid levels)
//! @brief Number of warping iterations (number of pyramid levels)
CV_PURE_PROPERTY(int, OuterIterations)
//! @name Number of linear system solver iterations
//! @brief Number of linear system solver iterations
CV_PURE_PROPERTY(int, SolverIterations)
};
CV_EXPORTS Ptr<BroxOpticalFlow> createOptFlow_Brox_CUDA();
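The factory on the last line returns the CUDA implementation behind this interface. A usage sketch (the parameter values are the conventional Brox defaults, chosen here purely for illustration; frame0, frame1 and flow are assumed inputs/outputs):

    Ptr<cv::superres::BroxOpticalFlow> brox = cv::superres::createOptFlow_Brox_CUDA();
    brox->setAlpha(0.197);       // flow smoothness
    brox->setGamma(50.0);        // gradient constancy importance
    brox->setScaleFactor(0.8);   // pyramid scale factor
    brox->calc(frame0, frame1, flow, noArray()); // second flow output unused here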
@ -328,18 +328,6 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Simple()

namespace
{
#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) \
type get##name() const \
{ \
return internal_obj->get##internal_name(); \
} \
void set##name(type _name) \
{ \
internal_obj->set##internal_name(_name); \
}

#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)

class DualTVL1 : public CpuOpticalFlow, public virtual cv::superres::DualTVL1OpticalFlow
{
public:
@ -347,14 +335,14 @@ namespace
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
void collectGarbage();

CV_WRAP_SAME_PROPERTY(double, Tau, alg_)
CV_WRAP_SAME_PROPERTY(double, Lambda, alg_)
CV_WRAP_SAME_PROPERTY(double, Theta, alg_)
CV_WRAP_SAME_PROPERTY(int, ScalesNumber, alg_)
CV_WRAP_SAME_PROPERTY(int, WarpingsNumber, alg_)
CV_WRAP_SAME_PROPERTY(double, Epsilon, alg_)
CV_WRAP_PROPERTY(int, Iterations, OuterIterations, alg_)
CV_WRAP_SAME_PROPERTY(bool, UseInitialFlow, alg_)
CV_WRAP_SAME_PROPERTY(double, Tau, (*alg_))
CV_WRAP_SAME_PROPERTY(double, Lambda, (*alg_))
CV_WRAP_SAME_PROPERTY(double, Theta, (*alg_))
CV_WRAP_SAME_PROPERTY(int, ScalesNumber, (*alg_))
CV_WRAP_SAME_PROPERTY(int, WarpingsNumber, (*alg_))
CV_WRAP_SAME_PROPERTY(double, Epsilon, (*alg_))
CV_WRAP_PROPERTY(int, Iterations, OuterIterations, (*alg_))
CV_WRAP_SAME_PROPERTY(bool, UseInitialFlow, (*alg_))

protected:
void impl(InputArray input0, InputArray input1, OutputArray dst);
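One way to read the alg_ to (*alg_) change: alg_ is a smart pointer, and the first hunk header (-328,18 +328,6) indicates the multi-line macros shown above were rewritten into a shorter form that this rendered diff does not display. The call sites now hand the macro the dereferenced object rather than the pointer, which suggests the new macro body forwards with member (dot) syntax. A plausible sketch of such a forwarding macro (an assumption, not the verbatim replacement):

    #define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) \
        type get##name() const { return internal_obj.get##internal_name(); } \
        void set##name(type val) { internal_obj.set##internal_name(val); }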
@ -440,29 +440,29 @@ Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flo
class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow
{
public:
//! @name Time step of the numerical scheme
//! @brief Time step of the numerical scheme
CV_PURE_PROPERTY(double, Tau)
//! @name Weight parameter for the data term, attachment parameter
//! @brief Weight parameter for the data term, attachment parameter
CV_PURE_PROPERTY(double, Lambda)
//! @name Weight parameter for (u - v)^2, tightness parameter
//! @brief Weight parameter for (u - v)^2, tightness parameter
CV_PURE_PROPERTY(double, Theta)
//! @name coefficient for additional illumination variation term
//! @brief coefficient for additional illumination variation term
CV_PURE_PROPERTY(double, Gamma)
//! @name Number of scales used to create the pyramid of images
//! @brief Number of scales used to create the pyramid of images
CV_PURE_PROPERTY(int, ScalesNumber)
//! @name Number of warpings per scale
//! @brief Number of warpings per scale
CV_PURE_PROPERTY(int, WarpingsNumber)
//! @name Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time
//! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time
CV_PURE_PROPERTY(double, Epsilon)
//! @name Inner iterations (between outlier filtering) used in the numerical scheme
//! @brief Inner iterations (between outlier filtering) used in the numerical scheme
CV_PURE_PROPERTY(int, InnerIterations)
//! @name Outer iterations (number of inner loops) used in the numerical scheme
//! @brief Outer iterations (number of inner loops) used in the numerical scheme
CV_PURE_PROPERTY(int, OuterIterations)
//! @name Use initial flow
//! @brief Use initial flow
CV_PURE_PROPERTY(bool, UseInitialFlow)
//! @name Step between scales (<1)
//! @brief Step between scales (<1)
CV_PURE_PROPERTY(double, ScaleStep)
//! @name Median filter kernel size (1 = no filter) (3 or 5)
//! @brief Median filter kernel size (1 = no filter) (3 or 5)
CV_PURE_PROPERTY(int, MedianFiltering)
};
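With DualTVL1OpticalFlow exported as a full interface, the TV-L1 algorithm can be tuned through these typed properties; a sketch (createOptFlow_DualTVL1() is the video-module factory for this class; treat its exact return type here as an assumption):

    Ptr<DualTVL1OpticalFlow> tvl1 = createOptFlow_DualTVL1();
    tvl1->setTau(0.25);         // time step of the numerical scheme
    tvl1->setLambda(0.15);      // data term weight
    tvl1->setWarpingsNumber(5); // warpings per scale
    Mat flow;
    tvl1->calc(prevGray, nextGray, flow); // 8-bit single-channel frames assumed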
@ -36,9 +36,11 @@ int main( int /*argc*/, char** /*argv*/ )
samples = samples.reshape(1, 0);

// cluster the data
Ptr<EM> em_model = EM::train( samples, noArray(), labels, noArray(),
EM::Params(N, EM::COV_MAT_SPHERICAL,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1)));
Ptr<EM> em_model = EM::create();
em_model->setClustersNumber(N);
em_model->setCovarianceMatrixType(EM::COV_MAT_SPHERICAL);
em_model->setTermCriteria(TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1));
em_model->trainEM( samples, noArray(), labels, noArray() );

// classify every image pixel
for( i = 0; i < img.rows; i++ )
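The per-pixel loop that follows queries the trained mixture for each image point. A sketch of the call it relies on (EM::predict2 returns a Vec2d whose second component is the index of the most probable mixture component; the pixel coordinates i, j are assumptions matching the loop):

    Mat sample(1, 2, CV_32FC1);
    sample.at<float>(0) = (float)j;
    sample.at<float>(1) = (float)i;
    int cluster = cvRound(em_model->predict2(sample, noArray())[1]);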
@ -178,8 +178,23 @@ build_rtrees_classifier( const string& data_filename,
{
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
// Params( int maxDepth, int minSampleCount,
// double regressionAccuracy, bool useSurrogates,
// int maxCategories, const Mat& priors,
// bool calcVarImportance, int nactiveVars,
// TermCriteria termCrit );
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
model = StatModel::train<RTrees>(tdata, RTrees::Params(10,10,0,false,15,Mat(),true,4,TC(100,0.01f)));
model = RTrees::create();
model->setMaxDepth(10);
model->setMinSampleCount(10);
model->setRegressionAccuracy(0);
model->setUseSurrogates(false);
model->setMaxCategories(15);
model->setPriors(Mat());
model->setCalculateVarImportance(true);
model->setActiveVarCount(4);
model->setTermCriteria(TC(100,0.01f));
model->train(tdata);
cout << endl;
}
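Because setCalculateVarImportance(true) is enabled above, the trained forest also exposes per-variable importance; a sketch of reading it back (assuming the importance matrix is CV_32F, as elsewhere in the samples):

    Mat vi = model->getVarImportance(); // one entry per input variable
    for( int k = 0; k < (int)vi.total(); k++ )
        printf( "feature #%d importance: %.2f%%\n", k, 100.f*vi.at<float>(k) );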
@ -269,7 +284,14 @@ build_boost_classifier( const string& data_filename,
priors[1] = 26;

cout << "Training the classifier (may take a few minutes)...\n";
model = StatModel::train<Boost>(tdata, Boost::Params(Boost::GENTLE, 100, 0.95, 5, false, Mat(priors) ));
model = Boost::create();
model->setBoostType(Boost::GENTLE);
model->setWeakCount(100);
model->setWeightTrimRate(0.95);
model->setMaxDepth(5);
model->setUseSurrogates(false);
model->setPriors(Mat(priors));
model->train(tdata);
cout << endl;
}
@ -374,11 +396,11 @@ build_mlp_classifier( const string& data_filename,
Mat layer_sizes( 1, nlayers, CV_32S, layer_sz );

#if 1
int method = ANN_MLP::Params::BACKPROP;
int method = ANN_MLP::BACKPROP;
double method_param = 0.001;
int max_iter = 300;
#else
int method = ANN_MLP::Params::RPROP;
int method = ANN_MLP::RPROP;
double method_param = 0.1;
int max_iter = 1000;
#endif
@ -386,7 +408,12 @@ build_mlp_classifier( const string& data_filename,
Ptr<TrainData> tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses);

cout << "Training the classifier (may take a few minutes)...\n";
model = StatModel::train<ANN_MLP>(tdata, ANN_MLP::Params(layer_sizes, ANN_MLP::SIGMOID_SYM, 0, 0, TC(max_iter,0), method, method_param));
model = ANN_MLP::create();
model->setLayerSizes(layer_sizes);
model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
model->setTermCriteria(TC(max_iter,0));
model->setTrainMethod(method, method_param);
model->train(tdata);
cout << endl;
}
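Unlike the other classifiers, ANN_MLP trains on unrolled responses: one row per sample with a 1 in the column of that sample's class. The train_responses matrix fed to TrainData::create above is built along these lines (a sketch matching this sample's letters dataset, where labels are 'A'..'Z'; ntrain_samples, class_count and responses are the sample's own variables):

    Mat train_responses = Mat::zeros( ntrain_samples, class_count, CV_32F );
    for( int i = 0; i < ntrain_samples; i++ )
    {
        int cls_label = cvRound(responses.at<float>(i)) - 'A';
        train_responses.at<float>(i, cls_label) = 1.f;
    }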
@ -403,7 +430,6 @@ build_knearest_classifier( const string& data_filename, int K )
if( !ok )
return ok;

Ptr<KNearest> model;

int nsamples_all = data.rows;
int ntrain_samples = (int)(nsamples_all*0.8);
@ -411,7 +437,10 @@ build_knearest_classifier( const string& data_filename, int K )
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
model = StatModel::train<KNearest>(tdata, KNearest::Params(K, true));
Ptr<KNearest> model = KNearest::create();
model->setDefaultK(K);
model->setIsClassifier(true);
model->train(tdata);
cout << endl;

test_and_save_classifier(model, data, responses, ntrain_samples, 0, string());
@ -435,7 +464,8 @@ build_nbayes_classifier( const string& data_filename )
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
model = StatModel::train<NormalBayesClassifier>(tdata, NormalBayesClassifier::Params());
model = NormalBayesClassifier::create();
model->train(tdata);
cout << endl;

test_and_save_classifier(model, data, responses, ntrain_samples, 0, string());
@ -471,13 +501,11 @@ build_svm_classifier( const string& data_filename,
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);

SVM::Params params;
params.svmType = SVM::C_SVC;
params.kernelType = SVM::LINEAR;
params.C = 1;

model = StatModel::train<SVM>(tdata, params);
model = SVM::create();
model->setType(SVM::C_SVC);
model->setKernel(SVM::LINEAR);
model->setC(1);
model->train(tdata);
cout << endl;
}
@ -132,20 +132,16 @@ int main()
showImage(data_train, 28, "train data");
showImage(data_test, 28, "test data");


// simple case with batch gradient
LogisticRegression::Params params = LogisticRegression::Params(
0.001, 10, LogisticRegression::BATCH, LogisticRegression::REG_L2, 1, 1);
// simple case with mini-batch gradient
// LogisticRegression::Params params = LogisticRegression::Params(
// 0.001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1);

// mini-batch gradient with higher accuracy
// LogisticRegression::Params params = LogisticRegression::Params(
// 0.000001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1);

cout << "training...";
Ptr<StatModel> lr1 = LogisticRegression::create(params);
//! [init]
Ptr<LogisticRegression> lr1 = LogisticRegression::create();
lr1->setLearningRate(0.001);
lr1->setIterations(10);
lr1->setRegularization(LogisticRegression::REG_L2);
lr1->setTrainMethod(LogisticRegression::BATCH);
lr1->setMiniBatchSize(1);
//! [init]
lr1->train(data_train, ROW_SAMPLE, labels_train);
cout << "done!" << endl;
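Training is followed by prediction on the held-out half of the digits; a sketch of that step (calculateAccuracyPercent is this sample's own helper and an assumption here):

    Mat responses;
    lr1->predict(data_test, responses);
    cout << "accuracy: " << calculateAccuracyPercent(labels_test, responses) << "%" << endl;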
@ -102,7 +102,7 @@ static void predict_and_paint(const Ptr<StatModel>& model, Mat& dst)
static void find_decision_boundary_NBC()
{
// learn classifier
Ptr<NormalBayesClassifier> normalBayesClassifier = StatModel::train<NormalBayesClassifier>(prepare_train_data(), NormalBayesClassifier::Params());
Ptr<NormalBayesClassifier> normalBayesClassifier = StatModel::train<NormalBayesClassifier>(prepare_train_data());

predict_and_paint(normalBayesClassifier, imgDst);
}
@ -112,15 +112,29 @@ static void find_decision_boundary_NBC()
#if _KNN_
static void find_decision_boundary_KNN( int K )
{
Ptr<KNearest> knn = StatModel::train<KNearest>(prepare_train_data(), KNearest::Params(K, true));

Ptr<KNearest> knn = KNearest::create();
knn->setDefaultK(K);
knn->setIsClassifier(true);
knn->train(prepare_train_data());
predict_and_paint(knn, imgDst);
}
#endif

#if _SVM_
static void find_decision_boundary_SVM( SVM::Params params )
static void find_decision_boundary_SVM( double C )
{
Ptr<SVM> svm = StatModel::train<SVM>(prepare_train_data(), params);
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::POLY); //SVM::LINEAR;
svm->setDegree(0.5);
svm->setGamma(1);
svm->setCoef0(1);
svm->setNu(0.5);
svm->setP(0);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01));
svm->setC(C);
svm->train(prepare_train_data());
predict_and_paint(svm, imgDst);

Mat sv = svm->getSupportVectors();
@ -135,16 +149,14 @@ static void find_decision_boundary_SVM( SVM::Params params )
#if _DT_
static void find_decision_boundary_DT()
{
DTrees::Params params;
params.maxDepth = 8;
params.minSampleCount = 2;
params.useSurrogates = false;
params.CVFolds = 0; // the number of cross-validation folds
params.use1SERule = false;
params.truncatePrunedTree = false;

Ptr<DTrees> dtree = StatModel::train<DTrees>(prepare_train_data(), params);

Ptr<DTrees> dtree = DTrees::create();
dtree->setMaxDepth(8);
dtree->setMinSampleCount(2);
dtree->setUseSurrogates(false);
dtree->setCVFolds(0); // the number of cross-validation folds
dtree->setUse1SERule(false);
dtree->setTruncatePrunedTree(false);
dtree->train(prepare_train_data());
predict_and_paint(dtree, imgDst);
}
#endif
@ -152,15 +164,14 @@ static void find_decision_boundary_DT()
#if _BT_
static void find_decision_boundary_BT()
{
Boost::Params params( Boost::DISCRETE, // boost_type
100, // weak_count
0.95, // weight_trim_rate
2, // max_depth
false, //use_surrogates
Mat() // priors
);

Ptr<Boost> boost = StatModel::train<Boost>(prepare_train_data(), params);
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::DISCRETE);
boost->setWeakCount(100);
boost->setWeightTrimRate(0.95);
boost->setMaxDepth(2);
boost->setUseSurrogates(false);
boost->setPriors(Mat());
boost->train(prepare_train_data());
predict_and_paint(boost, imgDst);
}

@ -185,18 +196,17 @@ static void find_decision_boundary_GBT()
#if _RF_
static void find_decision_boundary_RF()
{
RTrees::Params params( 4, // max_depth,
2, // min_sample_count,
0.f, // regression_accuracy,
false, // use_surrogates,
16, // max_categories,
Mat(), // priors,
false, // calc_var_importance,
1, // nactive_vars,
TermCriteria(TermCriteria::MAX_ITER, 5, 0) // max_num_of_trees_in_the_forest,
);

Ptr<RTrees> rtrees = StatModel::train<RTrees>(prepare_train_data(), params);
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setMaxDepth(4);
rtrees->setMinSampleCount(2);
rtrees->setRegressionAccuracy(0.f);
rtrees->setUseSurrogates(false);
rtrees->setMaxCategories(16);
rtrees->setPriors(Mat());
rtrees->setCalculateVarImportance(false);
rtrees->setActiveVarCount(1);
rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 5, 0));
rtrees->train(prepare_train_data());
predict_and_paint(rtrees, imgDst);
}

@ -205,9 +215,6 @@ static void find_decision_boundary_RF()
#if _ANN_
static void find_decision_boundary_ANN( const Mat& layer_sizes )
{
ANN_MLP::Params params(layer_sizes, ANN_MLP::SIGMOID_SYM, 1, 1, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON),
ANN_MLP::Params::BACKPROP, 0.001);

Mat trainClasses = Mat::zeros( (int)trainedPoints.size(), (int)classColors.size(), CV_32FC1 );
for( int i = 0; i < trainClasses.rows; i++ )
{
@ -217,7 +224,12 @@ static void find_decision_boundary_ANN( const Mat& layer_sizes )
Mat samples = prepare_train_samples(trainedPoints);
Ptr<TrainData> tdata = TrainData::create(samples, ROW_SAMPLE, trainClasses);

Ptr<ANN_MLP> ann = StatModel::train<ANN_MLP>(tdata, params);
Ptr<ANN_MLP> ann = ANN_MLP::create();
ann->setLayerSizes(layer_sizes);
ann->setActivationFunction(ANN_MLP::SIGMOID_SYM, 1, 1);
ann->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON));
ann->setTrainMethod(ANN_MLP::BACKPROP, 0.001);
ann->train(tdata);
predict_and_paint(ann, imgDst);
}
#endif
@ -247,8 +259,11 @@ static void find_decision_boundary_EM()
// learn models
if( !modelSamples.empty() )
{
em_models[i] = EM::train(modelSamples, noArray(), noArray(), noArray(),
EM::Params(componentCount, EM::COV_MAT_DIAGONAL));
Ptr<EM> em = EM::create();
em->setClustersNumber(componentCount);
em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
em->trainEM(modelSamples, noArray(), noArray(), noArray());
em_models[i] = em;
}
}

@ -332,33 +347,20 @@ int main()
imshow( "NormalBayesClassifier", imgDst );
#endif
#if _KNN_
int K = 3;
find_decision_boundary_KNN( K );
find_decision_boundary_KNN( 3 );
imshow( "kNN", imgDst );

K = 15;
find_decision_boundary_KNN( K );
find_decision_boundary_KNN( 15 );
imshow( "kNN2", imgDst );
#endif

#if _SVM_
//(1)-(2)separable and not sets
SVM::Params params;
params.svmType = SVM::C_SVC;
params.kernelType = SVM::POLY; //CvSVM::LINEAR;
params.degree = 0.5;
params.gamma = 1;
params.coef0 = 1;
params.C = 1;
params.nu = 0.5;
params.p = 0;
params.termCrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01);

find_decision_boundary_SVM( params );
find_decision_boundary_SVM( 1 );
imshow( "classificationSVM1", imgDst );

params.C = 10;
find_decision_boundary_SVM( params );
find_decision_boundary_SVM( 10 );
imshow( "classificationSVM2", imgDst );
#endif
@ -141,7 +141,7 @@ Mat get_hogdescriptor_visu(const Mat& color_origImg, vector<float>& descriptorVa

int cellSize = 8;
int gradientBinSize = 9;
float radRangeForOneBin = (float)(CV_PI/(float)gradientBinSize); // dividing 180° into 9 bins, how large (in rad) is one bin?
float radRangeForOneBin = (float)(CV_PI/(float)gradientBinSize); // dividing 180 into 9 bins, how large (in rad) is one bin?

// prepare data structure: 9 orientation / gradient strenghts for each cell
int cells_in_x_dir = DIMX / cellSize;
@ -313,23 +313,23 @@ void compute_hog( const vector< Mat > & img_lst, vector< Mat > & gradient_lst, c

void train_svm( const vector< Mat > & gradient_lst, const vector< int > & labels )
{
/* Default values to train SVM */
SVM::Params params;
params.coef0 = 0.0;
params.degree = 3;
params.termCrit.epsilon = 1e-3;
params.gamma = 0;
params.kernelType = SVM::LINEAR;
params.nu = 0.5;
params.p = 0.1; // for EPSILON_SVR, epsilon in loss function?
params.C = 0.01; // From paper, soft classifier
params.svmType = SVM::EPS_SVR; // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task

Mat train_data;
convert_to_ml( gradient_lst, train_data );

clog << "Start training...";
Ptr<SVM> svm = StatModel::train<SVM>(train_data, ROW_SAMPLE, Mat(labels), params);
Ptr<SVM> svm = SVM::create();
/* Default values to train SVM */
svm->setCoef0(0.0);
svm->setDegree(3);
svm->setTermCriteria(TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, 1e-3 ));
svm->setGamma(0);
svm->setKernel(SVM::LINEAR);
svm->setNu(0.5);
svm->setP(0.1); // for EPSILON_SVR, epsilon in loss function?
svm->setC(0.01); // From paper, soft classifier
svm->setType(SVM::EPS_SVR); // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task
svm->train(train_data, ROW_SAMPLE, Mat(labels));
clog << "...[done]" << endl;

svm->save( "my_people_detector.yml" );
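The saved regressor is later folded into a single detector vector for HOGDescriptor::setSVMDetector. A sketch of that conversion, mirroring the sample's get_svm_detector() helper (valid for a linear kernel only; rho is the decision function's bias):

    Ptr<SVM> svm = StatModel::load<SVM>( "my_people_detector.yml" );
    Mat sv = svm->getSupportVectors();              // one row per support vector
    Mat alpha, svidx;
    double rho = svm->getDecisionFunction( 0, alpha, svidx );
    std::vector<float> hog_detector( sv.cols + 1 );
    memcpy( &hog_detector[0], sv.ptr(), sv.cols*sizeof(hog_detector[0]) );
    hog_detector[sv.cols] = (float)-rho;            // bias term goes last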
@ -73,18 +73,42 @@ int main(int argc, char** argv)
data->setTrainTestSplitRatio(train_test_split_ratio);

printf("======DTREE=====\n");
Ptr<DTrees> dtree = DTrees::create(DTrees::Params( 10, 2, 0, false, 16, 0, false, false, Mat() ));
Ptr<DTrees> dtree = DTrees::create();
dtree->setMaxDepth(10);
dtree->setMinSampleCount(2);
dtree->setRegressionAccuracy(0);
dtree->setUseSurrogates(false);
dtree->setMaxCategories(16);
dtree->setCVFolds(0);
dtree->setUse1SERule(false);
dtree->setTruncatePrunedTree(false);
dtree->setPriors(Mat());
train_and_print_errs(dtree, data);

if( (int)data->getClassLabels().total() <= 2 ) // regression or 2-class classification problem
{
printf("======BOOST=====\n");
Ptr<Boost> boost = Boost::create(Boost::Params(Boost::GENTLE, 100, 0.95, 2, false, Mat()));
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::GENTLE);
boost->setWeakCount(100);
boost->setWeightTrimRate(0.95);
boost->setMaxDepth(2);
boost->setUseSurrogates(false);
boost->setPriors(Mat());
train_and_print_errs(boost, data);
}

printf("======RTREES=====\n");
Ptr<RTrees> rtrees = RTrees::create(RTrees::Params(10, 2, 0, false, 16, Mat(), false, 0, TermCriteria(TermCriteria::MAX_ITER, 100, 0)));
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setMaxDepth(10);
rtrees->setMinSampleCount(2);
rtrees->setRegressionAccuracy(0);
rtrees->setUseSurrogates(false);
rtrees->setMaxCategories(16);
rtrees->setPriors(Mat());
rtrees->setCalculateVarImportance(false);
rtrees->setActiveVarCount(0);
rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 0));
train_and_print_errs(rtrees, data);

return 0;
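train_and_print_errs is this sample's own helper; the error figures it prints come from StatModel::calcError on the train/test split configured above. Roughly, under that assumption:

    static void train_and_print_errs(Ptr<StatModel> model, const Ptr<TrainData>& data)
    {
        bool ok = model->train(data);
        if( !ok )
            printf("Training failed\n");
        else
        {
            printf( "train error: %f\n", model->calcError(data, false, noArray()) );
            printf( "test error: %f\n\n", model->calcError(data, true, noArray()) );
        }
    }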
@ -14,23 +14,30 @@ int main(int, char**)
Mat image = Mat::zeros(height, width, CV_8UC3);

// Set up training data
//! [setup1]
int labels[4] = {1, -1, -1, -1};
Mat labelsMat(4, 1, CV_32SC1, labels);

float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
//! [setup1]
//! [setup2]
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
Mat labelsMat(4, 1, CV_32SC1, labels);
//! [setup2]

// Set up SVM's parameters
SVM::Params params;
params.svmType = SVM::C_SVC;
params.kernelType = SVM::LINEAR;
params.termCrit = TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6);

// Train the SVM
Ptr<SVM> svm = StatModel::train<SVM>(trainingDataMat, ROW_SAMPLE, labelsMat, params);
//! [init]
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
//! [init]
//! [train]
svm->train(trainingDataMat, ROW_SAMPLE, labelsMat);
//! [train]

Vec3b green(0,255,0), blue (255,0,0);
// Show the decision regions given by the SVM
//! [show]
Vec3b green(0,255,0), blue (255,0,0);
for (int i = 0; i < image.rows; ++i)
for (int j = 0; j < image.cols; ++j)
{
@ -42,16 +49,20 @@ int main(int, char**)
else if (response == -1)
image.at<Vec3b>(i,j) = blue;
}
//! [show]

// Show the training data
//! [show_data]
int thickness = -1;
int lineType = 8;
circle( image, Point(501, 10), 5, Scalar( 0, 0, 0), thickness, lineType );
circle( image, Point(255, 10), 5, Scalar(255, 255, 255), thickness, lineType );
circle( image, Point(501, 255), 5, Scalar(255, 255, 255), thickness, lineType );
circle( image, Point( 10, 501), 5, Scalar(255, 255, 255), thickness, lineType );
//! [show_data]

// Show support vectors
//! [show_vectors]
thickness = 2;
lineType = 8;
Mat sv = svm->getSupportVectors();
@ -61,6 +72,7 @@ int main(int, char**)
const float* v = sv.ptr<float>(i);
circle( image, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
}
//! [show_vectors]

imwrite("result.png", image); // save the image
@ -39,6 +39,7 @@ int main()
// Set up the linearly separable part of the training data
int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES);

//! [setup1]
// Generate random points for the class 1
Mat trainClass = trainData.rowRange(0, nLinearSamples);
// The x coordinate of the points is in [0, 0.4)
@ -56,9 +57,10 @@ int main()
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
//! [setup1]

//------------------ Set up the non-linearly separable part of the training data ---------------

//! [setup2]
// Generate random points for the classes 1 and 2
trainClass = trainData.rowRange( nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
// The x coordinate of the points is in [0.4, 0.6)
@ -67,24 +69,28 @@ int main()
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));

//! [setup2]
//------------------------- Set up the labels for the classes ---------------------------------
labels.rowRange( 0, NTRAINING_SAMPLES).setTo(1); // Class 1
labels.rowRange(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES).setTo(2); // Class 2

//------------------------ 2. Set up the support vector machines parameters --------------------
SVM::Params params;
params.svmType = SVM::C_SVC;
params.C = 0.1;
params.kernelType = SVM::LINEAR;
params.termCrit = TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6);

//------------------------ 3. Train the svm ----------------------------------------------------
cout << "Starting training process" << endl;
Ptr<SVM> svm = StatModel::train<SVM>(trainData, ROW_SAMPLE, labels, params);
//! [init]
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setC(0.1);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6));
//! [init]
//! [train]
svm->train(trainData, ROW_SAMPLE, labels);
//! [train]
cout << "Finished training process" << endl;

//------------------------ 4. Show the decision regions ----------------------------------------
//! [show]
Vec3b green(0,100,0), blue (100,0,0);
for (int i = 0; i < I.rows; ++i)
for (int j = 0; j < I.cols; ++j)
@ -95,8 +101,10 @@ int main()
if (response == 1) I.at<Vec3b>(j, i) = green;
else if (response == 2) I.at<Vec3b>(j, i) = blue;
}
//! [show]

//----------------------- 5. Show the training data --------------------------------------------
//! [show_data]
int thick = -1;
int lineType = 8;
float px, py;
@ -114,8 +122,10 @@ int main()
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick, lineType);
}
//! [show_data]

//------------------------- 6. Show support vectors --------------------------------------------
//! [show_vectors]
thick = 2;
lineType = 8;
Mat sv = svm->getSupportVectors();
@ -125,6 +135,7 @@ int main()
const float* v = sv.ptr<float>(i);
circle( I, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType);
}
//! [show_vectors]

imwrite("result.png", I); // save the Image
imshow("SVM for Non-Linear Training Data", I); // show it to the user