Merge pull request #3679 from mshabunin:remove-algorithm-factory

Vadim Pisarevsky 2015-02-19 18:43:58 +00:00
commit 7b270f4c69
63 changed files with 1739 additions and 3490 deletions

View File

@@ -122,7 +122,6 @@ CV_INLINE CvParamLattice cvDefaultParamLattice( void )
#define CV_TYPE_NAME_ML_SVM "opencv-ml-svm"
#define CV_TYPE_NAME_ML_KNN "opencv-ml-knn"
#define CV_TYPE_NAME_ML_NBAYES "opencv-ml-bayesian"
#define CV_TYPE_NAME_ML_EM "opencv-ml-em"
#define CV_TYPE_NAME_ML_BOOSTING "opencv-ml-boost-tree"
#define CV_TYPE_NAME_ML_TREE "opencv-ml-tree"
#define CV_TYPE_NAME_ML_ANN_MLP "opencv-ml-ann-mlp"
@@ -562,100 +561,6 @@ private:
CvSVM& operator = (const CvSVM&);
};
/****************************************************************************************\
* Expectation - Maximization *
\****************************************************************************************/
namespace cv
{
class EM : public Algorithm
{
public:
// Type of covariance matrices
enum {COV_MAT_SPHERICAL=0, COV_MAT_DIAGONAL=1, COV_MAT_GENERIC=2, COV_MAT_DEFAULT=COV_MAT_DIAGONAL};
// Default parameters
enum {DEFAULT_NCLUSTERS=5, DEFAULT_MAX_ITERS=100};
// The initial step
enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};
CV_WRAP EM(int nclusters=EM::DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
EM::DEFAULT_MAX_ITERS, FLT_EPSILON));
virtual ~EM();
CV_WRAP virtual void clear();
CV_WRAP virtual bool train(InputArray samples,
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
CV_WRAP virtual bool trainE(InputArray samples,
InputArray means0,
InputArray covs0=noArray(),
InputArray weights0=noArray(),
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
CV_WRAP virtual bool trainM(InputArray samples,
InputArray probs0,
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray());
CV_WRAP Vec2d predict(InputArray sample,
OutputArray probs=noArray()) const;
CV_WRAP bool isTrained() const;
AlgorithmInfo* info() const;
virtual void read(const FileNode& fn);
protected:
virtual void setTrainData(int startStep, const Mat& samples,
const Mat* probs0,
const Mat* means0,
const std::vector<Mat>* covs0,
const Mat* weights0);
bool doTrain(int startStep,
OutputArray logLikelihoods,
OutputArray labels,
OutputArray probs);
virtual void eStep();
virtual void mStep();
void clusterTrainSamples();
void decomposeCovs();
void computeLogWeightDivDet();
Vec2d computeProbabilities(const Mat& sample, Mat* probs) const;
// all inner matrices have type CV_64FC1
CV_PROP_RW int nclusters;
CV_PROP_RW int covMatType;
CV_PROP_RW int maxIters;
CV_PROP_RW double epsilon;
Mat trainSamples;
Mat trainProbs;
Mat trainLogLikelihoods;
Mat trainLabels;
CV_PROP Mat weights;
CV_PROP Mat means;
CV_PROP std::vector<Mat> covs;
std::vector<Mat> covsEigenValues;
std::vector<Mat> covsRotateMats;
std::vector<Mat> invCovsEigenValues;
Mat logWeightDivDet;
};
} // namespace cv
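// A minimal usage sketch (illustrative, not part of the original header) of the
// legacy cv::EM interface declared above; the data shapes are assumptions.
static void em_usage_sketch()
{
    cv::Mat samples(100, 2, CV_64FC1);        // one 2-D sample per row (hypothetical data)
    cv::randu(samples, cv::Scalar(0), cv::Scalar(1));
    cv::EM em(3, cv::EM::COV_MAT_DIAGONAL);   // 3 clusters, diagonal covariances
    cv::Mat logLikelihoods, labels;
    em.train(samples, logLikelihoods, labels);
    cv::Vec2d r = em.predict(samples.row(0)); // r[0]=log-likelihood, r[1]=cluster index
    (void)r;
}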
/****************************************************************************************\
* Decision Tree *
\****************************************************************************************/
@@ -2155,8 +2060,6 @@ typedef CvGBTreesParams GradientBoostingTreeParams;
typedef CvGBTrees GradientBoostingTrees;
template<> void DefaultDeleter<CvDTreeSplit>::operator ()(CvDTreeSplit* obj) const;
bool initModule_ml(void);
}
#endif // __cplusplus

View File

@@ -100,7 +100,7 @@ RECURSIVE = YES
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = *.inl.hpp *.impl.hpp *_detail.hpp */cudev/**/detail/*.hpp
EXCLUDE_SYMBOLS = cv::DataType<*> int
EXCLUDE_SYMBOLS = cv::DataType<*> int void
EXAMPLE_PATH = @CMAKE_DOXYGEN_EXAMPLE_PATH@
EXAMPLE_PATTERNS = *
EXAMPLE_RECURSIVE = YES
@@ -243,7 +243,11 @@ PREDEFINED = __cplusplus=1 \
CV_NORETURN= \
CV_DEFAULT(x)=" = x" \
CV_NEON=1 \
FLANN_DEPRECATED=
FLANN_DEPRECATED= \
"CV_PURE_PROPERTY(type, name)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
"CV_IMPL_PROPERTY(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(type val) = 0;" \
"CV_IMPL_PROPERTY_S(type, name, x)= /** \@see set##name */ virtual type get##name() const = 0; /** \@copybrief get##name \@see get##name */ virtual void set##name(const type & val);" \
"CV_IMPL_PROPERTY_RO(type, name, x)= virtual type get##name() const;"
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
TAGFILES =
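With these additions the property macros are expanded for documentation instead of being skipped. For instance (an illustrative expansion, using a hypothetical property `MaxIters`), `CV_PURE_PROPERTY(int, MaxIters)` is presented to Doxygen as:

@code{.cpp}
/** @see setMaxIters */
virtual int getMaxIters() const = 0;
/** @copybrief getMaxIters @see getMaxIters */
virtual void setMaxIters(int val) = 0;
@endcode

so each property macro documents a getter/setter pair.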

View File

@@ -1,8 +1,6 @@
Introduction to Support Vector Machines {#tutorial_introduction_to_svm}
=======================================
@todo update this tutorial
Goal
----
@@ -31,13 +29,11 @@ understand that this is done only because our intuition is better built from exa
to imagine. However, the same concepts apply to tasks where the examples to classify lie in a space
whose dimension is higher than two.
In the above picture you can see that there exist multiple
lines that offer a solution to the problem. Is any of them better than the others? We can
intuitively define a criterion to estimate the worth of the lines:
- A line is bad if it passes too close to the points because it will be noise sensitive and it will
not generalize correctly. Therefore, our goal should be to find the line passing as far as
possible from all points.
In the above picture you can see that there exist multiple lines that offer a solution to the
problem. Is any of them better than the others? We can intuitively define a criterion to estimate
the worth of the lines: <em> A line is bad if it passes too close to the points because it will be
noise sensitive and it will not generalize correctly. </em> Therefore, our goal should be to find
the line passing as far as possible from all points.
Then, the operation of the SVM algorithm is based on finding the hyperplane that gives the largest
minimum distance to the training examples. Twice, this distance receives the important name of
@@ -57,7 +53,7 @@ where \f$\beta\f$ is known as the *weight vector* and \f$\beta_{0}\f$ as the *bi
@sa You can find a more in-depth description of this and hyperplanes in section 4.5 (*Separating
Hyperplanes*) of the book: *Elements of Statistical Learning* by T. Hastie, R. Tibshirani and J. H.
Friedman.
Friedman (@cite HTF01).
The optimal hyperplane can be represented in an infinite number of different ways by
scaling of \f$\beta\f$ and \f$\beta_{0}\f$. As a matter of convention, among all the possible
@@ -107,17 +103,14 @@ Explanation
The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
two different classes; one of the classes consists of one point and the other of three points.
@code{.cpp}
float labels[4] = {1.0, -1.0, -1.0, -1.0};
float trainingData[4][2] = {{501, 10}, {255, 10}, {501, 255}, {10, 501}};
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup1
The function @ref cv::ml::SVM::train that will be used afterwards requires the training data to be
stored as @ref cv::Mat objects of floats. Therefore, we create these objects from the arrays
defined above:
@code{.cpp}
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
Mat labelsMat (4, 1, CV_32FC1, labels);
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp setup2
-# **Set up SVM's parameters**
@@ -126,42 +119,35 @@ Explanation
used in a wide variety of problems (e.g. problems with non-linearly separable data, an SVM using
a kernel function to raise the dimensionality of the examples, etc). As a consequence of this,
we have to define some parameters before training the SVM. These parameters are stored in an
object of the class @ref cv::ml::SVM::Params .
@code{.cpp}
ml::SVM::Params params;
params.svmType = ml::SVM::C_SVC;
params.kernelType = ml::SVM::LINEAR;
params.termCrit = TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6);
@endcode
- *Type of SVM*. We choose here the type **ml::SVM::C_SVC** that can be used for n-class
classification (n \f$\geq\f$ 2). This parameter is defined in the attribute
*ml::SVM::Params.svmType*.
object of the class @ref cv::ml::SVM.
The important feature of the SVM type **CvSVM::C_SVC** is that it deals with imperfect separation of classes (i.e. when the training data is non-linearly separable). This feature is not important here since the data is linearly separable and we chose this SVM type only for being the most commonly used.
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp init
Here:
- *Type of SVM*. We choose here the type @ref cv::ml::SVM::C_SVC "C_SVC" that can be used for
n-class classification (n \f$\geq\f$ 2). The important feature of this type is that it deals
with imperfect separation of classes (i.e. when the training data is non-linearly separable).
This feature is not important here since the data is linearly separable and we chose this SVM
type only for being the most commonly used.
- *Type of SVM kernel*. We have not talked about kernel functions since they are not
interesting for the training data we are dealing with. Nevertheless, let's explain briefly
now the main idea behind a kernel function. It is a mapping done to the training data to
improve its resemblance to a linearly separable set of data. This mapping consists of
increasing the dimensionality of the data and is done efficiently using a kernel function.
We choose here the type **ml::SVM::LINEAR** which means that no mapping is done. This
parameter is defined in the attribute *ml::SVMParams.kernel_type*.
interesting for the training data we are dealing with. Nevertheless, let's explain briefly now
the main idea behind a kernel function. It is a mapping done to the training data to improve
its resemblance to a linearly separable set of data. This mapping consists of increasing the
dimensionality of the data and is done efficiently using a kernel function. We choose here the
type @ref cv::ml::SVM::LINEAR "LINEAR" which means that no mapping is done. This parameter is
defined using cv::ml::SVM::setKernel.
- *Termination criteria of the algorithm*. The SVM training procedure is implemented solving a
constrained quadratic optimization problem in an **iterative** fashion. Here we specify a
maximum number of iterations and a tolerance error so we allow the algorithm to finish in
fewer steps even if the optimal hyperplane has not been computed yet. This
parameter is defined in a structure @ref cv::cvTermCriteria .
parameter is defined in a structure @ref cv::TermCriteria .
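Putting the three settings above together, a minimal configuration sketch of the new
accessor-based interface (illustrative, not the tutorial's snippet; creating the object
through `cv::ml::SVM::create()` replaces the removed factory mechanism):

@code{.cpp}
Ptr<ml::SVM> svm = ml::SVM::create();
svm->setType(ml::SVM::C_SVC);      // n-class classification
svm->setKernel(ml::SVM::LINEAR);   // no kernel mapping
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
@endcode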
-# **Train the SVM**
We call the method @ref cv::ml::SVM::train to build the SVM model.
We call the method
[CvSVM::train](http://docs.opencv.org/modules/ml/doc/support_vector_machines.html#cvsvm-train)
to build the SVM model.
@code{.cpp}
CvSVM SVM;
SVM.train(trainingDataMat, labelsMat, Mat(), Mat(), params);
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp train
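The training call itself is short; a hedged sketch, reusing `svm` from the configuration
sketch above (note that the refactored C_SVC classifier expects the class labels as a
CV_32S Mat of integers):

@code{.cpp}
int labels[4] = {1, -1, -1, -1};
float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
Mat labelsMat(4, 1, CV_32SC1, labels);
svm->train(trainingDataMat, ml::ROW_SAMPLE, labelsMat);
@endcode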
-# **Regions classified by the SVM**
@@ -170,22 +156,8 @@ Explanation
by the SVM. In other words, an image is traversed interpreting its pixels as points of the
Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in
green if it is the class with label 1 and in blue if it is the class with label -1.
@code{.cpp}
Vec3b green(0,255,0), blue (255,0,0);
for (int i = 0; i < image.rows; ++i)
for (int j = 0; j < image.cols; ++j)
{
Mat sampleMat = (Mat_<float>(1,2) << i,j);
float response = SVM.predict(sampleMat);
if (response == 1)
image.at<Vec3b>(j, i) = green;
else
if (response == -1)
image.at<Vec3b>(j, i) = blue;
}
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show
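A sketch of the same traversal against the refactored interface (assuming the `image` Mat
and the `svm` pointer from the previous steps; the (x, y) sample layout is an assumption):

@code{.cpp}
Vec3b green(0, 255, 0), blue(255, 0, 0);
for (int i = 0; i < image.rows; ++i)
    for (int j = 0; j < image.cols; ++j)
    {
        Mat sampleMat = (Mat_<float>(1, 2) << j, i);   // pixel as a Cartesian point
        float response = svm->predict(sampleMat);
        if (response == 1)
            image.at<Vec3b>(i, j) = green;
        else if (response == -1)
            image.at<Vec3b>(i, j) = blue;
    }
@endcode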
-# **Support vectors**
@@ -193,15 +165,8 @@ Explanation
The method @ref cv::ml::SVM::getSupportVectors obtains all of the support
vectors. We have used this method here to find the training examples that are
support vectors and highlight them.
@code{.cpp}
int c = SVM.get_support_vector_count();
for (int i = 0; i < c; ++i)
{
const float* v = SVM.get_support_vector(i); // get and then highlight with grayscale
circle( image, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
}
@endcode
@snippet cpp/tutorial_code/ml/introduction_to_svm/introduction_to_svm.cpp show_vectors
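With the refactored interface the support vectors come back as the rows of a single Mat,
so the highlighting loop can be sketched as follows (reusing the `thickness` and
`lineType` values from the sample above):

@code{.cpp}
Mat sv = svm->getSupportVectors();       // one support vector per row
for (int i = 0; i < sv.rows; ++i)
{
    const float* v = sv.ptr<float>(i);
    circle(image, Point((int)v[0], (int)v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
}
@endcode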
Results
-------

View File

@@ -1,8 +1,6 @@
Support Vector Machines for Non-Linearly Separable Data {#tutorial_non_linear_svms}
=======================================================
@todo update this tutorial
Goal
----
@@ -10,21 +8,20 @@ In this tutorial you will learn how to:
- Define the optimization problem for SVMs when it is not possible to separate linearly the
training data.
- How to configure the parameters in @ref cv::ml::SVM::Params to adapt your SVM for this class of
problems.
- How to configure the parameters to adapt your SVM for this class of problems.
Motivation
----------
Why is it interesting to extend the SVM optimization problem in order to handle non-linearly separable
training data? Most of the applications in which SVMs are used in computer vision require a more
powerful tool than a simple linear classifier. This stems from the fact that in these tasks **the
training data can rarely be separated using a hyperplane**.
powerful tool than a simple linear classifier. This stems from the fact that in these tasks __the
training data can rarely be separated using a hyperplane__.
Consider one of these tasks, for example, face detection. The training data in this case is composed
of a set of images that are faces and another set of images that are non-faces (*every other thing
in the world except faces*). This training data is too complex to allow finding a representation
of each sample (*feature vector*) that could make the whole set of faces linearly separable from the
of a set of images that are faces and another set of images that are non-faces (_every other thing
in the world except faces_). This training data is too complex to allow finding a representation
of each sample (_feature vector_) that could make the whole set of faces linearly separable from the
whole set of non-faces.
Extension of the Optimization Problem
@@ -32,13 +29,13 @@ Extension of the Optimization Problem
Remember that using SVMs we obtain a separating hyperplane. Therefore, since the training data is
now non-linearly separable, we must admit that the hyperplane found will misclassify some of the
samples. This *misclassification* is a new variable in the optimization that must be taken into
samples. This _misclassification_ is a new variable in the optimization that must be taken into
account. The new model has to include both the old requirement of finding the hyperplane that gives
the biggest margin and the new one of generalizing the training data correctly by not allowing too
many classification errors.
We start here from the formulation of the optimization problem of finding the hyperplane which
maximizes the **margin** (this is explained in the previous tutorial, @ref tutorial_introduction_to_svm):
maximizes the __margin__ (this is explained in the previous tutorial, @ref tutorial_introduction_to_svm):
\f[\min_{\beta, \beta_{0}} L(\beta) = \frac{1}{2}||\beta||^{2} \text{ subject to } y_{i}(\beta^{T} x_{i} + \beta_{0}) \geq 1 \text{ } \forall i\f]
@@ -50,8 +47,8 @@ constant times the number of misclassification errors in the training data, i.e.
However, this one is not a very good solution since, among some other reasons, we do not distinguish
between samples that are misclassified with a small distance to their appropriate decision region or
samples that are not. Therefore, a better solution will take into account the *distance of the
misclassified samples to their correct decision regions*, i.e.:
samples that are not. Therefore, a better solution will take into account the _distance of the
misclassified samples to their correct decision regions_, i.e.:
\f[\min ||\beta||^{2} + C \text{(distance of misclassified samples to their correct regions)}\f]
@@ -68,7 +65,7 @@ distances of the rest of the samples are zero since they lie already in their co
region.
The red and blue lines that appear on the picture are the margins to each one of the
decision regions. It is very **important** to realize that each of the \f$\xi_{i}\f$ goes from a
decision regions. It is very __important__ to realize that each of the \f$\xi_{i}\f$ goes from a
misclassified training sample to the margin of its appropriate region.
Finally, the new formulation for the optimization problem is:
@@ -79,26 +76,25 @@ How should the parameter C be chosen? It is obvious that the answer to this ques
the training data is distributed. Although there is no general answer, it is useful to take into
account these rules:
- Large values of C give solutions with *less misclassification errors* but a *smaller margin*.
- Large values of C give solutions with _less misclassification errors_ but a _smaller margin_.
Consider that in this case it is expensive to make misclassification errors. Since the aim of
the optimization is to minimize the argument, few misclassification errors are allowed.
- Small values of C give solutions with *bigger margin* and *more classification errors*. In this
- Small values of C give solutions with _bigger margin_ and _more classification errors_. In this
case the minimization does not consider the sum term as much, so it focuses more on
finding a hyperplane with a big margin.
Source Code
-----------
You may also find the source code and this video file in the
`samples/cpp/tutorial_code/gpu/non_linear_svms/non_linear_svms` folder of the OpenCV source library
or [download it from here ](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
You may also find the source code in the `samples/cpp/tutorial_code/ml/non_linear_svms` folder of the OpenCV source library or
[download it from here](https://github.com/Itseez/opencv/tree/master/samples/cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp).
@includelineno cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp
Explanation
-----------
-# **Set up the training data**
-# __Set up the training data__
The training data of this exercise is formed by a set of labeled 2D-points that belong to one of
two different classes. To make the exercise more appealing, the training data is generated
@@ -107,136 +103,67 @@ Explanation
We have divided the generation of the training data into two main parts.
In the first part we generate data for both classes that is linearly separable.
@code{.cpp}
// Generate random points for the class 1
Mat trainClass = trainData.rowRange(0, nLinearSamples);
// The x coordinate of the points is in [0, 0.4)
Mat c = trainClass.colRange(0, 1);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(0.4 * WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup1
// Generate random points for the class 2
trainClass = trainData.rowRange(2*NTRAINING_SAMPLES-nLinearSamples, 2*NTRAINING_SAMPLES);
// The x coordinate of the points is in [0.6, 1]
c = trainClass.colRange(0 , 1);
rng.fill(c, RNG::UNIFORM, Scalar(0.6*WIDTH), Scalar(WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
@endcode
In the second part we create data for both classes that is non-linearly separable, data that
overlaps.
@code{.cpp}
// Generate random points for the classes 1 and 2
trainClass = trainData.rowRange( nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
// The x coordinate of the points is in [0.4, 0.6)
c = trainClass.colRange(0,1);
rng.fill(c, RNG::UNIFORM, Scalar(0.4*WIDTH), Scalar(0.6*WIDTH));
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
@endcode
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp setup2
-# **Set up SVM's parameters**
-# __Set up SVM's parameters__
@sa
In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the attributes of the
class @ref cv::ml::SVM::Params that we configure here before training the SVM.
@note In the previous tutorial @ref tutorial_introduction_to_svm there is an explanation of the
attributes of the class @ref cv::ml::SVM that we configure here before training the SVM.
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp init
@code{.cpp}
CvSVMParams params;
params.svm_type = SVM::C_SVC;
params.C = 0.1;
params.kernel_type = SVM::LINEAR;
params.term_crit = TermCriteria(TermCriteria::ITER, (int)1e7, 1e-6);
@endcode
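For comparison, a hedged sketch of the same configuration expressed through the refactored
accessor-based interface:

@code{.cpp}
Ptr<ml::SVM> svm = ml::SVM::create();
svm->setType(ml::SVM::C_SVC);
svm->setC(0.1);
svm->setKernel(ml::SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6));
@endcode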
There are just two differences between the configuration we do here and the one that was done in
the previous tutorial (tutorial_introduction_to_svm) that we use as reference.
the previous tutorial (@ref tutorial_introduction_to_svm) that we use as reference.
- *CvSVM::C_SVC*. We chose here a small value of this parameter in order not to punish too much
the misclassification errors in the optimization. The idea of doing this stems from the will
of obtaining a solution close to the one intuitively expected. However, we recommend getting a
- _C_. We chose here a small value of this parameter in order not to punish too much the
misclassification errors in the optimization. The idea of doing this stems from the will of
obtaining a solution close to the one intuitively expected. However, we recommend getting a
better insight into the problem by making adjustments to this parameter.
@note Here there are just very few points in the overlapping region between classes. By giving a smaller value to **FRAC_LINEAR_SEP** the density of points can be increased and the impact of the parameter **CvSVM::C_SVC** explored more deeply.
@note In this case there are just very few points in the overlapping region between classes.
By giving a smaller value to __FRAC_LINEAR_SEP__ the density of points can be increased and the
impact of the parameter _C_ explored more deeply.
- *Termination Criteria of the algorithm*. The maximum number of iterations has to be
- _Termination Criteria of the algorithm_. The maximum number of iterations has to be
increased considerably in order to solve correctly a problem with non-linearly separable
training data. In particular, we have increased this value by five orders of magnitude.
-# **Train the SVM**
-# __Train the SVM__
We call the method @ref cv::ml::SVM::train to build the SVM model. Note that the training
process may take quite a long time. Have patience when you run the program.
@code{.cpp}
CvSVM svm;
svm.train(trainData, labels, Mat(), Mat(), params);
@endcode
-# **Show the Decision Regions**
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp train
-# __Show the Decision Regions__
The method @ref cv::ml::SVM::predict is used to classify an input sample using a trained SVM. In
this example we have used this method in order to color the space depending on the prediction done
by the SVM. In other words, an image is traversed interpreting its pixels as points of the
Cartesian plane. Each of the points is colored depending on the class predicted by the SVM; in
dark green if it is the class with label 1 and in dark blue if it is the class with label 2.
@code{.cpp}
Vec3b green(0,100,0), blue (100,0,0);
for (int i = 0; i < I.rows; ++i)
for (int j = 0; j < I.cols; ++j)
{
Mat sampleMat = (Mat_<float>(1,2) << i, j);
float response = svm.predict(sampleMat);
if (response == 1) I.at<Vec3b>(j, i) = green;
else if (response == 2) I.at<Vec3b>(j, i) = blue;
}
@endcode
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show
-# **Show the training data**
-# __Show the training data__
The method @ref cv::circle is used to show the samples that compose the training data. The samples
of the class labeled with 1 are shown in light green, and the samples of the class labeled
with 2 in light blue.
@code{.cpp}
int thick = -1;
int lineType = 8;
float px, py;
// Class 1
for (int i = 0; i < NTRAINING_SAMPLES; ++i)
{
px = trainData.at<float>(i,0);
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(0, 255, 0), thick, lineType);
}
// Class 2
for (int i = NTRAINING_SAMPLES; i <2*NTRAINING_SAMPLES; ++i)
{
px = trainData.at<float>(i,0);
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick, lineType);
}
@endcode
-# **Support vectors**
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_data
-# __Support vectors__
We use here a couple of methods to obtain information about the support vectors. The method
@ref cv::ml::SVM::getSupportVectors obtains all support vectors.
We have used these methods here to find the training examples that are
support vectors and highlight them.
@code{.cpp}
thick = 2;
lineType = 8;
int x = svm.get_support_vector_count();
@ref cv::ml::SVM::getSupportVectors obtains all support vectors. We have used these methods here
to find the training examples that are support vectors and highlight them.
for (int i = 0; i < x; ++i)
{
const float* v = svm.get_support_vector(i);
circle( I, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType);
}
@endcode
@snippet cpp/tutorial_code/ml/non_linear_svms/non_linear_svms.cpp show_vectors
Results
-------

View File

@@ -200,8 +200,6 @@ public:
void setCallback(const Ptr<LMSolver::Callback>& _cb) { cb = _cb; }
AlgorithmInfo* info() const;
Ptr<LMSolver::Callback> cb;
double epsx;
@@ -211,15 +209,8 @@ public:
};
CV_INIT_ALGORITHM(LMSolverImpl, "LMSolver",
obj.info()->addParam(obj, "epsx", obj.epsx);
obj.info()->addParam(obj, "epsf", obj.epsf);
obj.info()->addParam(obj, "maxIters", obj.maxIters);
obj.info()->addParam(obj, "printInterval", obj.printInterval))
Ptr<LMSolver> createLMSolver(const Ptr<LMSolver::Callback>& cb, int maxIters)
{
CV_Assert( !LMSolverImpl_info_auto.name().empty() );
return makePtr<LMSolverImpl>(cb, maxIters);
}

View File

@@ -256,8 +256,6 @@ public:
void setCallback(const Ptr<PointSetRegistrator::Callback>& _cb) { cb = _cb; }
AlgorithmInfo* info() const;
Ptr<PointSetRegistrator::Callback> cb;
int modelPoints;
bool checkPartialSubsets;
@@ -378,25 +376,12 @@ public:
return result;
}
AlgorithmInfo* info() const;
};
CV_INIT_ALGORITHM(RANSACPointSetRegistrator, "PointSetRegistrator.RANSAC",
obj.info()->addParam(obj, "threshold", obj.threshold);
obj.info()->addParam(obj, "confidence", obj.confidence);
obj.info()->addParam(obj, "maxIters", obj.maxIters))
CV_INIT_ALGORITHM(LMeDSPointSetRegistrator, "PointSetRegistrator.LMeDS",
obj.info()->addParam(obj, "confidence", obj.confidence);
obj.info()->addParam(obj, "maxIters", obj.maxIters))
Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
int _modelPoints, double _threshold,
double _confidence, int _maxIters)
{
CV_Assert( !RANSACPointSetRegistrator_info_auto.name().empty() );
return Ptr<PointSetRegistrator>(
new RANSACPointSetRegistrator(_cb, _modelPoints, _threshold, _confidence, _maxIters));
}
@@ -405,7 +390,6 @@ Ptr<PointSetRegistrator> createRANSACPointSetRegistrator(const Ptr<PointSetRegis
Ptr<PointSetRegistrator> createLMeDSPointSetRegistrator(const Ptr<PointSetRegistrator::Callback>& _cb,
int _modelPoints, double _confidence, int _maxIters)
{
CV_Assert( !LMeDSPointSetRegistrator_info_auto.name().empty() );
return Ptr<PointSetRegistrator>(
new LMeDSPointSetRegistrator(_cb, _modelPoints, _confidence, _maxIters));
}

View File

@@ -1010,8 +1010,6 @@ public:
disp.convertTo(disp0, disp0.type(), 1./(1 << DISPARITY_SHIFT), 0);
}
AlgorithmInfo* info() const { return 0; }
int getMinDisparity() const { return params.minDisparity; }
void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }

View File

@@ -865,8 +865,6 @@ public:
StereoMatcher::DISP_SCALE*params.speckleRange, buffer);
}
AlgorithmInfo* info() const { return 0; }
int getMinDisparity() const { return params.minDisparity; }
void setMinDisparity(int minDisparity) { params.minDisparity = minDisparity; }

View File

@@ -2768,8 +2768,6 @@ public:
//////////////////////////////////////// Algorithm ////////////////////////////////////
class CV_EXPORTS Algorithm;
class CV_EXPORTS AlgorithmInfo;
struct CV_EXPORTS AlgorithmInfoData;
template<typename _Tp> struct ParamType {};
@@ -2782,32 +2780,13 @@ matching, graph-cut etc.), background subtraction (which can be done using mixtu
models, codebook-based algorithm etc.), optical flow (block matching, Lucas-Kanade, Horn-Schunck
etc.).
The class provides the following features for all derived classes:
- so-called "virtual constructor". That is, each Algorithm derivative is registered at program
start and you can get the list of registered algorithms and create an instance of a particular
algorithm by its name (see Algorithm::create). If you plan to add your own algorithms, it is
good practice to add a unique prefix to your algorithms to distinguish them from other
algorithms.
- setting/retrieving algorithm parameters by name. If you used video capturing functionality
from the OpenCV videoio module, you are probably familiar with cvSetCaptureProperty(),
cvGetCaptureProperty(), VideoCapture::set() and VideoCapture::get(). Algorithm provides
similar methods where instead of integer ids you specify the parameter names as text strings.
See Algorithm::set and Algorithm::get for details.
- reading and writing parameters from/to XML or YAML files. Every Algorithm derivative can store
all its parameters and then read them back. There is no need to re-implement it each time.
Here is an example of SIFT use in your application via the Algorithm interface:
@code
#include "opencv2/opencv.hpp"
#include "opencv2/xfeatures2d.hpp"
using namespace cv::xfeatures2d;
...
Ptr<Feature2D> sift = SIFT::create();
FileStorage fs("sift_params.xml", FileStorage::READ);
if( fs.isOpened() ) // if we have file with parameters, read them
{
@@ -2817,322 +2796,72 @@ Here is an example of SIFT use in your application via the Algorithm interface:
else // else modify the parameters and store them; user can later edit the file to use different parameters
{
sift->setContrastThreshold(0.01f); // lower the contrast threshold, compared to the default value
{
WriteStructContext ws(fs, "sift_params", CV_NODE_MAP);
sift->write(fs);
}
}
Mat image = imread("myimage.png", 0), descriptors;
vector<KeyPoint> keypoints;
sift->detectAndCompute(image, noArray(), keypoints, descriptors);
@endcode
Creating Own Algorithms
-----------------------
If you want to make your own algorithm, derived from Algorithm, you should basically follow a few
conventions and add a little semi-standard piece of code to your class:
- Make a class and specify Algorithm as its base class.
- The algorithm parameters should be the class members. See Algorithm::get() for the list of
possible types of the parameters.
- Add public virtual method `AlgorithmInfo* info() const;` to your class.
- Add constructor function, AlgorithmInfo instance and implement the info() method. The simplest
way is to take <https://github.com/Itseez/opencv/tree/master/modules/ml/src/ml_init.cpp> as
the reference and modify it according to the list of your parameters.
- Add some public function (e.g. `initModule_<mymodule>()`) that calls info() of your algorithm
and put it into the same source file as info() implementation. This is to force C++ linker to
include this object file into the target application. See Algorithm::create() for details.
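For illustration, a minimal (hypothetical) derivative following these conventions could look like:

@code{.cpp}
class MyFilter : public Algorithm
{
public:
    MyFilter() : ksize(3) {}
    AlgorithmInfo* info() const;  // implemented by the macro below
    int ksize;                    // exposed as a named parameter
};

// registers the name "MyModule.MyFilter" and its parameter list
CV_INIT_ALGORITHM(MyFilter, "MyModule.MyFilter",
                  obj.info()->addParam(obj, "ksize", obj.ksize))

// forces the linker to keep this object file in the target application
bool initModule_mymodule() { return !MyFilter_info_auto.name().empty(); }
@endcode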
*/
class CV_EXPORTS_W Algorithm
{
public:
Algorithm();
virtual ~Algorithm();
/** Returns the algorithm name */
String name() const;
/** @brief returns the algorithm parameter
The method returns the value of the particular parameter. Since the compiler cannot deduce the
type of the returned parameter, you should specify it explicitly in angle brackets. Here are
the allowed forms of get:
- myalgo.get\<int\>("param_name")
- myalgo.get\<double\>("param_name")
- myalgo.get\<bool\>("param_name")
- myalgo.get\<String\>("param_name")
- myalgo.get\<Mat\>("param_name")
- myalgo.get\<vector\<Mat\> \>("param_name")
- myalgo.get\<Algorithm\>("param_name") (it returns Ptr\<Algorithm\>).
In some cases the actual type of the parameter can be cast to the specified type, e.g. an integer
parameter can be cast to double, a bool can be cast to int. But "dangerous" transformations
(string\<-\>number, double-\>int, 1x1 Mat\<-\>number, ...) are not performed and the method
will throw an exception. In the case of Mat or vector\<Mat\> parameters the method does not
clone the matrix data, so do not modify the matrices. Use Algorithm::set instead - slower, but
safer.
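For example (a hypothetical parameter name, for illustration):
@code{.cpp}
double t = myalgo.get<double>("contrastThreshold");
@endcode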
@param name The parameter name.
*/
template<typename _Tp> typename ParamType<_Tp>::member_type get(const String& name) const;
/** @overload */
template<typename _Tp> typename ParamType<_Tp>::member_type get(const char* name) const;
CV_WRAP int getInt(const String& name) const;
CV_WRAP double getDouble(const String& name) const;
CV_WRAP bool getBool(const String& name) const;
CV_WRAP String getString(const String& name) const;
CV_WRAP Mat getMat(const String& name) const;
CV_WRAP std::vector<Mat> getMatVector(const String& name) const;
CV_WRAP Ptr<Algorithm> getAlgorithm(const String& name) const;
/** @brief Sets the algorithm parameter
The method sets the value of the particular parameter. Some of the algorithm
parameters may be declared as read-only. If you try to set such a
parameter, you will get an exception with the corresponding error message.
@param name The parameter name.
@param value The parameter value.
*/
void set(const String& name, int value);
void set(const String& name, double value);
void set(const String& name, bool value);
void set(const String& name, const String& value);
void set(const String& name, const Mat& value);
void set(const String& name, const std::vector<Mat>& value);
void set(const String& name, const Ptr<Algorithm>& value);
template<typename _Tp> void set(const String& name, const Ptr<_Tp>& value);
CV_WRAP void setInt(const String& name, int value);
CV_WRAP void setDouble(const String& name, double value);
CV_WRAP void setBool(const String& name, bool value);
CV_WRAP void setString(const String& name, const String& value);
CV_WRAP void setMat(const String& name, const Mat& value);
CV_WRAP void setMatVector(const String& name, const std::vector<Mat>& value);
CV_WRAP void setAlgorithm(const String& name, const Ptr<Algorithm>& value);
template<typename _Tp> void setAlgorithm(const String& name, const Ptr<_Tp>& value);
void set(const char* name, int value);
void set(const char* name, double value);
void set(const char* name, bool value);
void set(const char* name, const String& value);
void set(const char* name, const Mat& value);
void set(const char* name, const std::vector<Mat>& value);
void set(const char* name, const Ptr<Algorithm>& value);
template<typename _Tp> void set(const char* name, const Ptr<_Tp>& value);
void setInt(const char* name, int value);
void setDouble(const char* name, double value);
void setBool(const char* name, bool value);
void setString(const char* name, const String& value);
void setMat(const char* name, const Mat& value);
void setMatVector(const char* name, const std::vector<Mat>& value);
void setAlgorithm(const char* name, const Ptr<Algorithm>& value);
template<typename _Tp> void setAlgorithm(const char* name, const Ptr<_Tp>& value);
CV_WRAP String paramHelp(const String& name) const;
int paramType(const char* name) const;
CV_WRAP int paramType(const String& name) const;
CV_WRAP void getParams(CV_OUT std::vector<String>& names) const;
/** @brief Stores algorithm parameters in a file storage
The method stores all the algorithm parameters (in alphabetic order) to
the file storage. The method is virtual. If you define your own
Algorithm derivative, you can override the method and store some extra
information. However, it's rarely needed. Here are some examples:
- SIFT feature detector (from xfeatures2d module). The class only
stores algorithm parameters and no keypoints or their descriptors.
Therefore, it's enough to store the algorithm parameters, which is
what Algorithm::write() does. Therefore, there is no dedicated
SIFT::write().
- Background subtractor (from video module). It has the algorithm
parameters and also it has the current background model. However,
the background model is not stored. First, it's rather big. Then,
if you have stored the background model, it would likely become
irrelevant on the next run (because of shifted camera, changed
background, different lighting etc.). Therefore,
BackgroundSubtractorMOG and BackgroundSubtractorMOG2 also rely on
the standard Algorithm::write() to store just the algorithm
parameters.
- Expectation Maximization (from ml module). The algorithm finds a
mixture of Gaussians that best approximates the user data. In
this case the model may be re-used on the next run to test new
data against the trained statistical model. So EM needs to store
the model. However, since the model is described by a few
parameters that are available as read-only algorithm parameters
(i.e. they are available via EM::get()), EM also relies on
Algorithm::write() to store both EM parameters and the model
(represented by read-only algorithm parameters).
@param fs File storage.
*/
virtual void write(FileStorage& fs) const;
virtual void write(FileStorage& fs) const { (void)fs; }
/** @brief Reads algorithm parameters from a file storage
The method reads all the algorithm parameters from the specified node of
a file storage. Similarly to Algorithm::write(), if you implement an
algorithm that needs to read some extra data and/or re-compute some
internal data, you may override the method.
@param fn File node of the file storage.
*/
virtual void read(const FileNode& fn);
typedef Algorithm* (*Constructor)(void);
typedef int (Algorithm::*Getter)() const;
typedef void (Algorithm::*Setter)(int);
/** @brief Returns the list of registered algorithms
This static method returns the list of registered algorithms in
alphabetical order. Here is how to use it:
@code{.cpp}
vector<String> algorithms;
Algorithm::getList(algorithms);
cout << "Algorithms: " << algorithms.size() << endl;
for (size_t i=0; i < algorithms.size(); i++)
cout << algorithms[i] << endl;
@endcode
@param algorithms The output vector of algorithm names.
*/
CV_WRAP static void getList(CV_OUT std::vector<String>& algorithms);
CV_WRAP static Ptr<Algorithm> _create(const String& name);
/** @brief Creates algorithm instance by name
This static method creates a new instance of the specified algorithm. If
there is no such algorithm, the method will silently return a null
pointer. Also, you should specify the particular Algorithm subclass as
_Tp (or simply Algorithm if you do not know it at that point):
@code{.cpp}
Ptr<BackgroundSubtractor> bgfg = Algorithm::create<BackgroundSubtractor>("BackgroundSubtractor.MOG2");
@endcode
@note This is an important note about the seemingly mysterious behavior of
Algorithm::create() when it returns NULL while it should not. The reason
is simple - Algorithm::create() resides in OpenCV's core module and the
algorithms are implemented in other modules. If you create algorithms
dynamically, C++ linker may decide to throw away the modules where the
actual algorithms are implemented, since you do not call any functions
from the modules. To avoid this problem, you need to call
initModule_\<modulename\>(); somewhere in the beginning of the program
before Algorithm::create(). For example, call initModule_xfeatures2d()
in order to use SURF/SIFT, call initModule_ml() to use expectation
maximization etc.
@param name The algorithm name, one of the names returned by Algorithm::getList().
*/
template<typename _Tp> static Ptr<_Tp> create(const String& name);
virtual AlgorithmInfo* info() const /* TODO: make it = 0;*/ { return 0; }
virtual void read(const FileNode& fn) { (void)fn; }
};
/** @todo document */
class CV_EXPORTS AlgorithmInfo
{
public:
friend class Algorithm;
AlgorithmInfo(const String& name, Algorithm::Constructor create);
~AlgorithmInfo();
void get(const Algorithm* algo, const char* name, int argType, void* value) const;
void addParam_(Algorithm& algo, const char* name, int argType,
void* value, bool readOnly,
Algorithm::Getter getter, Algorithm::Setter setter,
const String& help=String());
String paramHelp(const char* name) const;
int paramType(const char* name) const;
void getParams(std::vector<String>& names) const;
// define properties
void write(const Algorithm* algo, FileStorage& fs) const;
void read(Algorithm* algo, const FileNode& fn) const;
String name() const;
#define CV_PURE_PROPERTY(type, name) \
CV_WRAP virtual type get##name() const = 0; \
CV_WRAP virtual void set##name(type val) = 0;
void addParam(Algorithm& algo, const char* name,
int& value, bool readOnly=false,
int (Algorithm::*getter)()=0,
void (Algorithm::*setter)(int)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
bool& value, bool readOnly=false,
int (Algorithm::*getter)()=0,
void (Algorithm::*setter)(int)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
double& value, bool readOnly=false,
double (Algorithm::*getter)()=0,
void (Algorithm::*setter)(double)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
String& value, bool readOnly=false,
String (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const String&)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
Mat& value, bool readOnly=false,
Mat (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const Mat&)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
std::vector<Mat>& value, bool readOnly=false,
std::vector<Mat> (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const std::vector<Mat>&)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
Ptr<Algorithm>& value, bool readOnly=false,
Ptr<Algorithm> (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const Ptr<Algorithm>&)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
float& value, bool readOnly=false,
float (Algorithm::*getter)()=0,
void (Algorithm::*setter)(float)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
unsigned int& value, bool readOnly=false,
unsigned int (Algorithm::*getter)()=0,
void (Algorithm::*setter)(unsigned int)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
uint64& value, bool readOnly=false,
uint64 (Algorithm::*getter)()=0,
void (Algorithm::*setter)(uint64)=0,
const String& help=String());
void addParam(Algorithm& algo, const char* name,
uchar& value, bool readOnly=false,
uchar (Algorithm::*getter)()=0,
void (Algorithm::*setter)(uchar)=0,
const String& help=String());
template<typename _Tp, typename _Base> void addParam(Algorithm& algo, const char* name,
Ptr<_Tp>& value, bool readOnly=false,
Ptr<_Tp> (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const Ptr<_Tp>&)=0,
const String& help=String());
template<typename _Tp> void addParam(Algorithm& algo, const char* name,
Ptr<_Tp>& value, bool readOnly=false,
Ptr<_Tp> (Algorithm::*getter)()=0,
void (Algorithm::*setter)(const Ptr<_Tp>&)=0,
const String& help=String());
protected:
AlgorithmInfoData* data;
void set(Algorithm* algo, const char* name, int argType,
const void* value, bool force=false) const;
#define CV_PURE_PROPERTY_S(type, name) \
CV_WRAP virtual type get##name() const = 0; \
CV_WRAP virtual void set##name(const type & val) = 0;
#define CV_PURE_PROPERTY_RO(type, name) \
CV_WRAP virtual type get##name() const = 0;
// basic property implementation
#define CV_IMPL_PROPERTY_RO(type, name, member) \
inline type get##name() const { return member; }
#define CV_HELP_IMPL_PROPERTY(r_type, w_type, name, member) \
CV_IMPL_PROPERTY_RO(r_type, name, member) \
inline void set##name(w_type val) { member = val; }
#define CV_HELP_WRAP_PROPERTY(r_type, w_type, name, internal_name, internal_obj) \
r_type get##name() const { return internal_obj.get##internal_name(); } \
void set##name(w_type val) { internal_obj.set##internal_name(val); }
#define CV_IMPL_PROPERTY(type, name, member) CV_HELP_IMPL_PROPERTY(type, type, name, member)
#define CV_IMPL_PROPERTY_S(type, name, member) CV_HELP_IMPL_PROPERTY(type, const type &, name, member)
#define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, type, name, internal_name, internal_obj)
#define CV_WRAP_PROPERTY_S(type, name, internal_name, internal_obj) CV_HELP_WRAP_PROPERTY(type, const type &, name, internal_name, internal_obj)
#define CV_WRAP_SAME_PROPERTY(type, name, internal_obj) CV_WRAP_PROPERTY(type, name, name, internal_obj)
#define CV_WRAP_SAME_PROPERTY_S(type, name, internal_obj) CV_WRAP_PROPERTY_S(type, name, name, internal_obj)
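// Illustrative expansion (assuming a hypothetical member 'double clipLimit_'):
//   CV_IMPL_PROPERTY(double, ClipLimit, clipLimit_)
// expands to
//   inline double getClipLimit() const { return clipLimit_; }
//   inline void setClipLimit(double val) { clipLimit_ = val; }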
struct Param {
enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7,
UNSIGNED_INT=8, UINT64=9, UCHAR=11 };
};
/** @todo document */
struct CV_EXPORTS Param
{
enum { INT=0, BOOLEAN=1, REAL=2, STRING=3, MAT=4, MAT_VECTOR=5, ALGORITHM=6, FLOAT=7, UNSIGNED_INT=8, UINT64=9, UCHAR=11 };
Param();
Param(int _type, bool _readonly, int _offset,
Algorithm::Getter _getter=0,
Algorithm::Setter _setter=0,
const String& _help=String());
int type;
int offset;
bool readonly;
Algorithm::Getter getter;
Algorithm::Setter setter;
String help;
};
template<> struct ParamType<bool>
{

View File

@@ -412,84 +412,6 @@ int print(const Matx<_Tp, m, n>& matx, FILE* stream = stdout)
return print(Formatter::get()->format(cv::Mat(matx)), stream);
}
////////////////////////////////////////// Algorithm //////////////////////////////////////////
template<typename _Tp> inline
Ptr<_Tp> Algorithm::create(const String& name)
{
return _create(name).dynamicCast<_Tp>();
}
template<typename _Tp> inline
void Algorithm::set(const char* _name, const Ptr<_Tp>& value)
{
Ptr<Algorithm> algo_ptr = value. template dynamicCast<cv::Algorithm>();
if (!algo_ptr) {
CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
}
info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
}
template<typename _Tp> inline
void Algorithm::set(const String& _name, const Ptr<_Tp>& value)
{
this->set<_Tp>(_name.c_str(), value);
}
template<typename _Tp> inline
void Algorithm::setAlgorithm(const char* _name, const Ptr<_Tp>& value)
{
Ptr<Algorithm> algo_ptr = value. template ptr<cv::Algorithm>();
if (!algo_ptr) {
CV_Error( Error::StsUnsupportedFormat, "unknown/unsupported Ptr type of the second parameter of the method Algorithm::set");
}
info()->set(this, _name, ParamType<Algorithm>::type, &algo_ptr);
}
template<typename _Tp> inline
void Algorithm::setAlgorithm(const String& _name, const Ptr<_Tp>& value)
{
this->set<_Tp>(_name.c_str(), value);
}
template<typename _Tp> inline
typename ParamType<_Tp>::member_type Algorithm::get(const String& _name) const
{
typename ParamType<_Tp>::member_type value;
info()->get(this, _name.c_str(), ParamType<_Tp>::type, &value);
return value;
}
template<typename _Tp> inline
typename ParamType<_Tp>::member_type Algorithm::get(const char* _name) const
{
typename ParamType<_Tp>::member_type value;
info()->get(this, _name, ParamType<_Tp>::type, &value);
return value;
}
template<typename _Tp, typename _Base> inline
void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, Ptr<_Tp>& value, bool readOnly,
Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
const String& help)
{
//TODO: static assert: _Tp inherits from _Base
addParam_(algo, parameter, ParamType<_Base>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}
template<typename _Tp> inline
void AlgorithmInfo::addParam(Algorithm& algo, const char* parameter, Ptr<_Tp>& value, bool readOnly,
Ptr<_Tp> (Algorithm::*getter)(), void (Algorithm::*setter)(const Ptr<_Tp>&),
const String& help)
{
//TODO: static assert: _Tp inherits from Algorithm
addParam_(algo, parameter, ParamType<Algorithm>::type, &value, readOnly,
(Algorithm::Getter)getter, (Algorithm::Setter)setter, help);
}
//! @endcond
/****************************************************************************************\

View File

@@ -129,40 +129,6 @@ namespace cv
CV_EXPORTS const char* currentParallelFramework();
} //namespace cv
#define CV_INIT_ALGORITHM(classname, algname, memberinit) \
static inline ::cv::Algorithm* create##classname##_hidden() \
{ \
return new classname; \
} \
\
static inline ::cv::Ptr< ::cv::Algorithm> create##classname##_ptr_hidden() \
{ \
return ::cv::makePtr<classname>(); \
} \
\
static inline ::cv::AlgorithmInfo& classname##_info() \
{ \
static ::cv::AlgorithmInfo classname##_info_var(algname, create##classname##_hidden); \
return classname##_info_var; \
} \
\
static ::cv::AlgorithmInfo& classname##_info_auto = classname##_info(); \
\
::cv::AlgorithmInfo* classname::info() const \
{ \
static volatile bool initialized = false; \
\
if( !initialized ) \
{ \
initialized = true; \
classname obj; \
memberinit; \
} \
return &classname##_info(); \
}
/****************************************************************************************\
* Common declarations *
\****************************************************************************************/

File diff suppressed because it is too large

View File

@@ -140,8 +140,6 @@ namespace
public:
CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8);
cv::AlgorithmInfo* info() const;
void apply(cv::InputArray src, cv::OutputArray dst);
void apply(InputArray src, OutputArray dst, Stream& stream);
@@ -167,11 +165,6 @@ namespace
{
}
CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE_CUDA",
obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
obj.info()->addParam(obj, "tilesX", obj.tilesX_);
obj.info()->addParam(obj, "tilesY", obj.tilesY_))
void CLAHE_Impl::apply(cv::InputArray _src, cv::OutputArray _dst)
{
apply(_src, _dst, Stream::Null());

View File

@@ -310,10 +310,10 @@ PERF_TEST_P(ImagePair, OpticalFlowDual_TVL1,
{
cv::Mat flow;
cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
alg->set("medianFiltering", 1);
alg->set("innerIterations", 1);
alg->set("outerIterations", 300);
cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
alg->setMedianFiltering(1);
alg->setInnerIterations(1);
alg->setOuterIterations(300);
TEST_CYCLE() alg->calc(frame0, frame1, flow);
CPU_SANITY_CHECK(flow);

View File

@@ -369,11 +369,11 @@ CUDA_TEST_P(OpticalFlowDual_TVL1, Accuracy)
cv::cuda::GpuMat d_flow;
d_alg->calc(loadMat(frame0), loadMat(frame1), d_flow);
cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
alg->set("medianFiltering", 1);
alg->set("innerIterations", 1);
alg->set("outerIterations", d_alg->getNumIterations());
alg->set("gamma", gamma);
cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
alg->setMedianFiltering(1);
alg->setInnerIterations(1);
alg->setOuterIterations(d_alg->getNumIterations());
alg->setGamma(gamma);
cv::Mat flow;
alg->calc(frame0, frame1, flow);

View File

@@ -320,8 +320,6 @@ namespace
public:
CLAHE_Impl(double clipLimit = 40.0, int tilesX = 8, int tilesY = 8);
cv::AlgorithmInfo* info() const;
void apply(cv::InputArray src, cv::OutputArray dst);
void setClipLimit(double clipLimit);
@@ -351,11 +349,6 @@ namespace
{
}
CV_INIT_ALGORITHM(CLAHE_Impl, "CLAHE",
obj.info()->addParam(obj, "clipLimit", obj.clipLimit_);
obj.info()->addParam(obj, "tilesX", obj.tilesX_);
obj.info()->addParam(obj, "tilesY", obj.tilesY_))
void CLAHE_Impl::apply(cv::InputArray _src, cv::OutputArray _dst)
{
CV_Assert( _src.type() == CV_8UC1 || _src.type() == CV_16UC1 );

View File

@@ -449,40 +449,33 @@ classes 0 and 1, one can determine that the given data instance belongs to class
\geq 0.5\f$ or class 0 if \f$h_\theta(x) < 0.5\f$.
In Logistic Regression, choosing the right parameters is of utmost importance for reducing the
training error and ensuring high training accuracy. cv::ml::LogisticRegression::Params is the
structure that defines parameters that are required to train a Logistic Regression classifier.
training error and ensuring high training accuracy:
The learning rate is determined by cv::ml::LogisticRegression::Params.alpha. It determines how fast
we approach the solution. It is a positive real number.
- The learning rate can be set with @ref cv::ml::LogisticRegression::setLearningRate "setLearningRate"
method. It determines how fast we approach the solution. It is a positive real number.
Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported in
LogisticRegression. It is important that we mention the number of iterations these optimization
algorithms have to run. The number of iterations is specified by
cv::ml::LogisticRegression::Params.num_iters. The number of iterations can be thought of as the
number of steps taken, and the learning rate specifies whether it is a long step or a short step. These two parameters
define how fast we arrive at a possible solution.
- Optimization algorithms like Batch Gradient Descent and Mini-Batch Gradient Descent are supported
in LogisticRegression. It is important that we mention the number of iterations these optimization
algorithms have to run. The number of iterations can be set with @ref
cv::ml::LogisticRegression::setIterations "setIterations". This parameter can be thought of
as the number of steps taken, and the learning rate specifies whether it is a long step or a
short step. This and the previous parameter define how fast we arrive at a possible solution.
In order to compensate for overfitting, regularization is performed, which can be enabled by setting
cv::ml::LogisticRegression::Params.regularized to a positive integer (greater than zero). One can
specify what kind of regularization has to be performed by setting
cv::ml::LogisticRegression::Params.norm to REG_L1 or REG_L2 values.
- In order to compensate for overfitting, regularization is performed, which can be enabled with
@ref cv::ml::LogisticRegression::setRegularization "setRegularization". One can specify what
kind of regularization has to be performed by passing one of @ref
cv::ml::LogisticRegression::RegKinds "regularization kinds" to this method.
LogisticRegression provides a choice of 2 training methods: Batch Gradient Descent or Mini-
Batch Gradient Descent. To specify this, set cv::ml::LogisticRegression::Params::train_method to
either BATCH or MINI_BATCH. If the training method is set to MINI_BATCH, the size of the mini batch
has to be set to a positive integer using cv::ml::LogisticRegression::Params::mini_batch_size.
- The logistic regression implementation provides a choice of 2 training methods: Batch Gradient
Descent or Mini-Batch Gradient Descent. To specify this, call @ref
cv::ml::LogisticRegression::setTrainMethod "setTrainMethod" with either @ref
cv::ml::LogisticRegression::BATCH "LogisticRegression::BATCH" or @ref
cv::ml::LogisticRegression::MINI_BATCH "LogisticRegression::MINI_BATCH". If the training method is
set to @ref cv::ml::LogisticRegression::MINI_BATCH "MINI_BATCH", the size of the mini batch has
to be set to a positive integer with @ref cv::ml::LogisticRegression::setMiniBatchSize
"setMiniBatchSize".
A sample set of training parameters for the Logistic Regression classifier can be initialized as
follows:
@code{.cpp}
using namespace cv::ml;
LogisticRegression::Params params;
params.alpha = 0.5;
params.num_iters = 10000;
params.norm = LogisticRegression::REG_L2;
params.regularized = 1;
params.train_method = LogisticRegression::MINI_BATCH;
params.mini_batch_size = 10;
@endcode
A sample set of training parameters for the Logistic Regression classifier can be initialized as follows:
@snippet samples/cpp/logistic_regression.cpp init
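The same values map onto the accessor-based interface; a hedged sketch:

@code{.cpp}
Ptr<ml::LogisticRegression> lr = ml::LogisticRegression::create();
lr->setLearningRate(0.5);
lr->setIterations(10000);
lr->setRegularization(ml::LogisticRegression::REG_L2);
lr->setTrainMethod(ml::LogisticRegression::MINI_BATCH);
lr->setMiniBatchSize(10);
@endcode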
@sa cv::ml::LogisticRegression

View File

@@ -381,43 +381,22 @@ public:
return model->isTrained() ? model : Ptr<_Tp>();
}
/** @brief Create and train model with default parameters
/** @brief Creates new statistical model and trains it
@param data training data that can be loaded from a file using TrainData::loadFromCSV or
created with TrainData::create.
@param p model parameters
@param flags optional flags, depending on the model. Some of the models can be updated with the
new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).
The class must implement a static `create()` method with no parameters or with all default parameter values
*/
template<typename _Tp> static Ptr<_Tp> train(const Ptr<TrainData>& data, const typename _Tp::Params& p, int flags=0)
template<typename _Tp> static Ptr<_Tp> train(const Ptr<TrainData>& data, int flags=0)
{
Ptr<_Tp> model = _Tp::create(p);
Ptr<_Tp> model = _Tp::create();
return !model.empty() && model->train(data, flags) ? model : Ptr<_Tp>();
}
/** @brief Creates new statistical model and trains it
@param samples training samples
@param layout See ml::SampleTypes.
@param responses vector of responses associated with the training samples.
@param p model parameters
@param flags optional flags, depending on the model. Some of the models can be updated with the
new training samples, not completely overwritten (such as NormalBayesClassifier or ANN_MLP).
*/
template<typename _Tp> static Ptr<_Tp> train(InputArray samples, int layout, InputArray responses,
const typename _Tp::Params& p, int flags=0)
{
Ptr<_Tp> model = _Tp::create(p);
return !model.empty() && model->train(TrainData::create(samples, layout, responses), flags) ? model : Ptr<_Tp>();
}
/** @brief Saves the model to a file.
In order to make this method work, the derived class must override
Algorithm::write(FileStorage& fs).
*/
/** Saves the model to a file.
In order to make this method work, the derived class must implement Algorithm::write(FileStorage& fs). */
virtual void save(const String& filename) const;
/** Returns model string identifier.
This string is used as top level xml/yml node tag when model is saved to a file or string. */
virtual String getDefaultModelName() const = 0;
};
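A possible usage sketch of the simplified factory template; the CSV file name is hypothetical, and
TrainData::loadFromCSV is the loader mentioned in the doc comment above:
@code{.cpp}
using namespace cv;
using namespace cv::ml;
Ptr<TrainData> data = TrainData::loadFromCSV("train.csv", 0); // 0 header lines; hypothetical file
Ptr<SVM> svm = StatModel::train<SVM>(data); // parameterless create() followed by train()
@endcode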
@ -432,11 +411,6 @@ public:
class CV_EXPORTS_W NormalBayesClassifier : public StatModel
{
public:
class CV_EXPORTS_W Params
{
public:
Params();
};
/** @brief Predicts the response for sample(s).
The method estimates the most probable classes for input vectors. Input vectors (one or more)
@ -447,21 +421,10 @@ public:
*/
virtual float predictProb( InputArray inputs, OutputArray outputs,
OutputArray outputProbs, int flags=0 ) const = 0;
virtual void setParams(const Params& params) = 0;
virtual Params getParams() const = 0;
/** @brief Creates empty model
@param params The model parameters. There is none so far, the structure is used as a placeholder
for possible extensions.
Use StatModel::train to train the model:
@code
StatModel::train<NormalBayesClassifier>(traindata, params); // to create and train the model
StatModel::load<NormalBayesClassifier>(filename); // load the pre-trained model
@endcode
*/
static Ptr<NormalBayesClassifier> create(const Params& params=Params());
/** Creates empty model
Use StatModel::train to train the model after creation. */
static Ptr<NormalBayesClassifier> create();
};
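Mirroring the code example removed from the old comment, usage with the parameterless create()
reduces to the following sketch; `traindata` and `filename` are assumed to be prepared elsewhere:
@code{.cpp}
using namespace cv;
using namespace cv::ml;
Ptr<NormalBayesClassifier> nb = StatModel::train<NormalBayesClassifier>(traindata); // create and train
Ptr<NormalBayesClassifier> nb2 = StatModel::load<NormalBayesClassifier>(filename);  // load a pre-trained model
@endcode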
/****************************************************************************************\
@ -475,19 +438,18 @@ public:
class CV_EXPORTS_W KNearest : public StatModel
{
public:
class CV_EXPORTS_W_MAP Params
{
public:
/** @brief Constructor with parameters */
Params(int defaultK=10, bool isclassifier_=true, int Emax_=INT_MAX, int algorithmType_=BRUTE_FORCE);
CV_PROP_RW int defaultK; //!< default number of neighbors to use in predict method
CV_PROP_RW bool isclassifier; //!< whether classification or regression model should be trained
CV_PROP_RW int Emax; //!< for implementation with KDTree
CV_PROP_RW int algorithmType; //!< See KNearest::Types
};
virtual void setParams(const Params& p) = 0;
virtual Params getParams() const = 0;
/** Default number of neighbors to use in predict method. */
CV_PURE_PROPERTY(int, DefaultK)
/** Whether classification or regression model should be trained. */
CV_PURE_PROPERTY(bool, IsClassifier)
/** Parameter for KDTree implementation. */
CV_PURE_PROPERTY(int, Emax)
/** %Algorithm type, one of KNearest::Types. */
CV_PURE_PROPERTY(int, AlgorithmType)
/** @brief Finds the neighbors and predicts responses for input vectors.
@ -520,17 +482,19 @@ public:
OutputArray neighborResponses=noArray(),
OutputArray dist=noArray() ) const = 0;
enum Types { BRUTE_FORCE=1, KDTREE=2 };
/** @brief Implementations of KNearest algorithm
*/
enum Types
{
BRUTE_FORCE=1,
KDTREE=2
};
/** @brief Creates the empty model
@param params The model parameters
The static method creates an empty %KNearest classifier. It should then be trained using the train
method (see StatModel::train). Alternatively, you can load a pre-trained %KNearest model from file using:
`StatModel::load<KNearest>(filename)`
The static method creates an empty %KNearest classifier. It should then be trained using the StatModel::train method.
*/
static Ptr<KNearest> create(const Params& params=Params());
static Ptr<KNearest> create();
};
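A configuration sketch using the pure properties declared above; `traindata` (a Ptr<TrainData>) and
the CV_32F matrix `querySamples` are assumed prepared elsewhere:
@code{.cpp}
using namespace cv;
using namespace cv::ml;
Ptr<KNearest> knn = KNearest::create();
knn->setDefaultK(5);
knn->setIsClassifier(true);
knn->setAlgorithmType(KNearest::BRUTE_FORCE);
knn->train(traindata);
Mat results;
knn->findNearest(querySamples, knn->getDefaultK(), results); // one predicted response per query row
@endcode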
/****************************************************************************************\
@ -544,54 +508,6 @@ public:
class CV_EXPORTS_W SVM : public StatModel
{
public:
/** @brief %SVM training parameters.
The structure must be initialized and passed to the training method of %SVM.
*/
class CV_EXPORTS_W_MAP Params
{
public:
/** @brief Default constructor */
Params();
/** @brief Constructor with parameters */
Params( int svm_type, int kernel_type,
double degree, double gamma, double coef0,
double Cvalue, double nu, double p,
const Mat& classWeights, TermCriteria termCrit );
/** Type of a %SVM formulation. See SVM::Types. Default value is SVM::C_SVC. */
CV_PROP_RW int svmType;
/** Type of a %SVM kernel. See SVM::KernelTypes. Default value is SVM::RBF. */
CV_PROP_RW int kernelType;
/** Parameter \f$\gamma\f$ of a kernel function (SVM::POLY / SVM::RBF / SVM::SIGMOID /
SVM::CHI2). Default value is 1. */
CV_PROP_RW double gamma;
/** Parameter coef0 of a kernel function (SVM::POLY / SVM::SIGMOID). Default value is 0. */
CV_PROP_RW double coef0;
/** Parameter degree of a kernel function (SVM::POLY). Default value is 0. */
CV_PROP_RW double degree;
/** Parameter C of a %SVM optimization problem (SVM::C_SVC / SVM::EPS_SVR / SVM::NU_SVR).
Default value is 0. */
CV_PROP_RW double C;
/** Parameter \f$\nu\f$ of a %SVM optimization problem (SVM::NU_SVC / SVM::ONE_CLASS /
SVM::NU_SVR). Default value is 0. */
CV_PROP_RW double nu;
/** Parameter \f$\epsilon\f$ of a %SVM optimization problem (SVM::EPS_SVR). Default value is 0. */
CV_PROP_RW double p;
/** Optional weights in the SVM::C_SVC problem, assigned to particular classes. They are
multiplied by C so the parameter C of class \#i becomes classWeights(i) \* C. Thus these
weights affect the misclassification penalty for different classes. The larger the weight, the
larger the penalty on misclassification of data from the corresponding class. Default value is
empty Mat.*/
CV_PROP_RW Mat classWeights;
/** Termination criteria of the iterative %SVM training procedure which solves a partial
case of constrained quadratic optimization problem. You can specify tolerance and/or the
maximum number of iterations. Default value is TermCriteria(
TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON );*/
CV_PROP_RW TermCriteria termCrit;
};
class CV_EXPORTS Kernel : public Algorithm
{
@ -600,6 +516,59 @@ public:
virtual void calc( int vcount, int n, const float* vecs, const float* another, float* results ) = 0;
};
/** Type of a %SVM formulation.
See SVM::Types. Default value is SVM::C_SVC. */
CV_PURE_PROPERTY(int, Type)
/** Parameter \f$\gamma\f$ of a kernel function.
For SVM::POLY, SVM::RBF, SVM::SIGMOID or SVM::CHI2. Default value is 1. */
CV_PURE_PROPERTY(double, Gamma)
/** Parameter _coef0_ of a kernel function.
For SVM::POLY or SVM::SIGMOID. Default value is 0.*/
CV_PURE_PROPERTY(double, Coef0)
/** Parameter _degree_ of a kernel function.
For SVM::POLY. Default value is 0. */
CV_PURE_PROPERTY(double, Degree)
/** Parameter _C_ of a %SVM optimization problem.
For SVM::C_SVC, SVM::EPS_SVR or SVM::NU_SVR. Default value is 0. */
CV_PURE_PROPERTY(double, C)
/** Parameter \f$\nu\f$ of a %SVM optimization problem.
For SVM::NU_SVC, SVM::ONE_CLASS or SVM::NU_SVR. Default value is 0. */
CV_PURE_PROPERTY(double, Nu)
/** Parameter \f$\epsilon\f$ of a %SVM optimization problem.
For SVM::EPS_SVR. Default value is 0. */
CV_PURE_PROPERTY(double, P)
/** Optional weights in the SVM::C_SVC problem, assigned to particular classes.
They are multiplied by _C_ so the parameter _C_ of class _i_ becomes `classWeights(i) * C`. Thus
these weights affect the misclassification penalty for different classes. The larger the weight,
the larger the penalty on misclassification of data from the corresponding class. Default value is
empty Mat. */
CV_PURE_PROPERTY_S(cv::Mat, ClassWeights)
/** Termination criteria of the iterative %SVM training procedure which solves a partial
case of constrained quadratic optimization problem.
You can specify tolerance and/or the maximum number of iterations. Default value is
`TermCriteria( TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON )`; */
CV_PURE_PROPERTY_S(cv::TermCriteria, TermCriteria)
/** Type of a %SVM kernel.
See SVM::KernelTypes. Default value is SVM::RBF. */
virtual int getKernelType() const = 0;
/** Initialize with one of predefined kernels.
See SVM::KernelTypes. */
virtual void setKernel(int kernelType) = 0;
/** Initialize with custom kernel.
See SVM::Kernel class for implementation details */
virtual void setCustomKernel(const Ptr<Kernel> &_kernel) = 0;
//! %SVM type
enum Types {
/** C-Support Vector Classification. n-class classification (n \f$\geq\f$ 2), allows
@ -631,6 +600,7 @@ public:
![image](pics/SVM_Comparison.png)
*/
enum KernelTypes {
/** Returned by SVM::getKernelType in case when custom kernel has been set */
CUSTOM=-1,
/** Linear kernel. No mapping is done, linear discrimination (or regression) is
done in the original feature space. It is the fastest option. \f$K(x_i, x_j) = x_i^T x_j\f$. */
@ -678,13 +648,13 @@ public:
to such proportion in the whole train dataset.
The method trains the %SVM model automatically by choosing the optimal parameters C, gamma, p,
nu, coef0, degree from SVM::Params. Parameters are considered optimal when the cross-validation
nu, coef0, degree. Parameters are considered optimal when the cross-validation
estimate of the test set error is minimal.
If there is no need to optimize a parameter, the corresponding grid step should be set to any
value less than or equal to 1. For example, to avoid optimization in gamma, set `gammaGrid.step
= 0`, `gammaGrid.minVal`, `gammaGrid.maxVal` as arbitrary numbers. In this case, the value
`params.gamma` is taken for gamma.
`Gamma` is taken for gamma.
And, finally, if the optimization in a parameter is required but the corresponding grid is
unknown, you may call the function SVM::getDefaultGrid. To generate a grid, for example, for
@ -710,16 +680,6 @@ public:
*/
CV_WRAP virtual Mat getSupportVectors() const = 0;
virtual void setParams(const Params& p, const Ptr<Kernel>& customKernel=Ptr<Kernel>()) = 0;
/** @brief Returns the current %SVM parameters.
This function may be used to get the optimal parameters obtained while automatically training
SVM::trainAuto.
*/
virtual Params getParams() const = 0;
virtual Ptr<Kernel> getKernel() const = 0;
/** @brief Retrieves the decision function
@param i the index of the decision function. If the problem solved is regression, 1-class or
@ -747,21 +707,10 @@ public:
*/
static ParamGrid getDefaultGrid( int param_id );
/** @brief Creates empty model
@param p %SVM parameters
@param customKernel the optional custom kernel to use. It must implement SVM::Kernel interface.
Use StatModel::train to train the model:
@code
StatModel::train<SVM>(traindata, params); // to create and train the model
// or
StatModel::load<SVM>(filename); // to load the pre-trained model.
@endcode
Since %SVM has several parameters, you may want to find the best parameters for your problem. It
can be done with SVM::trainAuto.
*/
static Ptr<SVM> create(const Params& p=Params(), const Ptr<Kernel>& customKernel=Ptr<Kernel>());
/** Creates empty model.
Use StatModel::train to train the model. Since %SVM has several parameters, you may want to
find the best parameters for your problem; this can be done with SVM::trainAuto. */
static Ptr<SVM> create();
};
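The parameters that used to live in SVM::Params are now set individually; a minimal sketch,
assuming `traindata` was prepared with TrainData::create:
@code{.cpp}
using namespace cv;
using namespace cv::ml;
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::RBF);
svm->setC(1.0);
svm->setGamma(0.5);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, FLT_EPSILON));
svm->train(traindata); // or svm->trainAuto(traindata) to cross-validate over parameter grids
@endcode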
/****************************************************************************************\
@ -802,34 +751,22 @@ public:
//! The initial step
enum {START_E_STEP=1, START_M_STEP=2, START_AUTO_STEP=0};
/** @brief The class describes %EM training parameters.
*/
class CV_EXPORTS_W_MAP Params
{
public:
/** @brief The constructor
/** The number of mixture components in the Gaussian mixture model.
Default value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some %EM implementations could
determine the optimal number of mixtures within a specified value range, but that is not the
case in ML yet. */
CV_PURE_PROPERTY(int, ClustersNumber)
@param nclusters The number of mixture components in the Gaussian mixture model. Default
value of the parameter is EM::DEFAULT_NCLUSTERS=5. Some %EM implementations could
determine the optimal number of mixtures within a specified value range, but that is not
the case in ML yet.
@param covMatType Constraint on covariance matrices which defines type of matrices. See
EM::Types.
@param termCrit The termination criteria of the %EM algorithm. The %EM algorithm can be
terminated by the number of iterations termCrit.maxCount (number of M-steps) or when
relative change of likelihood logarithm is less than termCrit.epsilon. Default maximum
number of iterations is EM::DEFAULT_MAX_ITERS=100.
*/
explicit Params(int nclusters=DEFAULT_NCLUSTERS, int covMatType=EM::COV_MAT_DIAGONAL,
const TermCriteria& termCrit=TermCriteria(TermCriteria::COUNT+TermCriteria::EPS,
EM::DEFAULT_MAX_ITERS, 1e-6));
CV_PROP_RW int nclusters;
CV_PROP_RW int covMatType;
CV_PROP_RW TermCriteria termCrit;
};
/** Constraint on covariance matrices which defines type of matrices.
See EM::Types. */
CV_PURE_PROPERTY(int, CovarianceMatrixType)
/** The termination criteria of the %EM algorithm.
The %EM algorithm can be terminated by the number of iterations termCrit.maxCount (number of
M-steps) or when relative change of likelihood logarithm is less than termCrit.epsilon. Default
maximum number of iterations is EM::DEFAULT_MAX_ITERS=100. */
CV_PURE_PROPERTY_S(TermCriteria, TermCriteria)
virtual void setParams(const Params& p) = 0;
virtual Params getParams() const = 0;
/** @brief Returns weights of the mixtures
Returns vector with the number of elements equal to the number of mixtures.
@ -862,9 +799,7 @@ public:
*/
CV_WRAP virtual Vec2d predict2(InputArray sample, OutputArray probs) const = 0;
virtual bool train( const Ptr<TrainData>& trainData, int flags=0 ) = 0;
/** @brief Static method that estimates the Gaussian mixture parameters from a samples set
/** @brief Estimates the Gaussian mixture parameters from a samples set.
This variation starts with the Expectation step. Initial values of the model parameters will be
estimated by the k-means algorithm.
@ -891,15 +826,13 @@ public:
@param probs The optional output matrix that contains posterior probabilities of each Gaussian
mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
CV_64FC1 type.
@param params The Gaussian mixture params, see EM::Params description
*/
static Ptr<EM> train(InputArray samples,
virtual bool trainEM(InputArray samples,
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray(),
const Params& params=Params());
OutputArray probs=noArray()) = 0;
/** @brief Static method that estimates the Gaussian mixture parameters from a samples set
/** @brief Estimates the Gaussian mixture parameters from a samples set.
This variation starts with the Expectation step. You need to provide initial means \f$a_k\f$ of
mixture components. Optionally you can pass initial weights \f$\pi_k\f$ and covariance matrices
@ -925,17 +858,15 @@ public:
@param probs The optional output matrix that contains posterior probabilities of each Gaussian
mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
CV_64FC1 type.
@param params The Gaussian mixture params, see EM::Params description
*/
static Ptr<EM> train_startWithE(InputArray samples, InputArray means0,
virtual bool trainE(InputArray samples, InputArray means0,
InputArray covs0=noArray(),
InputArray weights0=noArray(),
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray(),
const Params& params=Params());
OutputArray probs=noArray()) = 0;
/** @brief Static method that estimates the Gaussian mixture parameters from a samples set
/** @brief Estimates the Gaussian mixture parameters from a samples set.
This variation starts with the Maximization step. You need to provide initial probabilities
\f$p_{i,k}\f$ to use this option.
@ -952,22 +883,17 @@ public:
@param probs The optional output matrix that contains posterior probabilities of each Gaussian
mixture component given each sample. It has \f$nsamples \times nclusters\f$ size and
CV_64FC1 type.
@param params The Gaussian mixture params, see EM::Params description
*/
static Ptr<EM> train_startWithM(InputArray samples, InputArray probs0,
virtual bool trainM(InputArray samples, InputArray probs0,
OutputArray logLikelihoods=noArray(),
OutputArray labels=noArray(),
OutputArray probs=noArray(),
const Params& params=Params());
/** @brief Creates empty %EM model
@param params %EM parameters
OutputArray probs=noArray()) = 0;
/** Creates empty %EM model.
The model should then be trained using the StatModel::train(traindata, flags) method. Alternatively, you
can use one of the EM::train\* methods or load it from file using StatModel::load\<EM\>(filename).
*/
static Ptr<EM> create(const Params& params=Params());
static Ptr<EM> create();
};
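With the static EM::train\* factories gone, training goes through the member methods; a sketch,
where `samples` is assumed to be an nsamples x dims CV_64FC1 matrix and `sample` a single query row:
@code{.cpp}
using namespace cv;
using namespace cv::ml;
Ptr<EM> em = EM::create();
em->setClustersNumber(3);
em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
Mat logLikelihoods, labels;
em->trainEM(samples, logLikelihoods, labels, noArray());
Vec2d res = em->predict2(sample, noArray()); // res[0]: likelihood logarithm, res[1]: component index
@endcode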
/****************************************************************************************\
@ -989,68 +915,58 @@ public:
/** Predict options */
enum Flags { PREDICT_AUTO=0, PREDICT_SUM=(1<<8), PREDICT_MAX_VOTE=(2<<8), PREDICT_MASK=(3<<8) };
/** @brief The structure contains all the decision tree training parameters.
/** Cluster possible values of a categorical variable into K\<=maxCategories clusters to
find a suboptimal split.
If a discrete variable, on which the training procedure tries to make a split, takes more than
maxCategories values, the precise best subset estimation may take a very long time because the
algorithm is exponential. Instead, many decision tree engines (including our implementation)
try to find a sub-optimal split in this case by clustering all the samples into maxCategories
clusters, that is, some categories are merged together. The clustering is applied only in n \>
2-class classification problems for categorical variables with N \> maxCategories possible
values. In the case of regression and 2-class classification the optimal split can be found
efficiently without employing clustering, thus the parameter is not used in these cases.
Default value is 10.*/
CV_PURE_PROPERTY(int, MaxCategories)
You can initialize it by default constructor and then override any parameters directly before
training, or the structure may be fully initialized using the advanced variant of the
constructor.
*/
class CV_EXPORTS_W_MAP Params
{
public:
/** @brief Default constructor. */
Params();
/** @brief Constructor with parameters */
Params( int maxDepth, int minSampleCount,
double regressionAccuracy, bool useSurrogates,
int maxCategories, int CVFolds,
bool use1SERule, bool truncatePrunedTree,
const Mat& priors );
/** The maximum possible depth of the tree.
That is, the training algorithm attempts to split a node while its depth is less than maxDepth.
The root node has zero depth. The actual depth may be smaller if the other termination criteria
are met (see the outline of the training procedure @ref ml_intro_trees "here"), and/or if the
tree is pruned. Default value is INT_MAX.*/
CV_PURE_PROPERTY(int, MaxDepth)
/** @brief Cluster possible values of a categorical variable into K\<=maxCategories clusters
to find a suboptimal split.
/** If the number of samples in a node is less than this parameter then the node will not be split.
If a discrete variable, on which the training procedure tries to make a split, takes more
than maxCategories values, the precise best subset estimation may take a very long time
because the algorithm is exponential. Instead, many decision tree engines (including our
implementation) try to find a sub-optimal split in this case by clustering all the samples
into maxCategories clusters, that is, some categories are merged together. The clustering is
applied only in n \> 2-class classification problems for categorical variables with N \>
maxCategories possible values. In the case of regression and 2-class classification the optimal
split can be found efficiently without employing clustering, thus the parameter is not used
in these cases. Default value is 10.*/
CV_PROP_RW int maxCategories;
/** @brief The maximum possible depth of the tree.
Default value is 10.*/
CV_PURE_PROPERTY(int, MinSampleCount)
That is, the training algorithm attempts to split a node while its depth is less than
maxDepth. The root node has zero depth. The actual depth may be smaller if the other
termination criteria are met (see the outline of the training procedure @ref ml_intro_trees
"here"), and/or if the tree is pruned. Default value is INT_MAX.*/
CV_PROP_RW int maxDepth;
/** If the number of samples in a node is less than this parameter then the node will not be
split. Default value is 10.*/
CV_PROP_RW int minSampleCount;
/** If CVFolds \> 1 then algorithms prunes the built decision tree using K-fold
cross-validation procedure where K is equal to CVFolds. Default value is 10.*/
CV_PROP_RW int CVFolds;
/** @brief If true then surrogate splits will be built.
cross-validation procedure where K is equal to CVFolds.
Default value is 10.*/
CV_PURE_PROPERTY(int, CVFolds)
/** If true then surrogate splits will be built.
These splits allow the algorithm to work with missing data and compute variable importance correctly.
@note currently it's not implemented. Default value is false.*/
CV_PROP_RW bool useSurrogates;
/** If true then pruning will be harsher. This will make a tree more compact and more
resistant to the training data noise but a bit less accurate. Default value is true.*/
CV_PROP_RW bool use1SERule;
/** If true then pruned branches are physically removed from the tree. Otherwise they are
retained and it is possible to get results from the original unpruned (or pruned less
aggressively) tree. Default value is true.*/
CV_PROP_RW bool truncatePrunedTree;
/** @brief Termination criteria for regression trees.
Default value is false.
@note currently it's not implemented.*/
CV_PURE_PROPERTY(bool, UseSurrogates)
/** If true then pruning will be harsher.
This will make a tree more compact and more resistant to the training data noise but a bit less
accurate. Default value is true.*/
CV_PURE_PROPERTY(bool, Use1SERule)
/** If true then pruned branches are physically removed from the tree.
Otherwise they are retained and it is possible to get results from the original unpruned (or
pruned less aggressively) tree. Default value is true.*/
CV_PURE_PROPERTY(bool, TruncatePrunedTree)
/** Termination criteria for regression trees.
If all absolute differences between an estimated value in a node and values of train samples
in this node are less than this parameter then the node will not be split further. Default
value is 0.01f*/
CV_PROP_RW float regressionAccuracy;
CV_PURE_PROPERTY(float, RegressionAccuracy)
/** @brief The array of a priori class probabilities, sorted by the class label value.
The parameter can be used to tune the decision tree preferences toward a certain class. For
@ -1066,8 +982,7 @@ public:
category is 1 and the weight of the second category is 10, then each mistake in predicting
the second category is equivalent to making 10 mistakes in predicting the first category.
Default value is empty Mat.*/
CV_PROP_RW Mat priors;
};
CV_PURE_PROPERTY_S(cv::Mat, Priors)
/** @brief The class represents a decision tree node.
*/
@ -1114,13 +1029,6 @@ public:
@endcode */
};
/** @brief Sets the training parameters
*/
virtual void setDParams(const Params& p);
/** @brief Returns the training parameters
*/
virtual Params getDParams() const;
/** @brief Returns indices of root nodes
*/
virtual const std::vector<int>& getRoots() const = 0;
@ -1146,7 +1054,7 @@ public:
trained using the train method (see StatModel::train). Alternatively, you can load the model from
file using StatModel::load\<DTrees\>(filename).
*/
static Ptr<DTrees> create(const Params& params=Params());
static Ptr<DTrees> create();
};
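The tree parameters are configured the same way; a brief sketch with a prepared `traindata`:
@code{.cpp}
using namespace cv;
using namespace cv::ml;
Ptr<DTrees> dtree = DTrees::create();
dtree->setMaxDepth(8);
dtree->setMinSampleCount(2);
dtree->setCVFolds(0); // disable the built-in cross-validation pruning
dtree->train(traindata);
@endcode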
/****************************************************************************************\
@ -1160,58 +1068,38 @@ public:
class CV_EXPORTS_W RTrees : public DTrees
{
public:
/** @brief The set of training parameters for the forest is a superset of the training
parameters for a single tree.
However, random trees do not need all the functionality/features of decision trees. Most
noticeably, the trees are not pruned, so the cross-validation parameters are not used.
*/
class CV_EXPORTS_W_MAP Params : public DTrees::Params
{
public:
/** @brief Default constructor. */
Params();
/** @brief Constructor with parameters. */
Params( int maxDepth, int minSampleCount,
double regressionAccuracy, bool useSurrogates,
int maxCategories, const Mat& priors,
bool calcVarImportance, int nactiveVars,
TermCriteria termCrit );
/** If true then variable importance will be calculated and then it can be retrieved by RTrees::getVarImportance.
Default value is false.*/
CV_PURE_PROPERTY(bool, CalculateVarImportance)
/** If true then variable importance will be calculated and then it can be retrieved by
RTrees::getVarImportance. Default value is false.*/
CV_PROP_RW bool calcVarImportance;
/** The size of the randomly selected subset of features at each tree node and that are used
to find the best split(s). If you set it to 0 then the size will be set to the square root
of the total number of features. Default value is 0.*/
CV_PROP_RW int nactiveVars;
/** The termination criteria that specifies when the training algorithm stops - either when
the specified number of trees is trained and added to the ensemble or when sufficient
accuracy (measured as OOB error) is achieved. Typically the more trees you have the better
the accuracy. However, the improvement in accuracy generally diminishes and asymptotes past
a certain number of trees. Also keep in mind that the number of trees increases the prediction
time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS + TermCriteria::EPS,
50, 0.1)*/
CV_PROP_RW TermCriteria termCrit;
};
to find the best split(s).
If you set it to 0 then the size will be set to the square root of the total number of
features. Default value is 0.*/
CV_PURE_PROPERTY(int, ActiveVarCount)
virtual void setRParams(const Params& p) = 0;
virtual Params getRParams() const = 0;
/** @brief Returns the variable importance array.
/** The termination criteria that specifies when the training algorithm stops.
Either when the specified number of trees is trained and added to the ensemble or when
sufficient accuracy (measured as OOB error) is achieved. Typically the more trees you have the
better the accuracy. However, the improvement in accuracy generally diminishes and asymptotes
past a certain number of trees. Also keep in mind that the number of trees increases the
prediction time linearly. Default value is TermCriteria(TermCriteria::MAX_ITERS +
TermCriteria::EPS, 50, 0.1)*/
CV_PURE_PROPERTY_S(TermCriteria, TermCriteria)
/** Returns the variable importance array.
The method returns the variable importance vector, computed at the training stage when
Params::calcVarImportance is set to true. If this flag was set to false, the empty matrix is
CalculateVarImportance is set to true. If this flag was set to false, an empty matrix is
returned.
*/
virtual Mat getVarImportance() const = 0;
/** @brief Creates the empty model
/** Creates the empty model.
Use StatModel::train to create and train the model, or StatModel::load to load a
pre-trained model.
*/
static Ptr<RTrees> create(const Params& params=Params());
static Ptr<RTrees> create();
};
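A corresponding random-forest setup sketch (`traindata` assumed prepared elsewhere):
@code{.cpp}
using namespace cv;
using namespace cv::ml;
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setCalculateVarImportance(true);
rtrees->setActiveVarCount(0); // 0: use the square root of the total number of features
rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 50, 0.1));
rtrees->train(traindata);
Mat importance = rtrees->getVarImportance(); // non-empty because CalculateVarImportance is set
@endcode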
/****************************************************************************************\
@ -1225,36 +1113,21 @@ public:
class CV_EXPORTS_W Boost : public DTrees
{
public:
/** @brief Parameters of Boost trees.
/** Type of the boosting algorithm.
See Boost::Types. Default value is Boost::REAL. */
CV_PURE_PROPERTY(int, BoostType)
The structure is derived from DTrees::Params but not all of the decision tree parameters are
supported. In particular, cross-validation is not supported.
/** The number of weak classifiers.
Default value is 100. */
CV_PURE_PROPERTY(int, WeakCount)
All parameters are public. You can initialize them by a constructor and then override some of
them directly if you want.
*/
class CV_EXPORTS_W_MAP Params : public DTrees::Params
{
public:
CV_PROP_RW int boostType; //!< Type of the boosting algorithm. See Boost::Types.
//!< Default value is Boost::REAL.
CV_PROP_RW int weakCount; //!< The number of weak classifiers. Default value is 100.
/** A threshold between 0 and 1 used to save computational time. Samples with summary weight
\f$\leq 1 - weight_trim_rate\f$ do not participate in the *next* iteration of training. Set
this parameter to 0 to turn off this functionality. Default value is 0.95.*/
CV_PROP_RW double weightTrimRate;
/** A threshold between 0 and 1 used to save computational time.
Samples with summary weight \f$\leq 1 - weight_trim_rate\f$ do not participate in the *next*
iteration of training. Set this parameter to 0 to turn off this functionality. Default value is 0.95.*/
CV_PURE_PROPERTY(double, WeightTrimRate)
/** @brief Default constructor */
Params();
/** @brief Constructor with parameters */
Params( int boostType, int weakCount, double weightTrimRate,
int maxDepth, bool useSurrogates, const Mat& priors );
};
/** @brief Boosting type
Gentle AdaBoost and Real AdaBoost are often the preferable choices.
*/
/** Boosting type.
Gentle AdaBoost and Real AdaBoost are often the preferable choices. */
enum Types {
DISCRETE=0, //!< Discrete AdaBoost.
REAL=1, //!< Real AdaBoost. It is a technique that utilizes confidence-rated predictions
@ -1264,17 +1137,9 @@ public:
//!<reason is often good with regression data.
};
/** @brief Returns the boosting parameters */
virtual Params getBParams() const = 0;
/** @brief Sets the boosting parameters */
virtual void setBParams(const Params& p) = 0;
/** @brief Creates the empty model
Use StatModel::train to train the model, StatModel::train\<Boost\>(traindata, params) to create
and train the model, StatModel::load\<Boost\>(filename) to load the pre-trained model.
*/
static Ptr<Boost> create(const Params& params=Params());
/** Creates the empty model.
Use StatModel::train to train the model, or StatModel::load\<Boost\>(filename) to load a pre-trained model. */
static Ptr<Boost> create();
};
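A boosting setup sketch under the same conventions (`traindata` assumed prepared elsewhere;
MaxDepth comes from the DTrees base class):
@code{.cpp}
using namespace cv;
using namespace cv::ml;
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::REAL);
boost->setWeakCount(100);
boost->setWeightTrimRate(0.95);
boost->setMaxDepth(2); // inherited DTrees property
boost->train(traindata);
@endcode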
/****************************************************************************************\
@ -1327,67 +1192,77 @@ Additional flags for StatModel::train are available: ANN_MLP::TrainFlags.
class CV_EXPORTS_W ANN_MLP : public StatModel
{
public:
/** @brief Parameters of the MLP and of the training algorithm.
*/
struct CV_EXPORTS_W_MAP Params
{
/** @brief Default constructor */
Params();
/** @brief Constructor with parameters
@note param1 sets Params::rp_dw0 for RPROP and Params::bp_dw_scale for BACKPROP.
@note param2 sets Params::rp_dw_min for RPROP and Params::bp_moment_scale for BACKPROP.
*/
Params( const Mat& layerSizes, int activateFunc, double fparam1, double fparam2,
TermCriteria termCrit, int trainMethod, double param1, double param2=0 );
/** Available training methods */
enum TrainingMethods {
BACKPROP=0, //!< The back-propagation algorithm.
RPROP=1 //!< The RPROP algorithm. See @cite RPROP93 for details.
};
/** Integer vector specifying the number of neurons in each layer including the input and
output layers. The very first element specifies the number of elements in the input layer.
The last element specifies the number of elements in the output layer. Default value is empty Mat.*/
CV_PROP_RW Mat layerSizes;
/** The activation function for each neuron. Currently the default and the only fully
supported activation function is ANN_MLP::SIGMOID_SYM. See ANN_MLP::ActivationFunctions.*/
CV_PROP_RW int activateFunc;
/** The first parameter of the activation function, \f$\alpha\f$. Default value is 0. */
CV_PROP_RW double fparam1;
/** The second parameter of the activation function, \f$\beta\f$. Default value is 0. */
CV_PROP_RW double fparam2;
/** Sets training method and common parameters.
@param method Default value is ANN_MLP::RPROP. See ANN_MLP::TrainingMethods.
@param param1 passed to setRpropDW0 for ANN_MLP::RPROP and to setBackpropWeightScale for ANN_MLP::BACKPROP
@param param2 passed to setRpropDWMin for ANN_MLP::RPROP and to setBackpropMomentumScale for ANN_MLP::BACKPROP.
*/
virtual void setTrainMethod(int method, double param1 = 0, double param2 = 0) = 0;
/** Termination criteria of the training algorithm. You can specify the maximum number of
iterations (maxCount) and/or how much the error could change between the iterations to make
the algorithm continue (epsilon). Default value is TermCriteria(TermCriteria::MAX_ITER +
TermCriteria::EPS, 1000, 0.01).*/
CV_PROP_RW TermCriteria termCrit;
/** Training method. Default value is Params::RPROP. See ANN_MLP::Params::TrainingMethods.*/
CV_PROP_RW int trainMethod;
/** Returns current training method */
virtual int getTrainMethod() const = 0;
// backpropagation parameters
/** BPROP: Strength of the weight gradient term. The recommended value is about 0.1. Default
value is 0.1.*/
CV_PROP_RW double bpDWScale;
/** BPROP: Strength of the momentum term (the difference between weights on the 2 previous
iterations). This parameter provides some inertia to smooth the random fluctuations of the
weights. It can vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so
is good enough. Default value is 0.1.*/
CV_PROP_RW double bpMomentScale;
/** Initialize the activation function for each neuron.
Currently the default and the only fully supported activation function is ANN_MLP::SIGMOID_SYM.
@param type The type of activation function. See ANN_MLP::ActivationFunctions.
@param param1 The first parameter of the activation function, \f$\alpha\f$. Default value is 0.
@param param2 The second parameter of the activation function, \f$\beta\f$. Default value is 0.
*/
virtual void setActivationFunction(int type, double param1 = 0, double param2 = 0) = 0;
// rprop parameters
/** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$. Default value is 0.1.*/
CV_PROP_RW double rpDW0;
/** RPROP: Increase factor \f$\eta^+\f$. It must be \>1. Default value is 1.2.*/
CV_PROP_RW double rpDWPlus;
/** RPROP: Decrease factor \f$\eta^-\f$. It must be \<1. Default value is 0.5.*/
CV_PROP_RW double rpDWMinus;
/** RPROP: Update-values lower limit \f$\Delta_{min}\f$. It must be positive. Default value is FLT_EPSILON.*/
CV_PROP_RW double rpDWMin;
/** RPROP: Update-values upper limit \f$\Delta_{max}\f$. It must be \>1. Default value is 50.*/
CV_PROP_RW double rpDWMax;
};
/** Integer vector specifying the number of neurons in each layer including the input and output layers.
The very first element specifies the number of elements in the input layer.
The last element specifies the number of elements in the output layer. Default value is empty Mat.
@sa getLayerSizes */
virtual void setLayerSizes(InputArray _layer_sizes) = 0;
/** Integer vector specifying the number of neurons in each layer including the input and output layers.
The very first element specifies the number of elements in the input layer.
The last element specifies the number of elements in the output layer.
@sa setLayerSizes */
virtual cv::Mat getLayerSizes() const = 0;
/** Termination criteria of the training algorithm.
You can specify the maximum number of iterations (maxCount) and/or how much the error could
change between the iterations to make the algorithm continue (epsilon). Default value is
TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01).*/
CV_PURE_PROPERTY(TermCriteria, TermCriteria)
/** BPROP: Strength of the weight gradient term.
The recommended value is about 0.1. Default value is 0.1.*/
CV_PURE_PROPERTY(double, BackpropWeightScale)
/** BPROP: Strength of the momentum term (the difference between weights on the 2 previous iterations).
This parameter provides some inertia to smooth the random fluctuations of the weights. It can
vary from 0 (the feature is disabled) to 1 and beyond. The value 0.1 or so is good enough.
Default value is 0.1.*/
CV_PURE_PROPERTY(double, BackpropMomentumScale)
/** RPROP: Initial value \f$\Delta_0\f$ of update-values \f$\Delta_{ij}\f$.
Default value is 0.1.*/
CV_PURE_PROPERTY(double, RpropDW0)
/** RPROP: Increase factor \f$\eta^+\f$.
It must be \>1. Default value is 1.2.*/
CV_PURE_PROPERTY(double, RpropDWPlus)
/** RPROP: Decrease factor \f$\eta^-\f$.
It must be \<1. Default value is 0.5.*/
CV_PURE_PROPERTY(double, RpropDWMinus)
/** RPROP: Update-values lower limit \f$\Delta_{min}\f$.
It must be positive. Default value is FLT_EPSILON.*/
CV_PURE_PROPERTY(double, RpropDWMin)
/** RPROP: Update-values upper limit \f$\Delta_{max}\f$.
It must be \>1. Default value is 50.*/
CV_PURE_PROPERTY(double, RpropDWMax)
/** possible activation functions */
enum ActivationFunctions {
@ -1422,19 +1297,12 @@ public:
virtual Mat getWeights(int layerIdx) const = 0;
/** @brief Sets the new network parameters */
virtual void setParams(const Params& p) = 0;
/** @brief Retrieves the current network parameters */
virtual Params getParams() const = 0;
/** @brief Creates empty model
Use StatModel::train to train the model, StatModel::train\<ANN_MLP\>(traindata, params) to
create and train the model, StatModel::load\<ANN_MLP\>(filename) to load the pre-trained model.
Use StatModel::train to train the model, or StatModel::load\<ANN_MLP\>(filename) to load a pre-trained model.
Note that the train method has optional flags: ANN_MLP::TrainFlags.
*/
static Ptr<ANN_MLP> create(const Params& params=Params());
static Ptr<ANN_MLP> create();
};
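A network setup sketch with the new accessors; the layer sizes and `traindata` are illustrative
assumptions:
@code{.cpp}
using namespace cv;
using namespace cv::ml;
Ptr<ANN_MLP> ann = ANN_MLP::create();
Mat layers = (Mat_<int>(1, 3) << 4, 8, 1); // 4 inputs, one hidden layer of 8 neurons, 1 output
ann->setLayerSizes(layers);
ann->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
ann->setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON);
ann->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 1000, 0.01));
ann->train(traindata);
@endcode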
/****************************************************************************************\
@ -1448,43 +1316,38 @@ public:
class CV_EXPORTS LogisticRegression : public StatModel
{
public:
class CV_EXPORTS Params
{
public:
/** @brief Constructor */
Params(double learning_rate = 0.001,
int iters = 1000,
int method = LogisticRegression::BATCH,
int normalization = LogisticRegression::REG_L2,
int reg = 1,
int batch_size = 1);
double alpha; //!< learning rate.
int num_iters; //!< number of iterations.
/** Learning rate. */
CV_PURE_PROPERTY(double, LearningRate)
/** Number of iterations. */
CV_PURE_PROPERTY(int, Iterations)
/** Kind of regularization to be applied. See LogisticRegression::RegKinds. */
int norm;
/** Enable or disable regularization. Set to positive integer (greater than zero) to enable
and to 0 to disable. */
int regularized;
CV_PURE_PROPERTY(int, Regularization)
/** Kind of training method used. See LogisticRegression::Methods. */
int train_method;
CV_PURE_PROPERTY(int, TrainMethod)
/** Specifies the number of training samples taken in each step of Mini-Batch Gradient
Descent. It will only be used if the LogisticRegression::MINI_BATCH training method is used. It
has to take values less than the total number of training samples. */
int mini_batch_size;
/** Termination criteria of the algorithm */
TermCriteria term_crit;
};
CV_PURE_PROPERTY(int, MiniBatchSize)
/** Termination criteria of the algorithm. */
CV_PURE_PROPERTY(TermCriteria, TermCriteria)
//! Regularization kinds
enum RegKinds {
REG_NONE = -1, //!< Regularization disabled
REG_L1 = 0, //!< %L1 norm
REG_L2 = 1 //!< %L2 norm. Set Params::regularized \> 0 when using this kind
REG_L2 = 1 //!< %L2 norm
};
//! Training methods
enum Methods {
BATCH = 0,
MINI_BATCH = 1 //!< Set Params::mini_batch_size to a positive integer when using this method.
MINI_BATCH = 1 //!< Set MiniBatchSize to a positive integer when using this method.
};
/** @brief Predicts responses for input samples and returns a float type.
@ -1505,11 +1368,9 @@ public:
/** @brief Creates empty model.
@param params The training parameters for the classifier of type LogisticRegression::Params.
Creates Logistic Regression model with parameters given.
*/
static Ptr<LogisticRegression> create( const Params& params = Params() );
static Ptr<LogisticRegression> create();
};
/****************************************************************************************\

View File

@ -42,56 +42,35 @@
namespace cv { namespace ml {
ANN_MLP::Params::Params()
struct AnnParams
{
AnnParams()
{
layerSizes = Mat();
activateFunc = SIGMOID_SYM;
fparam1 = fparam2 = 0;
termCrit = TermCriteria( TermCriteria::COUNT + TermCriteria::EPS, 1000, 0.01 );
trainMethod = RPROP;
trainMethod = ANN_MLP::RPROP;
bpDWScale = bpMomentScale = 0.1;
rpDW0 = 0.1; rpDWPlus = 1.2; rpDWMinus = 0.5;
rpDWMin = FLT_EPSILON; rpDWMax = 50.;
}
TermCriteria termCrit;
int trainMethod;
ANN_MLP::Params::Params( const Mat& _layerSizes, int _activateFunc, double _fparam1, double _fparam2,
TermCriteria _termCrit, int _trainMethod, double _param1, double _param2 )
{
layerSizes = _layerSizes;
activateFunc = _activateFunc;
fparam1 = _fparam1;
fparam2 = _fparam2;
termCrit = _termCrit;
trainMethod = _trainMethod;
bpDWScale = bpMomentScale = 0.1;
rpDW0 = 1.; rpDWPlus = 1.2; rpDWMinus = 0.5;
rpDWMin = FLT_EPSILON; rpDWMax = 50.;
double bpDWScale;
double bpMomentScale;
if( trainMethod == RPROP )
{
rpDW0 = _param1;
if( rpDW0 < FLT_EPSILON )
rpDW0 = 1.;
rpDWMin = _param2;
rpDWMin = std::max( rpDWMin, 0. );
}
else if( trainMethod == BACKPROP )
{
bpDWScale = _param1;
if( bpDWScale <= 0 )
bpDWScale = 0.1;
bpDWScale = std::max( bpDWScale, 1e-3 );
bpDWScale = std::min( bpDWScale, 1. );
bpMomentScale = _param2;
if( bpMomentScale < 0 )
bpMomentScale = 0.1;
bpMomentScale = std::min( bpMomentScale, 1. );
}
else
trainMethod = RPROP;
}
double rpDW0;
double rpDWPlus;
double rpDWMinus;
double rpDWMin;
double rpDWMax;
};
template <typename T>
inline T inBounds(T val, T min_val, T max_val)
{
return std::min(std::max(val, min_val), max_val);
}
class ANN_MLPImpl : public ANN_MLP
{
@ -99,27 +78,21 @@ public:
ANN_MLPImpl()
{
clear();
}
ANN_MLPImpl( const Params& p )
{
clear();
setParams(p);
setActivationFunction( SIGMOID_SYM, 0, 0 );
setLayerSizes(Mat());
setTrainMethod(ANN_MLP::RPROP, 0.1, FLT_EPSILON);
}
virtual ~ANN_MLPImpl() {}
void setParams(const Params& p)
{
params = p;
create( params.layerSizes );
set_activ_func( params.activateFunc, params.fparam1, params.fparam2 );
}
Params getParams() const
{
return params;
}
CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.termCrit)
CV_IMPL_PROPERTY(double, BackpropWeightScale, params.bpDWScale)
CV_IMPL_PROPERTY(double, BackpropMomentumScale, params.bpMomentScale)
CV_IMPL_PROPERTY(double, RpropDW0, params.rpDW0)
CV_IMPL_PROPERTY(double, RpropDWPlus, params.rpDWPlus)
CV_IMPL_PROPERTY(double, RpropDWMinus, params.rpDWMinus)
CV_IMPL_PROPERTY(double, RpropDWMin, params.rpDWMin)
CV_IMPL_PROPERTY(double, RpropDWMax, params.rpDWMax)
void clear()
{
@ -132,7 +105,35 @@ public:
int layer_count() const { return (int)layer_sizes.size(); }
void set_activ_func( int _activ_func, double _f_param1, double _f_param2 )
void setTrainMethod(int method, double param1, double param2)
{
if (method != ANN_MLP::RPROP && method != ANN_MLP::BACKPROP)
method = ANN_MLP::RPROP;
params.trainMethod = method;
if(method == ANN_MLP::RPROP )
{
if( param1 < FLT_EPSILON )
param1 = 1.;
params.rpDW0 = param1;
params.rpDWMin = std::max( param2, 0. );
}
else if(method == ANN_MLP::BACKPROP )
{
if( param1 <= 0 )
param1 = 0.1;
params.bpDWScale = inBounds<double>(param1, 1e-3, 1.);
if( param2 < 0 )
param2 = 0.1;
params.bpMomentScale = std::min( param2, 1. );
}
}
int getTrainMethod() const
{
return params.trainMethod;
}
void setActivationFunction(int _activ_func, double _f_param1, double _f_param2 )
{
if( _activ_func < 0 || _activ_func > GAUSSIAN )
CV_Error( CV_StsOutOfRange, "Unknown activation function" );
@ -201,7 +202,12 @@ public:
}
}
void create( InputArray _layer_sizes )
Mat getLayerSizes() const
{
return Mat_<int>(layer_sizes, true);
}
void setLayerSizes( InputArray _layer_sizes )
{
clear();
@ -700,7 +706,7 @@ public:
termcrit.maxCount = std::max((params.termCrit.type & CV_TERMCRIT_ITER ? params.termCrit.maxCount : MAX_ITER), 1);
termcrit.epsilon = std::max((params.termCrit.type & CV_TERMCRIT_EPS ? params.termCrit.epsilon : DEFAULT_EPSILON), DBL_EPSILON);
int iter = params.trainMethod == Params::BACKPROP ?
int iter = params.trainMethod == ANN_MLP::BACKPROP ?
train_backprop( inputs, outputs, sw, termcrit ) :
train_rprop( inputs, outputs, sw, termcrit );
@ -1113,13 +1119,13 @@ public:
fs << "min_val" << min_val << "max_val" << max_val << "min_val1" << min_val1 << "max_val1" << max_val1;
fs << "training_params" << "{";
if( params.trainMethod == Params::BACKPROP )
if( params.trainMethod == ANN_MLP::BACKPROP )
{
fs << "train_method" << "BACKPROP";
fs << "dw_scale" << params.bpDWScale;
fs << "moment_scale" << params.bpMomentScale;
}
else if( params.trainMethod == Params::RPROP )
else if( params.trainMethod == ANN_MLP::RPROP )
{
fs << "train_method" << "RPROP";
fs << "dw0" << params.rpDW0;
@ -1186,7 +1192,7 @@ public:
f_param1 = (double)fn["f_param1"];
f_param2 = (double)fn["f_param2"];
set_activ_func( activ_func, f_param1, f_param2 );
setActivationFunction( activ_func, f_param1, f_param2 );
min_val = (double)fn["min_val"];
max_val = (double)fn["max_val"];
@ -1194,7 +1200,7 @@ public:
max_val1 = (double)fn["max_val1"];
FileNode tpn = fn["training_params"];
params = Params();
params = AnnParams();
if( !tpn.empty() )
{
@ -1202,13 +1208,13 @@ public:
if( tmethod_name == "BACKPROP" )
{
params.trainMethod = Params::BACKPROP;
params.trainMethod = ANN_MLP::BACKPROP;
params.bpDWScale = (double)tpn["dw_scale"];
params.bpMomentScale = (double)tpn["moment_scale"];
}
else if( tmethod_name == "RPROP" )
{
params.trainMethod = Params::RPROP;
params.trainMethod = ANN_MLP::RPROP;
params.rpDW0 = (double)tpn["dw0"];
params.rpDWPlus = (double)tpn["dw_plus"];
params.rpDWMinus = (double)tpn["dw_minus"];
@ -1244,7 +1250,7 @@ public:
vector<int> _layer_sizes;
readVectorOrMat(fn["layer_sizes"], _layer_sizes);
create( _layer_sizes );
setLayerSizes( _layer_sizes );
int i, l_count = layer_count();
read_params(fn);
@ -1267,11 +1273,6 @@ public:
trained = true;
}
Mat getLayerSizes() const
{
return Mat_<int>(layer_sizes, true);
}
Mat getWeights(int layerIdx) const
{
CV_Assert( 0 <= layerIdx && layerIdx < (int)weights.size() );
@ -1304,17 +1305,16 @@ public:
double min_val, max_val, min_val1, max_val1;
int activ_func;
int max_lsize, max_buf_sz;
Params params;
AnnParams params;
RNG rng;
Mutex mtx;
bool trained;
};
Ptr<ANN_MLP> ANN_MLP::create(const ANN_MLP::Params& params)
Ptr<ANN_MLP> ANN_MLP::create()
{
Ptr<ANN_MLPImpl> ann = makePtr<ANN_MLPImpl>(params);
return ann;
return makePtr<ANN_MLPImpl>();
}
}}

View File

@ -54,48 +54,33 @@ log_ratio( double val )
}
Boost::Params::Params()
BoostTreeParams::BoostTreeParams()
{
boostType = Boost::REAL;
weakCount = 100;
weightTrimRate = 0.95;
CVFolds = 0;
maxDepth = 1;
}
Boost::Params::Params( int _boostType, int _weak_count,
double _weightTrimRate, int _maxDepth,
bool _use_surrogates, const Mat& _priors )
BoostTreeParams::BoostTreeParams( int _boostType, int _weak_count,
double _weightTrimRate)
{
boostType = _boostType;
weakCount = _weak_count;
weightTrimRate = _weightTrimRate;
CVFolds = 0;
maxDepth = _maxDepth;
useSurrogates = _use_surrogates;
priors = _priors;
}
class DTreesImplForBoost : public DTreesImpl
{
public:
DTreesImplForBoost() {}
DTreesImplForBoost()
{
params.setCVFolds(0);
params.setMaxDepth(1);
}
virtual ~DTreesImplForBoost() {}
bool isClassifier() const { return true; }
void setBParams(const Boost::Params& p)
{
bparams = p;
}
Boost::Params getBParams() const
{
return bparams;
}
void clear()
{
DTreesImpl::clear();
@ -199,10 +184,6 @@ public:
bool train( const Ptr<TrainData>& trainData, int flags )
{
Params dp(bparams.maxDepth, bparams.minSampleCount, bparams.regressionAccuracy,
bparams.useSurrogates, bparams.maxCategories, 0,
false, false, bparams.priors);
setDParams(dp);
startTraining(trainData, flags);
int treeidx, ntrees = bparams.weakCount >= 0 ? bparams.weakCount : 10000;
vector<int> sidx = w->sidx;
@ -426,12 +407,6 @@ public:
void readParams( const FileNode& fn )
{
DTreesImpl::readParams(fn);
bparams.maxDepth = params0.maxDepth;
bparams.minSampleCount = params0.minSampleCount;
bparams.regressionAccuracy = params0.regressionAccuracy;
bparams.useSurrogates = params0.useSurrogates;
bparams.maxCategories = params0.maxCategories;
bparams.priors = params0.priors;
FileNode tparams_node = fn["training_params"];
// check for old layout
@ -465,7 +440,7 @@ public:
}
}
Boost::Params bparams;
BoostTreeParams bparams;
vector<double> sumResult;
};
@ -476,6 +451,20 @@ public:
BoostImpl() {}
virtual ~BoostImpl() {}
CV_IMPL_PROPERTY(int, BoostType, impl.bparams.boostType)
CV_IMPL_PROPERTY(int, WeakCount, impl.bparams.weakCount)
CV_IMPL_PROPERTY(double, WeightTrimRate, impl.bparams.weightTrimRate)
CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params)
CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params)
CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params)
CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params)
CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params)
CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params)
CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params)
CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params)
CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params)
String getDefaultModelName() const { return "opencv_ml_boost"; }
bool train( const Ptr<TrainData>& trainData, int flags )
@ -498,9 +487,6 @@ public:
impl.read(fn);
}
void setBParams(const Params& p) { impl.setBParams(p); }
Params getBParams() const { return impl.getBParams(); }
int getVarCount() const { return impl.getVarCount(); }
bool isTrained() const { return impl.isTrained(); }
@ -515,11 +501,9 @@ public:
};
Ptr<Boost> Boost::create(const Params& params)
Ptr<Boost> Boost::create()
{
Ptr<BoostImpl> p = makePtr<BoostImpl>();
p->setBParams(params);
return p;
return makePtr<BoostImpl>();
}
}}

View File

@ -48,37 +48,49 @@ namespace ml
const double minEigenValue = DBL_EPSILON;
EM::Params::Params(int _nclusters, int _covMatType, const TermCriteria& _termCrit)
{
nclusters = _nclusters;
covMatType = _covMatType;
termCrit = _termCrit;
}
class CV_EXPORTS EMImpl : public EM
{
public:
EMImpl(const Params& _params)
int nclusters;
int covMatType;
TermCriteria termCrit;
CV_IMPL_PROPERTY_S(TermCriteria, TermCriteria, termCrit)
void setClustersNumber(int val)
{
setParams(_params);
nclusters = val;
CV_Assert(nclusters > 1);
}
int getClustersNumber() const
{
return nclusters;
}
void setCovarianceMatrixType(int val)
{
covMatType = val;
CV_Assert(covMatType == COV_MAT_SPHERICAL ||
covMatType == COV_MAT_DIAGONAL ||
covMatType == COV_MAT_GENERIC);
}
int getCovarianceMatrixType() const
{
return covMatType;
}
EMImpl()
{
nclusters = DEFAULT_NCLUSTERS;
covMatType=EM::COV_MAT_DIAGONAL;
termCrit = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, EM::DEFAULT_MAX_ITERS, 1e-6);
}
virtual ~EMImpl() {}
void setParams(const Params& _params)
{
params = _params;
CV_Assert(params.nclusters > 1);
CV_Assert(params.covMatType == COV_MAT_SPHERICAL ||
params.covMatType == COV_MAT_DIAGONAL ||
params.covMatType == COV_MAT_GENERIC);
}
Params getParams() const
{
return params;
}
void clear()
{
trainSamples.release();
@ -100,10 +112,10 @@ public:
bool train(const Ptr<TrainData>& data, int)
{
Mat samples = data->getTrainSamples(), labels;
return train_(samples, labels, noArray(), noArray());
return trainEM(samples, labels, noArray(), noArray());
}
bool train_(InputArray samples,
bool trainEM(InputArray samples,
OutputArray logLikelihoods,
OutputArray labels,
OutputArray probs)
@ -157,7 +169,7 @@ public:
{
if( _outputs.fixedType() )
ptype = _outputs.type();
_outputs.create(samples.rows, params.nclusters, ptype);
_outputs.create(samples.rows, nclusters, ptype);
}
else
nsamples = std::min(nsamples, 1);
@ -193,7 +205,7 @@ public:
{
if( _probs.fixedType() )
ptype = _probs.type();
_probs.create(1, params.nclusters, ptype);
_probs.create(1, nclusters, ptype);
probs = _probs.getMat();
}
@ -311,7 +323,6 @@ public:
const std::vector<Mat>* covs0,
const Mat* weights0)
{
int nclusters = params.nclusters, covMatType = params.covMatType;
clear();
checkTrainData(startStep, samples, nclusters, covMatType, probs0, means0, covs0, weights0);
@ -350,7 +361,6 @@ public:
void decomposeCovs()
{
int nclusters = params.nclusters, covMatType = params.covMatType;
CV_Assert(!covs.empty());
covsEigenValues.resize(nclusters);
if(covMatType == COV_MAT_GENERIC)
@ -383,7 +393,6 @@ public:
void clusterTrainSamples()
{
int nclusters = params.nclusters;
int nsamples = trainSamples.rows;
// Cluster samples, compute/update means
@ -443,7 +452,6 @@ public:
void computeLogWeightDivDet()
{
int nclusters = params.nclusters;
CV_Assert(!covsEigenValues.empty());
Mat logWeights;
@ -458,7 +466,7 @@ public:
double logDetCov = 0.;
const int evalCount = static_cast<int>(covsEigenValues[clusterIndex].total());
for(int di = 0; di < evalCount; di++)
logDetCov += std::log(covsEigenValues[clusterIndex].at<double>(params.covMatType != COV_MAT_SPHERICAL ? di : 0));
logDetCov += std::log(covsEigenValues[clusterIndex].at<double>(covMatType != COV_MAT_SPHERICAL ? di : 0));
logWeightDivDet.at<double>(clusterIndex) = logWeights.at<double>(clusterIndex) - 0.5 * logDetCov;
}
@ -466,7 +474,6 @@ public:
bool doTrain(int startStep, OutputArray logLikelihoods, OutputArray labels, OutputArray probs)
{
int nclusters = params.nclusters;
int dim = trainSamples.cols;
// Precompute the empty initial train data in the cases of START_E_STEP and START_AUTO_STEP
if(startStep != START_M_STEP)
@ -488,9 +495,9 @@ public:
mStep();
double trainLogLikelihood, prevTrainLogLikelihood = 0.;
int maxIters = (params.termCrit.type & TermCriteria::MAX_ITER) ?
params.termCrit.maxCount : DEFAULT_MAX_ITERS;
double epsilon = (params.termCrit.type & TermCriteria::EPS) ? params.termCrit.epsilon : 0.;
int maxIters = (termCrit.type & TermCriteria::MAX_ITER) ?
termCrit.maxCount : DEFAULT_MAX_ITERS;
double epsilon = (termCrit.type & TermCriteria::EPS) ? termCrit.epsilon : 0.;
for(int iter = 0; ; iter++)
{
@ -521,12 +528,12 @@ public:
covs.resize(nclusters);
for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
{
if(params.covMatType == COV_MAT_SPHERICAL)
if(covMatType == COV_MAT_SPHERICAL)
{
covs[clusterIndex].create(dim, dim, CV_64FC1);
setIdentity(covs[clusterIndex], Scalar(covsEigenValues[clusterIndex].at<double>(0)));
}
else if(params.covMatType == COV_MAT_DIAGONAL)
else if(covMatType == COV_MAT_DIAGONAL)
{
covs[clusterIndex] = Mat::diag(covsEigenValues[clusterIndex]);
}
@ -555,7 +562,6 @@ public:
// see Alex Smola's blog http://blog.smola.org/page/2 for
// details on the log-sum-exp trick
int nclusters = params.nclusters, covMatType = params.covMatType;
int stype = sample.type();
CV_Assert(!means.empty());
CV_Assert((stype == CV_32F || stype == CV_64F) && (ptype == CV_32F || ptype == CV_64F));
@ -621,7 +627,7 @@ public:
void eStep()
{
// Compute probs_ik from means_k, covs_k and weights_k.
trainProbs.create(trainSamples.rows, params.nclusters, CV_64FC1);
trainProbs.create(trainSamples.rows, nclusters, CV_64FC1);
trainLabels.create(trainSamples.rows, 1, CV_32SC1);
trainLogLikelihoods.create(trainSamples.rows, 1, CV_64FC1);
@ -642,8 +648,6 @@ public:
void mStep()
{
// Update means_k, covs_k and weights_k from probs_ik
int nclusters = params.nclusters;
int covMatType = params.covMatType;
int dim = trainSamples.cols;
// Update weights
@ -755,12 +759,12 @@ public:
void write_params(FileStorage& fs) const
{
fs << "nclusters" << params.nclusters;
fs << "cov_mat_type" << (params.covMatType == COV_MAT_SPHERICAL ? String("spherical") :
params.covMatType == COV_MAT_DIAGONAL ? String("diagonal") :
params.covMatType == COV_MAT_GENERIC ? String("generic") :
format("unknown_%d", params.covMatType));
writeTermCrit(fs, params.termCrit);
fs << "nclusters" << nclusters;
fs << "cov_mat_type" << (covMatType == COV_MAT_SPHERICAL ? String("spherical") :
covMatType == COV_MAT_DIAGONAL ? String("diagonal") :
covMatType == COV_MAT_GENERIC ? String("generic") :
format("unknown_%d", covMatType));
writeTermCrit(fs, termCrit);
}
void write(FileStorage& fs) const
@ -781,15 +785,13 @@ public:
void read_params(const FileNode& fn)
{
Params _params;
_params.nclusters = (int)fn["nclusters"];
nclusters = (int)fn["nclusters"];
String s = (String)fn["cov_mat_type"];
_params.covMatType = s == "spherical" ? COV_MAT_SPHERICAL :
covMatType = s == "spherical" ? COV_MAT_SPHERICAL :
s == "diagonal" ? COV_MAT_DIAGONAL :
s == "generic" ? COV_MAT_GENERIC : -1;
CV_Assert(_params.covMatType >= 0);
_params.termCrit = readTermCrit(fn);
setParams(_params);
CV_Assert(covMatType >= 0);
termCrit = readTermCrit(fn);
}
void read(const FileNode& fn)
@ -820,8 +822,6 @@ public:
std::copy(covs.begin(), covs.end(), _covs.begin());
}
Params params;
// all inner matrices have type CV_64FC1
Mat trainSamples;
Mat trainProbs;
@ -838,41 +838,9 @@ public:
Mat logWeightDivDet;
};
Ptr<EM> EM::train(InputArray samples, OutputArray logLikelihoods,
OutputArray labels, OutputArray probs,
const EM::Params& params)
Ptr<EM> EM::create()
{
Ptr<EMImpl> em = makePtr<EMImpl>(params);
if(!em->train_(samples, logLikelihoods, labels, probs))
em.release();
return em;
}
Ptr<EM> EM::train_startWithE(InputArray samples, InputArray means0,
InputArray covs0, InputArray weights0,
OutputArray logLikelihoods, OutputArray labels,
OutputArray probs, const EM::Params& params)
{
Ptr<EMImpl> em = makePtr<EMImpl>(params);
if(!em->trainE(samples, means0, covs0, weights0, logLikelihoods, labels, probs))
em.release();
return em;
}
Ptr<EM> EM::train_startWithM(InputArray samples, InputArray probs0,
OutputArray logLikelihoods, OutputArray labels,
OutputArray probs, const EM::Params& params)
{
Ptr<EMImpl> em = makePtr<EMImpl>(params);
if(!em->trainM(samples, probs0, logLikelihoods, labels, probs))
em.release();
return em;
}
Ptr<EM> EM::create(const Params& params)
{
return makePtr<EMImpl>(params);
return makePtr<EMImpl>();
}
}
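With EM::Params gone, the equivalent setup goes through the new property setters; a minimal sketch (samples is assumed to be one observation per row, and the values are the defaults shown above):

    #include <cfloat>
    #include <opencv2/ml.hpp>

    void trainEmSketch(const cv::Mat& samples)
    {
        cv::Ptr<cv::ml::EM> em = cv::ml::EM::create();
        em->setClustersNumber(5);                                  // was Params::nclusters
        em->setCovarianceMatrixType(cv::ml::EM::COV_MAT_DIAGONAL); // was Params::covMatType
        em->setTermCriteria(cv::TermCriteria(
            cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, FLT_EPSILON));
        cv::Mat labels;
        em->trainEM(samples, cv::noArray(), labels, cv::noArray()); // replaces the static EM::train()
    }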

View File

@ -50,46 +50,33 @@
namespace cv {
namespace ml {
KNearest::Params::Params(int k, bool isclassifier_, int Emax_, int algorithmType_) :
defaultK(k),
isclassifier(isclassifier_),
Emax(Emax_),
algorithmType(algorithmType_)
{
}
const String NAME_BRUTE_FORCE = "opencv_ml_knn";
const String NAME_KDTREE = "opencv_ml_knn_kd";
class KNearestImpl : public KNearest
class Impl
{
public:
KNearestImpl(const Params& p)
Impl()
{
params = p;
defaultK = 10;
isclassifier = true;
Emax = INT_MAX;
}
virtual ~KNearestImpl() {}
Params getParams() const { return params; }
void setParams(const Params& p) { params = p; }
bool isClassifier() const { return params.isclassifier; }
bool isTrained() const { return !samples.empty(); }
String getDefaultModelName() const { return "opencv_ml_knn"; }
void clear()
{
samples.release();
responses.release();
}
int getVarCount() const { return samples.cols; }
virtual ~Impl() {}
virtual String getModelName() const = 0;
virtual int getType() const = 0;
virtual float findNearest( InputArray _samples, int k,
OutputArray _results,
OutputArray _neighborResponses,
OutputArray _dists ) const = 0;
bool train( const Ptr<TrainData>& data, int flags )
{
Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
Mat new_responses;
data->getTrainResponses().convertTo(new_responses, CV_32F);
bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty();
bool update = (flags & ml::KNearest::UPDATE_MODEL) != 0 && !samples.empty();
CV_Assert( new_samples.type() == CV_32F );
@ -106,9 +93,53 @@ public:
samples.push_back(new_samples);
responses.push_back(new_responses);
doTrain(samples);
return true;
}
virtual void doTrain(InputArray points) { (void)points; }
void clear()
{
samples.release();
responses.release();
}
void read( const FileNode& fn )
{
clear();
isclassifier = (int)fn["is_classifier"] != 0;
defaultK = (int)fn["default_k"];
fn["samples"] >> samples;
fn["responses"] >> responses;
}
void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)isclassifier;
fs << "default_k" << defaultK;
fs << "samples" << samples;
fs << "responses" << responses;
}
public:
int defaultK;
bool isclassifier;
int Emax;
Mat samples;
Mat responses;
};
class BruteForceImpl : public Impl
{
public:
String getModelName() const { return NAME_BRUTE_FORCE; }
int getType() const { return ml::KNearest::BRUTE_FORCE; }
void findNearestCore( const Mat& _samples, int k0, const Range& range,
Mat* results, Mat* neighbor_responses,
Mat* dists, float* presult ) const
@ -199,7 +230,7 @@ public:
if( results || testidx+range.start == 0 )
{
if( !params.isclassifier || k == 1 )
if( !isclassifier || k == 1 )
{
float s = 0.f;
for( j = 0; j < k; j++ )
@ -251,7 +282,7 @@ public:
struct findKNearestInvoker : public ParallelLoopBody
{
findKNearestInvoker(const KNearestImpl* _p, int _k, const Mat& __samples,
findKNearestInvoker(const BruteForceImpl* _p, int _k, const Mat& __samples,
Mat* __results, Mat* __neighbor_responses, Mat* __dists, float* _presult)
{
p = _p;
@ -273,7 +304,7 @@ public:
}
}
const KNearestImpl* p;
const BruteForceImpl* p;
int k;
const Mat* _samples;
Mat* _results;
@ -324,88 +355,18 @@ public:
//invoker(Range(0, testcount));
return result;
}
float predict(InputArray inputs, OutputArray outputs, int) const
{
return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() );
}
void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)params.isclassifier;
fs << "default_k" << params.defaultK;
fs << "samples" << samples;
fs << "responses" << responses;
}
void read( const FileNode& fn )
{
clear();
params.isclassifier = (int)fn["is_classifier"] != 0;
params.defaultK = (int)fn["default_k"];
fn["samples"] >> samples;
fn["responses"] >> responses;
}
Mat samples;
Mat responses;
Params params;
};
class KNearestKDTreeImpl : public KNearest
class KDTreeImpl : public Impl
{
public:
KNearestKDTreeImpl(const Params& p)
String getModelName() const { return NAME_KDTREE; }
int getType() const { return ml::KNearest::KDTREE; }
void doTrain(InputArray points)
{
params = p;
}
virtual ~KNearestKDTreeImpl() {}
Params getParams() const { return params; }
void setParams(const Params& p) { params = p; }
bool isClassifier() const { return params.isclassifier; }
bool isTrained() const { return !samples.empty(); }
String getDefaultModelName() const { return "opencv_ml_knn_kd"; }
void clear()
{
samples.release();
responses.release();
}
int getVarCount() const { return samples.cols; }
bool train( const Ptr<TrainData>& data, int flags )
{
Mat new_samples = data->getTrainSamples(ROW_SAMPLE);
Mat new_responses;
data->getTrainResponses().convertTo(new_responses, CV_32F);
bool update = (flags & UPDATE_MODEL) != 0 && !samples.empty();
CV_Assert( new_samples.type() == CV_32F );
if( !update )
{
clear();
}
else
{
CV_Assert( new_samples.cols == samples.cols &&
new_responses.cols == responses.cols );
}
samples.push_back(new_samples);
responses.push_back(new_responses);
tr.build(samples);
return true;
tr.build(points);
}
float findNearest( InputArray _samples, int k,
@ -460,51 +421,97 @@ public:
{
_d = d.row(i);
}
tr.findNearest(test_samples.row(i), k, params.Emax, _res, _nr, _d, noArray());
tr.findNearest(test_samples.row(i), k, Emax, _res, _nr, _d, noArray());
}
return result; // currently always 0
}
float predict(InputArray inputs, OutputArray outputs, int) const
KDTree tr;
};
//================================================================
class KNearestImpl : public KNearest
{
return findNearest( inputs, params.defaultK, outputs, noArray(), noArray() );
CV_IMPL_PROPERTY(int, DefaultK, impl->defaultK)
CV_IMPL_PROPERTY(bool, IsClassifier, impl->isclassifier)
CV_IMPL_PROPERTY(int, Emax, impl->Emax)
public:
int getAlgorithmType() const
{
return impl->getType();
}
void setAlgorithmType(int val)
{
if (val != BRUTE_FORCE && val != KDTREE)
val = BRUTE_FORCE;
initImpl(val);
}
public:
KNearestImpl()
{
initImpl(BRUTE_FORCE);
}
~KNearestImpl()
{
}
bool isClassifier() const { return impl->isclassifier; }
bool isTrained() const { return !impl->samples.empty(); }
int getVarCount() const { return impl->samples.cols; }
void write( FileStorage& fs ) const
{
fs << "is_classifier" << (int)params.isclassifier;
fs << "default_k" << params.defaultK;
fs << "samples" << samples;
fs << "responses" << responses;
impl->write(fs);
}
void read( const FileNode& fn )
{
clear();
params.isclassifier = (int)fn["is_classifier"] != 0;
params.defaultK = (int)fn["default_k"];
fn["samples"] >> samples;
fn["responses"] >> responses;
int algorithmType = BRUTE_FORCE;
if (fn.name() == NAME_KDTREE)
algorithmType = KDTREE;
initImpl(algorithmType);
impl->read(fn);
}
KDTree tr;
float findNearest( InputArray samples, int k,
OutputArray results,
OutputArray neighborResponses=noArray(),
OutputArray dist=noArray() ) const
{
return impl->findNearest(samples, k, results, neighborResponses, dist);
}
Mat samples;
Mat responses;
Params params;
float predict(InputArray inputs, OutputArray outputs, int) const
{
return impl->findNearest( inputs, impl->defaultK, outputs, noArray(), noArray() );
}
bool train( const Ptr<TrainData>& data, int flags )
{
return impl->train(data, flags);
}
String getDefaultModelName() const { return impl->getModelName(); }
protected:
void initImpl(int algorithmType)
{
if (algorithmType != KDTREE)
impl = makePtr<BruteForceImpl>();
else
impl = makePtr<KDTreeImpl>();
}
Ptr<Impl> impl;
};
Ptr<KNearest> KNearest::create(const Params& p)
Ptr<KNearest> KNearest::create()
{
if (KDTREE==p.algorithmType)
{
return makePtr<KNearestKDTreeImpl>(p);
}
return makePtr<KNearestImpl>(p);
return makePtr<KNearestImpl>();
}
}
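The same pattern applies to KNearest, where the backend (brute force vs. KD-tree) is now switched at runtime via setAlgorithmType() rather than chosen at create() time; a sketch with an assumed TrainData instance:

    #include <opencv2/ml.hpp>

    void trainKnnSketch(const cv::Ptr<cv::ml::TrainData>& tdata, const cv::Mat& testSamples)
    {
        cv::Ptr<cv::ml::KNearest> knn = cv::ml::KNearest::create();
        knn->setDefaultK(10);                                 // was Params::defaultK
        knn->setIsClassifier(true);                           // was Params::isclassifier
        knn->setAlgorithmType(cv::ml::KNearest::BRUTE_FORCE); // or KNearest::KDTREE
        knn->train(tdata);
        cv::Mat results;
        knn->findNearest(testSamples, 4, results);            // k = 4 neighbors
    }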

View File

@ -60,31 +60,41 @@ using namespace std;
namespace cv {
namespace ml {
LogisticRegression::Params::Params(double learning_rate,
int iters,
int method,
int normlization,
int reg,
int batch_size)
class LrParams
{
alpha = learning_rate;
num_iters = iters;
norm = normlization;
regularized = reg;
train_method = method;
mini_batch_size = batch_size;
public:
LrParams()
{
alpha = 0.001;
num_iters = 1000;
norm = LogisticRegression::REG_L2;
train_method = LogisticRegression::BATCH;
mini_batch_size = 1;
term_crit = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, num_iters, alpha);
}
double alpha; //!< learning rate.
int num_iters; //!< number of iterations.
int norm;
int train_method;
int mini_batch_size;
TermCriteria term_crit;
};
class LogisticRegressionImpl : public LogisticRegression
{
public:
LogisticRegressionImpl(const Params& pms)
: params(pms)
{
}
LogisticRegressionImpl() { }
virtual ~LogisticRegressionImpl() {}
CV_IMPL_PROPERTY(double, LearningRate, params.alpha)
CV_IMPL_PROPERTY(int, Iterations, params.num_iters)
CV_IMPL_PROPERTY(int, Regularization, params.norm)
CV_IMPL_PROPERTY(int, TrainMethod, params.train_method)
CV_IMPL_PROPERTY(int, MiniBatchSize, params.mini_batch_size)
CV_IMPL_PROPERTY(TermCriteria, TermCriteria, params.term_crit)
virtual bool train( const Ptr<TrainData>& trainData, int=0 );
virtual float predict(InputArray samples, OutputArray results, int) const;
virtual void clear();
@ -103,7 +113,7 @@ protected:
bool set_label_map(const Mat& _labels_i);
Mat remap_labels(const Mat& _labels_i, const map<int, int>& lmap) const;
protected:
Params params;
LrParams params;
Mat learnt_thetas;
map<int, int> forward_mapper;
map<int, int> reverse_mapper;
@ -111,9 +121,9 @@ protected:
Mat labels_n;
};
Ptr<LogisticRegression> LogisticRegression::create(const Params& params)
Ptr<LogisticRegression> LogisticRegression::create()
{
return makePtr<LogisticRegressionImpl>(params);
return makePtr<LogisticRegressionImpl>();
}
bool LogisticRegressionImpl::train(const Ptr<TrainData>& trainData, int)
@ -312,7 +322,7 @@ double LogisticRegressionImpl::compute_cost(const Mat& _data, const Mat& _labels
theta_b = _init_theta(Range(1, n), Range::all());
multiply(theta_b, theta_b, theta_c, 1);
if(this->params.regularized > 0)
if(params.norm != REG_NONE)
{
llambda = 1;
}
@ -367,7 +377,7 @@ Mat LogisticRegressionImpl::compute_batch_gradient(const Mat& _data, const Mat&
m = _data.rows;
n = _data.cols;
if(this->params.regularized > 0)
if(params.norm != REG_NONE)
{
llambda = 1;
}
@ -439,7 +449,7 @@ Mat LogisticRegressionImpl::compute_mini_batch_gradient(const Mat& _data, const
Mat data_d;
Mat labels_l;
if(this->params.regularized > 0)
if(params.norm != REG_NONE)
{
lambda_l = 1;
}
@ -570,7 +580,6 @@ void LogisticRegressionImpl::write(FileStorage& fs) const
fs<<"alpha"<<this->params.alpha;
fs<<"iterations"<<this->params.num_iters;
fs<<"norm"<<this->params.norm;
fs<<"regularized"<<this->params.regularized;
fs<<"train_method"<<this->params.train_method;
if(this->params.train_method == LogisticRegression::MINI_BATCH)
{
@ -592,7 +601,6 @@ void LogisticRegressionImpl::read(const FileNode& fn)
this->params.alpha = (double)fn["alpha"];
this->params.num_iters = (int)fn["iterations"];
this->params.norm = (int)fn["norm"];
this->params.regularized = (int)fn["regularized"];
this->params.train_method = (int)fn["train_method"];
if(this->params.train_method == LogisticRegression::MINI_BATCH)
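For illustration, the removed LogisticRegression::Params block now maps onto property setters (the values here are the LrParams defaults above; tdata is an assumed TrainData). Note that the separate `regularized` flag is gone: passing REG_NONE to setRegularization() is what now disables the penalty, as the three `params.norm != REG_NONE` checks above show.

    #include <opencv2/ml.hpp>

    void trainLrSketch(const cv::Ptr<cv::ml::TrainData>& tdata)
    {
        cv::Ptr<cv::ml::LogisticRegression> lr = cv::ml::LogisticRegression::create();
        lr->setLearningRate(0.001);                            // was Params::alpha
        lr->setIterations(1000);                               // was Params::num_iters
        lr->setRegularization(cv::ml::LogisticRegression::REG_L2);
        lr->setTrainMethod(cv::ml::LogisticRegression::BATCH);
        lr->setMiniBatchSize(1);
        lr->train(tdata);
    }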

View File

@ -43,7 +43,6 @@
namespace cv {
namespace ml {
NormalBayesClassifier::Params::Params() {}
class NormalBayesClassifierImpl : public NormalBayesClassifier
{
@ -53,9 +52,6 @@ public:
nallvars = 0;
}
void setParams(const Params&) {}
Params getParams() const { return Params(); }
bool train( const Ptr<TrainData>& trainData, int flags )
{
const float min_variation = FLT_EPSILON;
@ -455,7 +451,7 @@ public:
};
Ptr<NormalBayesClassifier> NormalBayesClassifier::create(const Params&)
Ptr<NormalBayesClassifier> NormalBayesClassifier::create()
{
Ptr<NormalBayesClassifierImpl> p = makePtr<NormalBayesClassifierImpl>();
return p;
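NormalBayesClassifier had no tunable parameters (its Params struct was empty), so here the change is pure interface cleanup; the resulting call is simply:

    cv::Ptr<cv::ml::NormalBayesClassifier> nb = cv::ml::NormalBayesClassifier::create();
    nb->train(tdata);   // tdata: an assumed cv::Ptr<cv::ml::TrainData>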

View File

@ -120,6 +120,91 @@ namespace ml
return termCrit;
}
struct TreeParams
{
TreeParams();
TreeParams( int maxDepth, int minSampleCount,
double regressionAccuracy, bool useSurrogates,
int maxCategories, int CVFolds,
bool use1SERule, bool truncatePrunedTree,
const Mat& priors );
inline void setMaxCategories(int val)
{
if( val < 2 )
CV_Error( CV_StsOutOfRange, "max_categories should be >= 2" );
maxCategories = std::min(val, 15 );
}
inline void setMaxDepth(int val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange, "max_depth should be >= 0" );
maxDepth = std::min( val, 25 );
}
inline void setMinSampleCount(int val)
{
minSampleCount = std::max(val, 1);
}
inline void setCVFolds(int val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange,
"params.CVFolds should be =0 (the tree is not pruned) "
"or n>0 (tree is pruned using n-fold cross-validation)" );
if( val == 1 )
val = 0;
CVFolds = val;
}
inline void setRegressionAccuracy(float val)
{
if( val < 0 )
CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );
regressionAccuracy = val;
}
inline int getMaxCategories() const { return maxCategories; }
inline int getMaxDepth() const { return maxDepth; }
inline int getMinSampleCount() const { return minSampleCount; }
inline int getCVFolds() const { return CVFolds; }
inline float getRegressionAccuracy() const { return regressionAccuracy; }
CV_IMPL_PROPERTY(bool, UseSurrogates, useSurrogates)
CV_IMPL_PROPERTY(bool, Use1SERule, use1SERule)
CV_IMPL_PROPERTY(bool, TruncatePrunedTree, truncatePrunedTree)
CV_IMPL_PROPERTY_S(cv::Mat, Priors, priors)
public:
bool useSurrogates;
bool use1SERule;
bool truncatePrunedTree;
Mat priors;
protected:
int maxCategories;
int maxDepth;
int minSampleCount;
int CVFolds;
float regressionAccuracy;
};
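The range checks that previously lived in DTreesImpl::setDParams now run eagerly inside these setters. TreeParams is internal to the module, so the following only illustrates the semantics defined above:

    TreeParams p;             // internal struct, see precomp.hpp above
    p.setMaxDepth(100);       // stored as std::min(100, 25) == 25
    p.setMaxCategories(50);   // stored as std::min(50, 15) == 15
    p.setCVFolds(1);          // 1 is normalized to 0, i.e. no pruning
    // p.setMaxDepth(-1);     // would raise CV_StsOutOfRange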
struct RTreeParams
{
RTreeParams();
RTreeParams(bool calcVarImportance, int nactiveVars, TermCriteria termCrit );
bool calcVarImportance;
int nactiveVars;
TermCriteria termCrit;
};
struct BoostTreeParams
{
BoostTreeParams();
BoostTreeParams(int boostType, int weakCount, double weightTrimRate);
int boostType;
int weakCount;
double weightTrimRate;
};
class DTreesImpl : public DTrees
{
public:
@ -191,6 +276,16 @@ namespace ml
int maxSubsetSize;
};
CV_WRAP_SAME_PROPERTY(int, MaxCategories, params)
CV_WRAP_SAME_PROPERTY(int, MaxDepth, params)
CV_WRAP_SAME_PROPERTY(int, MinSampleCount, params)
CV_WRAP_SAME_PROPERTY(int, CVFolds, params)
CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, params)
CV_WRAP_SAME_PROPERTY(bool, Use1SERule, params)
CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, params)
CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, params)
CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, params)
DTreesImpl();
virtual ~DTreesImpl();
virtual void clear();
@ -202,8 +297,7 @@ namespace ml
int getCatCount(int vi) const { return catOfs[vi][1] - catOfs[vi][0]; }
int getSubsetSize(int vi) const { return (getCatCount(vi) + 31)/32; }
virtual void setDParams(const Params& _params);
virtual Params getDParams() const;
virtual void setDParams(const TreeParams& _params);
virtual void startTraining( const Ptr<TrainData>& trainData, int flags );
virtual void endTraining();
virtual void initCompVarIdx();
@ -250,7 +344,7 @@ namespace ml
virtual const std::vector<Split>& getSplits() const { return splits; }
virtual const std::vector<int>& getSubsets() const { return subsets; }
Params params0, params;
TreeParams params;
vector<int> varIdx;
vector<int> compVarIdx;

View File

@ -48,21 +48,16 @@ namespace ml {
//////////////////////////////////////////////////////////////////////////////////////////
// Random trees //
//////////////////////////////////////////////////////////////////////////////////////////
RTrees::Params::Params()
: DTrees::Params(5, 10, 0.f, false, 10, 0, false, false, Mat())
RTreeParams::RTreeParams()
{
calcVarImportance = false;
nactiveVars = 0;
termCrit = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, 0.1);
}
RTrees::Params::Params( int _maxDepth, int _minSampleCount,
double _regressionAccuracy, bool _useSurrogates,
int _maxCategories, const Mat& _priors,
bool _calcVarImportance, int _nactiveVars,
RTreeParams::RTreeParams(bool _calcVarImportance,
int _nactiveVars,
TermCriteria _termCrit )
: DTrees::Params(_maxDepth, _minSampleCount, _regressionAccuracy, _useSurrogates,
_maxCategories, 0, false, false, _priors)
{
calcVarImportance = _calcVarImportance;
nactiveVars = _nactiveVars;
@ -73,19 +68,20 @@ RTrees::Params::Params( int _maxDepth, int _minSampleCount,
class DTreesImplForRTrees : public DTreesImpl
{
public:
DTreesImplForRTrees() {}
DTreesImplForRTrees()
{
params.setMaxDepth(5);
params.setMinSampleCount(10);
params.setRegressionAccuracy(0.f);
params.useSurrogates = false;
params.setMaxCategories(10);
params.setCVFolds(0);
params.use1SERule = false;
params.truncatePrunedTree = false;
params.priors = Mat();
}
virtual ~DTreesImplForRTrees() {}
void setRParams(const RTrees::Params& p)
{
rparams = p;
}
RTrees::Params getRParams() const
{
return rparams;
}
void clear()
{
DTreesImpl::clear();
@ -129,10 +125,6 @@ public:
bool train( const Ptr<TrainData>& trainData, int flags )
{
Params dp(rparams.maxDepth, rparams.minSampleCount, rparams.regressionAccuracy,
rparams.useSurrogates, rparams.maxCategories, rparams.CVFolds,
rparams.use1SERule, rparams.truncatePrunedTree, rparams.priors);
setDParams(dp);
startTraining(trainData, flags);
int treeidx, ntrees = (rparams.termCrit.type & TermCriteria::COUNT) != 0 ?
rparams.termCrit.maxCount : 10000;
@ -326,12 +318,6 @@ public:
void readParams( const FileNode& fn )
{
DTreesImpl::readParams(fn);
rparams.maxDepth = params0.maxDepth;
rparams.minSampleCount = params0.minSampleCount;
rparams.regressionAccuracy = params0.regressionAccuracy;
rparams.useSurrogates = params0.useSurrogates;
rparams.maxCategories = params0.maxCategories;
rparams.priors = params0.priors;
FileNode tparams_node = fn["training_params"];
rparams.nactiveVars = (int)tparams_node["nactive_vars"];
@ -361,7 +347,7 @@ public:
}
}
RTrees::Params rparams;
RTreeParams rparams;
double oobError;
vector<float> varImportance;
vector<int> allVars, activeVars;
@ -372,6 +358,20 @@ public:
class RTreesImpl : public RTrees
{
public:
CV_IMPL_PROPERTY(bool, CalculateVarImportance, impl.rparams.calcVarImportance)
CV_IMPL_PROPERTY(int, ActiveVarCount, impl.rparams.nactiveVars)
CV_IMPL_PROPERTY_S(TermCriteria, TermCriteria, impl.rparams.termCrit)
CV_WRAP_SAME_PROPERTY(int, MaxCategories, impl.params)
CV_WRAP_SAME_PROPERTY(int, MaxDepth, impl.params)
CV_WRAP_SAME_PROPERTY(int, MinSampleCount, impl.params)
CV_WRAP_SAME_PROPERTY(int, CVFolds, impl.params)
CV_WRAP_SAME_PROPERTY(bool, UseSurrogates, impl.params)
CV_WRAP_SAME_PROPERTY(bool, Use1SERule, impl.params)
CV_WRAP_SAME_PROPERTY(bool, TruncatePrunedTree, impl.params)
CV_WRAP_SAME_PROPERTY(float, RegressionAccuracy, impl.params)
CV_WRAP_SAME_PROPERTY_S(cv::Mat, Priors, impl.params)
RTreesImpl() {}
virtual ~RTreesImpl() {}
@ -397,9 +397,6 @@ public:
impl.read(fn);
}
void setRParams(const Params& p) { impl.setRParams(p); }
Params getRParams() const { return impl.getRParams(); }
Mat getVarImportance() const { return Mat_<float>(impl.varImportance, true); }
int getVarCount() const { return impl.getVarCount(); }
@ -415,11 +412,9 @@ public:
};
Ptr<RTrees> RTrees::create(const Params& params)
Ptr<RTrees> RTrees::create()
{
Ptr<RTreesImpl> p = makePtr<RTreesImpl>();
p->setRParams(params);
return p;
return makePtr<RTreesImpl>();
}
}}
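With per-tree settings absorbed into TreeParams and forest-level settings kept in RTreeParams, a configured forest reads like this (mirrors the updated test harness later in this diff; tdata assumed):

    #include <opencv2/ml.hpp>

    void trainForestSketch(const cv::Ptr<cv::ml::TrainData>& tdata)
    {
        cv::Ptr<cv::ml::RTrees> rf = cv::ml::RTrees::create();
        rf->setMaxDepth(5);                   // per-tree parameter (TreeParams)
        rf->setMinSampleCount(10);
        rf->setCalculateVarImportance(true);  // forest parameter (RTreeParams)
        rf->setActiveVarCount(0);             // 0 = implementation default
        rf->setTermCriteria(cv::TermCriteria(cv::TermCriteria::COUNT, 50, 0.1));
        rf->train(tdata);
    }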

View File

@ -103,7 +103,20 @@ static void checkParamGrid(const ParamGrid& pg)
}
// SVM training parameters
SVM::Params::Params()
struct SvmParams
{
int svmType;
int kernelType;
double gamma;
double coef0;
double degree;
double C;
double nu;
double p;
Mat classWeights;
TermCriteria termCrit;
SvmParams()
{
svmType = SVM::C_SVC;
kernelType = SVM::RBF;
@ -116,8 +129,7 @@ SVM::Params::Params()
termCrit = TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON );
}
SVM::Params::Params( int _svmType, int _kernelType,
SvmParams( int _svmType, int _kernelType,
double _degree, double _gamma, double _coef0,
double _Con, double _nu, double _p,
const Mat& _classWeights, TermCriteria _termCrit )
@ -134,23 +146,17 @@ SVM::Params::Params( int _svmType, int _kernelType,
termCrit = _termCrit;
}
};
/////////////////////////////////////// SVM kernel ///////////////////////////////////////
class SVMKernelImpl : public SVM::Kernel
{
public:
SVMKernelImpl()
{
}
SVMKernelImpl( const SVM::Params& _params )
SVMKernelImpl( const SvmParams& _params = SvmParams() )
{
params = _params;
}
virtual ~SVMKernelImpl()
{
}
int getType() const
{
return params.kernelType;
@ -327,7 +333,7 @@ public:
}
}
SVM::Params params;
SvmParams params;
};
@ -1185,7 +1191,7 @@ public:
int cache_size;
int max_cache_size;
Mat samples;
SVM::Params params;
SvmParams params;
vector<KernelRow> lru_cache;
int lru_first;
int lru_last;
@ -1215,6 +1221,7 @@ public:
SVMImpl()
{
clear();
checkParams();
}
~SVMImpl()
@ -1235,13 +1242,39 @@ public:
return sv;
}
void setParams( const Params& _params, const Ptr<Kernel>& _kernel )
CV_IMPL_PROPERTY(int, Type, params.svmType)
CV_IMPL_PROPERTY(double, Gamma, params.gamma)
CV_IMPL_PROPERTY(double, Coef0, params.coef0)
CV_IMPL_PROPERTY(double, Degree, params.degree)
CV_IMPL_PROPERTY(double, C, params.C)
CV_IMPL_PROPERTY(double, Nu, params.nu)
CV_IMPL_PROPERTY(double, P, params.p)
CV_IMPL_PROPERTY_S(cv::Mat, ClassWeights, params.classWeights)
CV_IMPL_PROPERTY_S(cv::TermCriteria, TermCriteria, params.termCrit)
int getKernelType() const
{
params = _params;
return params.kernelType;
}
void setKernel(int kernelType)
{
params.kernelType = kernelType;
if (kernelType != CUSTOM)
kernel = makePtr<SVMKernelImpl>(params);
}
void setCustomKernel(const Ptr<Kernel> &_kernel)
{
params.kernelType = CUSTOM;
kernel = _kernel;
}
void checkParams()
{
int kernelType = params.kernelType;
int svmType = params.svmType;
if (kernelType != CUSTOM)
{
if( kernelType != LINEAR && kernelType != POLY &&
kernelType != SIGMOID && kernelType != RBF &&
kernelType != INTER && kernelType != CHI2)
@ -1262,6 +1295,16 @@ public:
else if( params.degree <= 0 )
CV_Error( CV_StsOutOfRange, "The kernel parameter <degree> must be positive" );
kernel = makePtr<SVMKernelImpl>(params);
}
else
{
if (!kernel)
CV_Error( CV_StsBadArg, "Custom kernel is not set" );
}
int svmType = params.svmType;
if( svmType != C_SVC && svmType != NU_SVC &&
svmType != ONE_CLASS && svmType != EPS_SVR &&
svmType != NU_SVR )
@ -1285,28 +1328,18 @@ public:
if( svmType != C_SVC )
params.classWeights.release();
termCrit = params.termCrit;
if( !(termCrit.type & TermCriteria::EPS) )
termCrit.epsilon = DBL_EPSILON;
termCrit.epsilon = std::max(termCrit.epsilon, DBL_EPSILON);
if( !(termCrit.type & TermCriteria::COUNT) )
termCrit.maxCount = INT_MAX;
termCrit.maxCount = std::max(termCrit.maxCount, 1);
if( _kernel )
kernel = _kernel;
else
kernel = makePtr<SVMKernelImpl>(params);
if( !(params.termCrit.type & TermCriteria::EPS) )
params.termCrit.epsilon = DBL_EPSILON;
params.termCrit.epsilon = std::max(params.termCrit.epsilon, DBL_EPSILON);
if( !(params.termCrit.type & TermCriteria::COUNT) )
params.termCrit.maxCount = INT_MAX;
params.termCrit.maxCount = std::max(params.termCrit.maxCount, 1);
}
Params getParams() const
void setParams( const SvmParams& _params)
{
return params;
}
Ptr<Kernel> getKernel() const
{
return kernel;
params = _params;
checkParams();
}
int getSVCount(int i) const
@ -1335,9 +1368,9 @@ public:
_responses.convertTo(_yf, CV_32F);
bool ok =
svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, termCrit ) :
svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, termCrit ) :
svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, termCrit ) : false;
svmType == ONE_CLASS ? Solver::solve_one_class( _samples, params.nu, kernel, _alpha, sinfo, params.termCrit ) :
svmType == EPS_SVR ? Solver::solve_eps_svr( _samples, _yf, params.p, params.C, kernel, _alpha, sinfo, params.termCrit ) :
svmType == NU_SVR ? Solver::solve_nu_svr( _samples, _yf, params.nu, params.C, kernel, _alpha, sinfo, params.termCrit ) : false;
if( !ok )
return false;
@ -1397,7 +1430,7 @@ public:
//check that while cross-validation there were the samples from all the classes
if( class_ranges[class_count] <= 0 )
CV_Error( CV_StsBadArg, "While cross-validation one or more of the classes have "
"been fell out of the sample. Try to enlarge <CvSVMParams::k_fold>" );
"been fell out of the sample. Try to enlarge <Params::k_fold>" );
if( svmType == NU_SVC )
{
@ -1448,10 +1481,10 @@ public:
DecisionFunc df;
bool ok = params.svmType == C_SVC ?
Solver::solve_c_svc( temp_samples, temp_y, Cp, Cn,
kernel, _alpha, sinfo, termCrit ) :
kernel, _alpha, sinfo, params.termCrit ) :
params.svmType == NU_SVC ?
Solver::solve_nu_svc( temp_samples, temp_y, params.nu,
kernel, _alpha, sinfo, termCrit ) :
kernel, _alpha, sinfo, params.termCrit ) :
false;
if( !ok )
return false;
@ -1557,6 +1590,8 @@ public:
{
clear();
checkParams();
int svmType = params.svmType;
Mat samples = data->getTrainSamples();
Mat responses;
@ -1586,6 +1621,8 @@ public:
ParamGrid nu_grid, ParamGrid coef_grid, ParamGrid degree_grid,
bool balanced )
{
checkParams();
int svmType = params.svmType;
RNG rng((uint64)-1);
@ -1708,7 +1745,7 @@ public:
int test_sample_count = (sample_count + k_fold/2)/k_fold;
int train_sample_count = sample_count - test_sample_count;
Params best_params = params;
SvmParams best_params = params;
double min_error = FLT_MAX;
int rtype = responses.type();
@ -1729,7 +1766,7 @@ public:
FOR_IN_GRID(degree, degree_grid)
{
// make sure we updated the kernel and other parameters
setParams(params, Ptr<Kernel>() );
setParams(params);
double error = 0;
for( k = 0; k < k_fold; k++ )
@ -1919,7 +1956,9 @@ public:
kernelType == LINEAR ? "LINEAR" :
kernelType == POLY ? "POLY" :
kernelType == RBF ? "RBF" :
kernelType == SIGMOID ? "SIGMOID" : format("Unknown_%d", kernelType);
kernelType == SIGMOID ? "SIGMOID" :
kernelType == CHI2 ? "CHI2" :
kernelType == INTER ? "INTER" : format("Unknown_%d", kernelType);
fs << "svmType" << svm_type_str;
@ -2036,7 +2075,7 @@ public:
void read_params( const FileNode& fn )
{
Params _params;
SvmParams _params;
// check for old naming
String svm_type_str = (String)(fn["svm_type"].empty() ? fn["svmType"] : fn["svm_type"]);
@ -2059,10 +2098,12 @@ public:
kernel_type_str == "LINEAR" ? LINEAR :
kernel_type_str == "POLY" ? POLY :
kernel_type_str == "RBF" ? RBF :
kernel_type_str == "SIGMOID" ? SIGMOID : -1;
kernel_type_str == "SIGMOID" ? SIGMOID :
kernel_type_str == "CHI2" ? CHI2 :
kernel_type_str == "INTER" ? INTER : CUSTOM;
if( kernelType < 0 )
CV_Error( CV_StsParseError, "Missing of invalid SVM kernel type" );
if( kernelType == CUSTOM )
CV_Error( CV_StsParseError, "Invalid SVM kernel type (or custom kernel)" );
_params.svmType = svmType;
_params.kernelType = kernelType;
@ -2086,7 +2127,7 @@ public:
else
_params.termCrit = TermCriteria( TermCriteria::EPS + TermCriteria::COUNT, 1000, FLT_EPSILON );
setParams( _params, Ptr<Kernel>() );
setParams( _params );
}
void read( const FileNode& fn )
@ -2154,8 +2195,7 @@ public:
optimize_linear_svm();
}
Params params;
TermCriteria termCrit;
SvmParams params;
Mat class_labels;
int var_count;
Mat sv;
@ -2167,11 +2207,9 @@ public:
};
Ptr<SVM> SVM::create(const Params& params, const Ptr<SVM::Kernel>& kernel)
Ptr<SVM> SVM::create()
{
Ptr<SVMImpl> p = makePtr<SVMImpl>();
p->setParams(params, kernel);
return p;
return makePtr<SVMImpl>();
}
}
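A sketch of the reworked SVM flow: setKernel() rebuilds the built-in kernel object on the spot, setCustomKernel() switches the type to CUSTOM, and train() now runs checkParams() before touching the data (tdata assumed, values illustrative):

    #include <opencv2/ml.hpp>

    void trainSvmSketch(const cv::Ptr<cv::ml::TrainData>& tdata)
    {
        cv::Ptr<cv::ml::SVM> svm = cv::ml::SVM::create();
        svm->setType(cv::ml::SVM::C_SVC);
        svm->setKernel(cv::ml::SVM::RBF);   // instantiates SVMKernelImpl immediately
        svm->setC(1.0);
        svm->setGamma(0.5);
        svm->train(tdata);                  // calls checkParams() up front
    }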

View File

@ -48,18 +48,7 @@ namespace ml {
using std::vector;
void DTrees::setDParams(const DTrees::Params&)
{
CV_Error(CV_StsNotImplemented, "");
}
DTrees::Params DTrees::getDParams() const
{
CV_Error(CV_StsNotImplemented, "");
return DTrees::Params();
}
DTrees::Params::Params()
TreeParams::TreeParams()
{
maxDepth = INT_MAX;
minSampleCount = 10;
@ -72,7 +61,7 @@ DTrees::Params::Params()
priors = Mat();
}
DTrees::Params::Params( int _maxDepth, int _minSampleCount,
TreeParams::TreeParams(int _maxDepth, int _minSampleCount,
double _regressionAccuracy, bool _useSurrogates,
int _maxCategories, int _CVFolds,
bool _use1SERule, bool _truncatePrunedTree,
@ -248,7 +237,7 @@ const vector<int>& DTreesImpl::getActiveVars()
int DTreesImpl::addTree(const vector<int>& sidx )
{
size_t n = (params.maxDepth > 0 ? (1 << params.maxDepth) : 1024) + w->wnodes.size();
size_t n = (params.getMaxDepth() > 0 ? (1 << params.getMaxDepth()) : 1024) + w->wnodes.size();
w->wnodes.reserve(n);
w->wsplits.reserve(n);
@ -257,7 +246,7 @@ int DTreesImpl::addTree(const vector<int>& sidx )
w->wsplits.clear();
w->wsubsets.clear();
int cv_n = params.CVFolds;
int cv_n = params.getCVFolds();
if( cv_n > 0 )
{
@ -347,34 +336,9 @@ int DTreesImpl::addTree(const vector<int>& sidx )
return root;
}
DTrees::Params DTreesImpl::getDParams() const
void DTreesImpl::setDParams(const TreeParams& _params)
{
return params0;
}
void DTreesImpl::setDParams(const Params& _params)
{
params0 = params = _params;
if( params.maxCategories < 2 )
CV_Error( CV_StsOutOfRange, "params.max_categories should be >= 2" );
params.maxCategories = std::min( params.maxCategories, 15 );
if( params.maxDepth < 0 )
CV_Error( CV_StsOutOfRange, "params.max_depth should be >= 0" );
params.maxDepth = std::min( params.maxDepth, 25 );
params.minSampleCount = std::max(params.minSampleCount, 1);
if( params.CVFolds < 0 )
CV_Error( CV_StsOutOfRange,
"params.CVFolds should be =0 (the tree is not pruned) "
"or n>0 (tree is pruned using n-fold cross-validation)" );
if( params.CVFolds == 1 )
params.CVFolds = 0;
if( params.regressionAccuracy < 0 )
CV_Error( CV_StsOutOfRange, "params.regression_accuracy should be >= 0" );
params = _params;
}
int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
@ -385,7 +349,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
node.parent = parent;
node.depth = parent >= 0 ? w->wnodes[parent].depth + 1 : 0;
int nfolds = params.CVFolds;
int nfolds = params.getCVFolds();
if( nfolds > 0 )
{
@ -400,7 +364,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
calcValue( nidx, sidx );
if( n <= params.minSampleCount || node.depth >= params.maxDepth )
if( n <= params.getMinSampleCount() || node.depth >= params.getMaxDepth() )
can_split = false;
else if( _isClassifier )
{
@ -415,7 +379,7 @@ int DTreesImpl::addNodeAndTrySplit( int parent, const vector<int>& sidx )
}
else
{
if( sqrt(node.node_risk) < params.regressionAccuracy )
if( sqrt(node.node_risk) < params.getRegressionAccuracy() )
can_split = false;
}
@ -493,7 +457,7 @@ int DTreesImpl::findBestSplit( const vector<int>& _sidx )
void DTreesImpl::calcValue( int nidx, const vector<int>& _sidx )
{
WNode* node = &w->wnodes[nidx];
int i, j, k, n = (int)_sidx.size(), cv_n = params.CVFolds;
int i, j, k, n = (int)_sidx.size(), cv_n = params.getCVFolds();
int m = (int)classLabels.size();
cv::AutoBuffer<double> buf(std::max(m, 3)*(cv_n+1));
@ -841,8 +805,8 @@ DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector<int>& _si
int m = (int)classLabels.size();
int base_size = m*(3 + mi) + mi + 1;
if( m > 2 && mi > params.maxCategories )
base_size += m*std::min(params.maxCategories, n) + mi;
if( m > 2 && mi > params.getMaxCategories() )
base_size += m*std::min(params.getMaxCategories(), n) + mi;
else
base_size += mi;
AutoBuffer<double> buf(base_size + n);
@ -880,9 +844,9 @@ DTreesImpl::WSplit DTreesImpl::findSplitCatClass( int vi, const vector<int>& _si
if( m > 2 )
{
if( mi > params.maxCategories )
if( mi > params.getMaxCategories() )
{
mi = std::min(params.maxCategories, n);
mi = std::min(params.getMaxCategories(), n);
cjk = c_weights + _mi;
cluster_labels = (int*)(cjk + m*mi);
clusterCategories( _cjk, _mi, m, cjk, mi, cluster_labels );
@ -1228,7 +1192,7 @@ int DTreesImpl::pruneCV( int root )
// 2. choose the best tree index (if need, apply 1SE rule).
// 3. store the best index and cut the branches.
int ti, tree_count = 0, j, cv_n = params.CVFolds, n = w->wnodes[root].sample_count;
int ti, tree_count = 0, j, cv_n = params.getCVFolds(), n = w->wnodes[root].sample_count;
// currently, 1SE for regression is not implemented
bool use_1se = params.use1SERule != 0 && _isClassifier;
double min_err = 0, min_err_se = 0;
@ -1294,7 +1258,7 @@ int DTreesImpl::pruneCV( int root )
double DTreesImpl::updateTreeRNC( int root, double T, int fold )
{
int nidx = root, pidx = -1, cv_n = params.CVFolds;
int nidx = root, pidx = -1, cv_n = params.getCVFolds();
double min_alpha = DBL_MAX;
for(;;)
@ -1350,7 +1314,7 @@ double DTreesImpl::updateTreeRNC( int root, double T, int fold )
bool DTreesImpl::cutTree( int root, double T, int fold, double min_alpha )
{
int cv_n = params.CVFolds, nidx = root, pidx = -1;
int cv_n = params.getCVFolds(), nidx = root, pidx = -1;
WNode* node = &w->wnodes[root];
if( node->left < 0 )
return true;
@ -1560,19 +1524,19 @@ float DTreesImpl::predict( InputArray _samples, OutputArray _results, int flags
void DTreesImpl::writeTrainingParams(FileStorage& fs) const
{
fs << "use_surrogates" << (params0.useSurrogates ? 1 : 0);
fs << "max_categories" << params0.maxCategories;
fs << "regression_accuracy" << params0.regressionAccuracy;
fs << "use_surrogates" << (params.useSurrogates ? 1 : 0);
fs << "max_categories" << params.getMaxCategories();
fs << "regression_accuracy" << params.getRegressionAccuracy();
fs << "max_depth" << params0.maxDepth;
fs << "min_sample_count" << params0.minSampleCount;
fs << "cross_validation_folds" << params0.CVFolds;
fs << "max_depth" << params.getMaxDepth();
fs << "min_sample_count" << params.getMinSampleCount();
fs << "cross_validation_folds" << params.getCVFolds();
if( params0.CVFolds > 1 )
fs << "use_1se_rule" << (params0.use1SERule ? 1 : 0);
if( params.getCVFolds() > 1 )
fs << "use_1se_rule" << (params.use1SERule ? 1 : 0);
if( !params0.priors.empty() )
fs << "priors" << params0.priors;
if( !params.priors.empty() )
fs << "priors" << params.priors;
}
void DTreesImpl::writeParams(FileStorage& fs) const
@ -1724,18 +1688,18 @@ void DTreesImpl::readParams( const FileNode& fn )
FileNode tparams_node = fn["training_params"];
params0 = Params();
TreeParams params0 = TreeParams();
if( !tparams_node.empty() ) // training parameters are not necessary
{
params0.useSurrogates = (int)tparams_node["use_surrogates"] != 0;
params0.maxCategories = (int)(tparams_node["max_categories"].empty() ? 16 : tparams_node["max_categories"]);
params0.regressionAccuracy = (float)tparams_node["regression_accuracy"];
params0.maxDepth = (int)tparams_node["max_depth"];
params0.minSampleCount = (int)tparams_node["min_sample_count"];
params0.CVFolds = (int)tparams_node["cross_validation_folds"];
params0.setMaxCategories((int)(tparams_node["max_categories"].empty() ? 16 : tparams_node["max_categories"]));
params0.setRegressionAccuracy((float)tparams_node["regression_accuracy"]);
params0.setMaxDepth((int)tparams_node["max_depth"]);
params0.setMinSampleCount((int)tparams_node["min_sample_count"]);
params0.setCVFolds((int)tparams_node["cross_validation_folds"]);
if( params0.CVFolds > 1 )
if( params0.getCVFolds() > 1 )
{
params.use1SERule = (int)tparams_node["use_1se_rule"] != 0;
}
@ -1964,11 +1928,9 @@ void DTreesImpl::read( const FileNode& fn )
readTree(fnodes);
}
Ptr<DTrees> DTrees::create(const DTrees::Params& params)
Ptr<DTrees> DTrees::create()
{
Ptr<DTreesImpl> p = makePtr<DTreesImpl>();
p->setDParams(params);
return p;
return makePtr<DTreesImpl>();
}
}
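Because the setters already validate, DTrees::create() can now hand back a default-constructed implementation; a configured single tree then looks like this (tdata assumed, values illustrative):

    #include <opencv2/ml.hpp>

    void trainTreeSketch(const cv::Ptr<cv::ml::TrainData>& tdata)
    {
        cv::Ptr<cv::ml::DTrees> dtree = cv::ml::DTrees::create();
        dtree->setMaxDepth(8);
        dtree->setMinSampleCount(2);
        dtree->setCVFolds(0);          // 0 = no cross-validation pruning
        dtree->setUseSurrogates(false);
        dtree->train(tdata);
    }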

View File

@ -330,7 +330,8 @@ void CV_KNearestTest::run( int /*start_from*/ )
}
// KNearest KDTree implementation
Ptr<KNearest> knearestKdt = KNearest::create(ml::KNearest::Params(10, true, INT_MAX, ml::KNearest::KDTREE));
Ptr<KNearest> knearestKdt = KNearest::create();
knearestKdt->setAlgorithmType(KNearest::KDTREE);
knearestKdt->train(trainData, ml::ROW_SAMPLE, trainLabels);
knearestKdt->findNearest(testData, 4, bestLabels);
if( !calcErr( bestLabels, testLabels, sizes, err, true ) )
@ -394,16 +395,18 @@ int CV_EMTest::runCase( int caseIndex, const EM_Params& params,
cv::Mat labels;
float err;
Ptr<EM> em;
EM::Params emp(params.nclusters, params.covMatType, params.termCrit);
Ptr<EM> em = EM::create();
em->setClustersNumber(params.nclusters);
em->setCovarianceMatrixType(params.covMatType);
em->setTermCriteria(params.termCrit);
if( params.startStep == EM::START_AUTO_STEP )
em = EM::train( trainData, noArray(), labels, noArray(), emp );
em->trainEM( trainData, noArray(), labels, noArray() );
else if( params.startStep == EM::START_E_STEP )
em = EM::train_startWithE( trainData, *params.means, *params.covs,
*params.weights, noArray(), labels, noArray(), emp );
em->trainE( trainData, *params.means, *params.covs,
*params.weights, noArray(), labels, noArray() );
else if( params.startStep == EM::START_M_STEP )
em = EM::train_startWithM( trainData, *params.probs,
noArray(), labels, noArray(), emp );
em->trainM( trainData, *params.probs,
noArray(), labels, noArray() );
// check train error
if( !calcErr( labels, trainLabels, sizes, err , false, false ) )
@ -543,7 +546,9 @@ protected:
Mat labels;
Ptr<EM> em = EM::train(samples, noArray(), labels, noArray(), EM::Params(nclusters));
Ptr<EM> em = EM::create();
em->setClustersNumber(nclusters);
em->trainEM(samples, noArray(), labels, noArray());
Mat firstResult(samples.rows, 1, CV_32SC1);
for( int i = 0; i < samples.rows; i++)
@ -644,8 +649,13 @@ protected:
samples1.push_back(sample);
}
}
Ptr<EM> model0 = EM::train(samples0, noArray(), noArray(), noArray(), EM::Params(3));
Ptr<EM> model1 = EM::train(samples1, noArray(), noArray(), noArray(), EM::Params(3));
Ptr<EM> model0 = EM::create();
model0->setClustersNumber(3);
model0->trainEM(samples0, noArray(), noArray(), noArray());
Ptr<EM> model1 = EM::create();
model1->setClustersNumber(3);
model1->trainEM(samples1, noArray(), noArray(), noArray());
Mat trainConfusionMat(2, 2, CV_32SC1, Scalar(0)),
testConfusionMat(2, 2, CV_32SC1, Scalar(0));

View File

@ -95,16 +95,13 @@ void CV_LRTest::run( int /*start_from*/ )
string dataFileName = ts->get_data_path() + "iris.data";
Ptr<TrainData> tdata = TrainData::loadFromCSV(dataFileName, 0);
LogisticRegression::Params params = LogisticRegression::Params();
params.alpha = 1.0;
params.num_iters = 10001;
params.norm = LogisticRegression::REG_L2;
params.regularized = 1;
params.train_method = LogisticRegression::BATCH;
params.mini_batch_size = 10;
// run LR classifier train classifier
Ptr<LogisticRegression> p = LogisticRegression::create(params);
Ptr<LogisticRegression> p = LogisticRegression::create();
p->setLearningRate(1.0);
p->setIterations(10001);
p->setRegularization(LogisticRegression::REG_L2);
p->setTrainMethod(LogisticRegression::BATCH);
p->setMiniBatchSize(10);
p->train(tdata);
// predict using the same data
@ -157,20 +154,17 @@ void CV_LRTest_SaveLoad::run( int /*start_from*/ )
Mat responses1, responses2;
Mat learnt_mat1, learnt_mat2;
LogisticRegression::Params params1 = LogisticRegression::Params();
params1.alpha = 1.0;
params1.num_iters = 10001;
params1.norm = LogisticRegression::REG_L2;
params1.regularized = 1;
params1.train_method = LogisticRegression::BATCH;
params1.mini_batch_size = 10;
// train and save the classifier
String filename = tempfile(".xml");
try
{
// run LR classifier train classifier
Ptr<LogisticRegression> lr1 = LogisticRegression::create(params1);
Ptr<LogisticRegression> lr1 = LogisticRegression::create();
lr1->setLearningRate(1.0);
lr1->setIterations(10001);
lr1->setRegularization(LogisticRegression::REG_L2);
lr1->setTrainMethod(LogisticRegression::BATCH);
lr1->setMiniBatchSize(10);
lr1->train(tdata);
lr1->predict(tdata->getSamples(), responses1);
learnt_mat1 = lr1->get_learnt_thetas();

View File

@ -73,30 +73,14 @@ int str_to_svm_kernel_type( String& str )
return -1;
}
Ptr<SVM> svm_train_auto( Ptr<TrainData> _data, SVM::Params _params,
int k_fold, ParamGrid C_grid, ParamGrid gamma_grid,
ParamGrid p_grid, ParamGrid nu_grid, ParamGrid coef_grid,
ParamGrid degree_grid )
{
Mat _train_data = _data->getSamples();
Mat _responses = _data->getResponses();
Mat _var_idx = _data->getVarIdx();
Mat _sample_idx = _data->getTrainSampleIdx();
Ptr<SVM> svm = SVM::create(_params);
if( svm->trainAuto( _data, k_fold, C_grid, gamma_grid, p_grid, nu_grid, coef_grid, degree_grid ) )
return svm;
return Ptr<SVM>();
}
// 4. em
// 5. ann
int str_to_ann_train_method( String& str )
{
if( !str.compare("BACKPROP") )
return ANN_MLP::Params::BACKPROP;
return ANN_MLP::BACKPROP;
if( !str.compare("RPROP") )
return ANN_MLP::Params::RPROP;
return ANN_MLP::RPROP;
CV_Error( CV_StsBadArg, "incorrect ann train method string" );
return -1;
}
@ -343,16 +327,16 @@ int CV_MLBaseTest::train( int testCaseIdx )
String svm_type_str, kernel_type_str;
modelParamsNode["svm_type"] >> svm_type_str;
modelParamsNode["kernel_type"] >> kernel_type_str;
SVM::Params params;
params.svmType = str_to_svm_type( svm_type_str );
params.kernelType = str_to_svm_kernel_type( kernel_type_str );
modelParamsNode["degree"] >> params.degree;
modelParamsNode["gamma"] >> params.gamma;
modelParamsNode["coef0"] >> params.coef0;
modelParamsNode["C"] >> params.C;
modelParamsNode["nu"] >> params.nu;
modelParamsNode["p"] >> params.p;
model = SVM::create(params);
Ptr<SVM> m = SVM::create();
m->setType(str_to_svm_type( svm_type_str ));
m->setKernel(str_to_svm_kernel_type( kernel_type_str ));
m->setDegree(modelParamsNode["degree"]);
m->setGamma(modelParamsNode["gamma"]);
m->setCoef0(modelParamsNode["coef0"]);
m->setC(modelParamsNode["C"]);
m->setNu(modelParamsNode["nu"]);
m->setP(modelParamsNode["p"]);
model = m;
}
else if( modelName == CV_EM )
{
@ -371,9 +355,13 @@ int CV_MLBaseTest::train( int testCaseIdx )
data->getVarIdx(), data->getTrainSampleIdx());
int layer_sz[] = { data->getNAllVars(), 100, 100, (int)cls_map.size() };
Mat layer_sizes( 1, (int)(sizeof(layer_sz)/sizeof(layer_sz[0])), CV_32S, layer_sz );
model = ANN_MLP::create(ANN_MLP::Params(layer_sizes, ANN_MLP::SIGMOID_SYM, 0, 0,
TermCriteria(TermCriteria::COUNT,300,0.01),
str_to_ann_train_method(train_method_str), param1, param2));
Ptr<ANN_MLP> m = ANN_MLP::create();
m->setLayerSizes(layer_sizes);
m->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
m->setTermCriteria(TermCriteria(TermCriteria::COUNT,300,0.01));
m->setTrainMethod(str_to_ann_train_method(train_method_str), param1, param2);
model = m;
}
else if( modelName == CV_DTREE )
{
@ -386,8 +374,18 @@ int CV_MLBaseTest::train( int testCaseIdx )
modelParamsNode["max_categories"] >> MAX_CATEGORIES;
modelParamsNode["cv_folds"] >> CV_FOLDS;
modelParamsNode["is_pruned"] >> IS_PRUNED;
model = DTrees::create(DTrees::Params(MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY, USE_SURROGATE,
MAX_CATEGORIES, CV_FOLDS, false, IS_PRUNED, Mat() ));
Ptr<DTrees> m = DTrees::create();
m->setMaxDepth(MAX_DEPTH);
m->setMinSampleCount(MIN_SAMPLE_COUNT);
m->setRegressionAccuracy(REG_ACCURACY);
m->setUseSurrogates(USE_SURROGATE);
m->setMaxCategories(MAX_CATEGORIES);
m->setCVFolds(CV_FOLDS);
m->setUse1SERule(false);
m->setTruncatePrunedTree(IS_PRUNED);
m->setPriors(Mat());
model = m;
}
else if( modelName == CV_BOOST )
{
@ -401,7 +399,15 @@ int CV_MLBaseTest::train( int testCaseIdx )
modelParamsNode["weight_trim_rate"] >> WEIGHT_TRIM_RATE;
modelParamsNode["max_depth"] >> MAX_DEPTH;
//modelParamsNode["use_surrogate"] >> USE_SURROGATE;
model = Boost::create( Boost::Params(BOOST_TYPE, WEAK_COUNT, WEIGHT_TRIM_RATE, MAX_DEPTH, USE_SURROGATE, Mat()) );
Ptr<Boost> m = Boost::create();
m->setBoostType(BOOST_TYPE);
m->setWeakCount(WEAK_COUNT);
m->setWeightTrimRate(WEIGHT_TRIM_RATE);
m->setMaxDepth(MAX_DEPTH);
m->setUseSurrogates(USE_SURROGATE);
m->setPriors(Mat());
model = m;
}
else if( modelName == CV_RTREES )
{
@ -416,9 +422,18 @@ int CV_MLBaseTest::train( int testCaseIdx )
modelParamsNode["is_pruned"] >> IS_PRUNED;
modelParamsNode["nactive_vars"] >> NACTIVE_VARS;
modelParamsNode["max_trees_num"] >> MAX_TREES_NUM;
model = RTrees::create(RTrees::Params( MAX_DEPTH, MIN_SAMPLE_COUNT, REG_ACCURACY,
USE_SURROGATE, MAX_CATEGORIES, Mat(), true, // (calc_var_importance == true) <=> RF processes variable importance
NACTIVE_VARS, TermCriteria(TermCriteria::COUNT, MAX_TREES_NUM, OOB_EPS)));
Ptr<RTrees> m = RTrees::create();
m->setMaxDepth(MAX_DEPTH);
m->setMinSampleCount(MIN_SAMPLE_COUNT);
m->setRegressionAccuracy(REG_ACCURACY);
m->setUseSurrogates(USE_SURROGATE);
m->setMaxCategories(MAX_CATEGORIES);
m->setPriors(Mat());
m->setCalculateVarImportance(true);
m->setActiveVarCount(NACTIVE_VARS);
m->setTermCriteria(TermCriteria(TermCriteria::COUNT, MAX_TREES_NUM, OOB_EPS));
model = m;
}
if( !model.empty() )

View File

@ -149,9 +149,8 @@ int CV_SLMLTest::validate_test_results( int testCaseIdx )
}
TEST(ML_NaiveBayes, save_load) { CV_SLMLTest test( CV_NBAYES ); test.safe_run(); }
//CV_SLMLTest lsmlknearest( CV_KNEAREST, "slknearest" ); // does not support save!
TEST(ML_KNearest, save_load) { CV_SLMLTest test( CV_KNEAREST ); test.safe_run(); }
TEST(ML_SVM, save_load) { CV_SLMLTest test( CV_SVM ); test.safe_run(); }
//CV_SLMLTest lsmlem( CV_EM, "slem" ); // does not support save!
TEST(ML_ANN, save_load) { CV_SLMLTest test( CV_ANN ); test.safe_run(); }
TEST(ML_DTree, save_load) { CV_SLMLTest test( CV_DTREE ); test.safe_run(); }
TEST(ML_Boost, save_load) { CV_SLMLTest test( CV_BOOST ); test.safe_run(); }

View File

@ -52,11 +52,6 @@
@defgroup shape Shape Distance and Matching
*/
namespace cv
{
CV_EXPORTS bool initModule_shape();
}
#endif
/* End of file. */

View File

@ -66,8 +66,6 @@ public:
{
}
virtual AlgorithmInfo* info() const { return 0; }
//! the main operator
virtual void estimateTransformation(InputArray transformingShape, InputArray targetShape, std::vector<DMatch> &matches);
virtual float applyTransformation(InputArray input, OutputArray output=noArray());

View File

@ -60,8 +60,6 @@ public:
{
}
virtual AlgorithmInfo* info() const { return 0; }
//! the main operator
virtual float computeDistance(InputArray contour1, InputArray contour2);

View File

@ -62,8 +62,6 @@ public:
{
}
virtual AlgorithmInfo* info() const { return 0; }
//! the main operator
virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix);
@ -189,8 +187,6 @@ public:
{
}
virtual AlgorithmInfo* info() const { return 0; }
//! the main operator
virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix);
@ -327,8 +323,6 @@ public:
{
}
virtual AlgorithmInfo* info() const { return 0; }
//! the main operator
virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix);
@ -445,8 +439,6 @@ public:
{
}
virtual AlgorithmInfo* info() const { return 0; }
//! the main operator
virtual void buildCostMatrix(InputArray descriptors1, InputArray descriptors2, OutputArray costMatrix);

View File

@ -79,8 +79,6 @@ public:
{
}
virtual AlgorithmInfo* info() const { return 0; }
//! the main operator
virtual float computeDistance(InputArray contour1, InputArray contour2);

View File

@ -68,8 +68,6 @@ public:
{
}
virtual AlgorithmInfo* info() const { return 0; }
//! the main operators
virtual void estimateTransformation(InputArray transformingShape, InputArray targetShape, std::vector<DMatch> &matches);
virtual float applyTransformation(InputArray inPts, OutputArray output=noArray());

View File

@ -44,6 +44,7 @@
#define __OPENCV_SUPERRES_HPP__
#include "opencv2/core.hpp"
#include "opencv2/superres/optical_flow.hpp"
/**
@defgroup superres Super Resolution
@ -62,8 +63,6 @@ namespace cv
//! @addtogroup superres
//! @{
CV_EXPORTS bool initModule_superres();
class CV_EXPORTS FrameSource
{
public:
@ -105,6 +104,36 @@ namespace cv
*/
virtual void collectGarbage();
//! @brief Scale factor
CV_PURE_PROPERTY(int, Scale)
//! @brief Iterations count
CV_PURE_PROPERTY(int, Iterations)
//! @brief Asymptotic value of steepest descent method
CV_PURE_PROPERTY(double, Tau)
//! @brief Weight parameter to balance data term and smoothness term
CV_PURE_PROPERTY(double, Labmda)
//! @brief Parameter of spacial distribution in Bilateral-TV
CV_PURE_PROPERTY(double, Alpha)
//! @brief Kernel size of Bilateral-TV filter
CV_PURE_PROPERTY(int, KernelSize)
//! @brief Gaussian blur kernel size
CV_PURE_PROPERTY(int, BlurKernelSize)
//! @brief Gaussian blur sigma
CV_PURE_PROPERTY(double, BlurSigma)
//! @brief Radius of the temporal search area
CV_PURE_PROPERTY(int, TemporalAreaRadius)
//! @brief Dense optical flow algorithm
CV_PURE_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow)
protected:
SuperResolution();
@ -139,7 +168,6 @@ namespace cv
*/
CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1();
CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_CUDA();
CV_EXPORTS Ptr<SuperResolution> createSuperResolution_BTVL1_OCL();
//! @} superres
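The CV_PURE_PROPERTY / CV_PURE_PROPERTY_S macros used here presumably expand to a pure-virtual getter/setter pair, roughly:

    // presumed expansion of CV_PURE_PROPERTY(int, Scale)
    virtual int getScale() const = 0;
    virtual void setScale(int val) = 0;

while CV_IMPL_PROPERTY in the implementation files generates the concrete accessors around the named member. Callers therefore replace the old string-based superRes->set("scale", v) with the typed superRes->setScale(v), as the perf-test hunks below show. (The Labmda spelling is kept as it appears in the source, so the generated accessors would be getLabmda/setLabmda.)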

View File

@ -60,20 +60,68 @@ namespace cv
virtual void collectGarbage() = 0;
};
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_CUDA();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Farneback_OCL();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();
class CV_EXPORTS FarnebackOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
CV_PURE_PROPERTY(double, PyrScale)
CV_PURE_PROPERTY(int, LevelsNumber)
CV_PURE_PROPERTY(int, WindowSize)
CV_PURE_PROPERTY(int, Iterations)
CV_PURE_PROPERTY(int, PolyN)
CV_PURE_PROPERTY(double, PolySigma)
CV_PURE_PROPERTY(int, Flags)
};
CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback();
CV_EXPORTS Ptr<FarnebackOpticalFlow> createOptFlow_Farneback_CUDA();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_CUDA();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_DualTVL1_OCL();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Brox_CUDA();
// CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_Simple();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_CUDA();
CV_EXPORTS Ptr<DenseOpticalFlowExt> createOptFlow_PyrLK_OCL();
class CV_EXPORTS DualTVL1OpticalFlow : public virtual DenseOpticalFlowExt
{
public:
CV_PURE_PROPERTY(double, Tau)
CV_PURE_PROPERTY(double, Lambda)
CV_PURE_PROPERTY(double, Theta)
CV_PURE_PROPERTY(int, ScalesNumber)
CV_PURE_PROPERTY(int, WarpingsNumber)
CV_PURE_PROPERTY(double, Epsilon)
CV_PURE_PROPERTY(int, Iterations)
CV_PURE_PROPERTY(bool, UseInitialFlow)
};
CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();
CV_EXPORTS Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1_CUDA();
class CV_EXPORTS BroxOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
//! @brief Flow smoothness
CV_PURE_PROPERTY(double, Alpha)
//! @brief Gradient constancy importance
CV_PURE_PROPERTY(double, Gamma)
//! @brief Pyramid scale factor
CV_PURE_PROPERTY(double, ScaleFactor)
//! @brief Number of lagged non-linearity iterations (inner loop)
CV_PURE_PROPERTY(int, InnerIterations)
//! @brief Number of warping iterations (number of pyramid levels)
CV_PURE_PROPERTY(int, OuterIterations)
//! @brief Number of linear system solver iterations
CV_PURE_PROPERTY(int, SolverIterations)
};
CV_EXPORTS Ptr<BroxOpticalFlow> createOptFlow_Brox_CUDA();
class PyrLKOpticalFlow : public virtual DenseOpticalFlowExt
{
public:
CV_PURE_PROPERTY(int, WindowSize)
CV_PURE_PROPERTY(int, MaxLevel)
CV_PURE_PROPERTY(int, Iterations)
};
CV_EXPORTS Ptr<PyrLKOpticalFlow> createOptFlow_PyrLK_CUDA();
//! @}
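For illustration, tuning one of the refactored optical-flow wrappers now goes through typed properties as well (property names as declared above; the values are illustrative):

    #include <opencv2/superres/optical_flow.hpp>

    void configureFlowSketch()
    {
        cv::Ptr<cv::superres::DualTVL1OpticalFlow> flow =
            cv::superres::createOptFlow_DualTVL1();
        flow->setTau(0.25);
        flow->setLambda(0.15);
        flow->setScalesNumber(5);
        flow->setUseInitialFlow(false);
    }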

View File

@ -138,10 +138,10 @@ PERF_TEST_P(Size_MatType, SuperResolution_BTVL1,
{
Ptr<SuperResolution> superRes = createSuperResolution_BTVL1_CUDA();
superRes->set("scale", scale);
superRes->set("iterations", iterations);
superRes->set("temporalAreaRadius", temporalAreaRadius);
superRes->set("opticalFlow", opticalFlow);
superRes->setScale(scale);
superRes->setIterations(iterations);
superRes->setTemporalAreaRadius(temporalAreaRadius);
superRes->setOpticalFlow(opticalFlow);
superRes->setInput(makePtr<OneFrameSource_CUDA>(GpuMat(frame)));
@ -156,10 +156,10 @@ PERF_TEST_P(Size_MatType, SuperResolution_BTVL1,
{
Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();
superRes->set("scale", scale);
superRes->set("iterations", iterations);
superRes->set("temporalAreaRadius", temporalAreaRadius);
superRes->set("opticalFlow", opticalFlow);
superRes->setScale(scale);
superRes->setIterations(iterations);
superRes->setTemporalAreaRadius(temporalAreaRadius);
superRes->setOpticalFlow(opticalFlow);
superRes->setInput(makePtr<OneFrameSource_CPU>(frame));
@ -198,10 +198,10 @@ OCL_PERF_TEST_P(SuperResolution_BTVL1 ,BTVL1,
Ptr<DenseOpticalFlowExt> opticalFlow(new ZeroOpticalFlow);
Ptr<SuperResolution> superRes = createSuperResolution_BTVL1();
superRes->set("scale", scale);
superRes->set("iterations", iterations);
superRes->set("temporalAreaRadius", temporalAreaRadius);
superRes->set("opticalFlow", opticalFlow);
superRes->setScale(scale);
superRes->setIterations(iterations);
superRes->setTemporalAreaRadius(temporalAreaRadius);
superRes->setOpticalFlow(opticalFlow);
superRes->setInput(makePtr<OneFrameSource_CPU>(frame));

View File

@ -460,7 +460,7 @@ namespace
func(_src, _dst, btvKernelSize, btvWeights);
}
class BTVL1_Base
class BTVL1_Base : public cv::superres::SuperResolution
{
public:
BTVL1_Base();
@ -470,6 +470,17 @@ namespace
void collectGarbage();
CV_IMPL_PROPERTY(int, Scale, scale_)
CV_IMPL_PROPERTY(int, Iterations, iterations_)
CV_IMPL_PROPERTY(double, Tau, tau_)
CV_IMPL_PROPERTY(double, Labmda, lambda_)
CV_IMPL_PROPERTY(double, Alpha, alpha_)
CV_IMPL_PROPERTY(int, KernelSize, btvKernelSize_)
CV_IMPL_PROPERTY(int, BlurKernelSize, blurKernelSize_)
CV_IMPL_PROPERTY(double, BlurSigma, blurSigma_)
CV_IMPL_PROPERTY(int, TemporalAreaRadius, temporalAreaRadius_)
CV_IMPL_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow, opticalFlow_)
protected:
int scale_;
int iterations_;
@ -479,7 +490,8 @@ namespace
int btvKernelSize_;
int blurKernelSize_;
double blurSigma_;
Ptr<DenseOpticalFlowExt> opticalFlow_;
int temporalAreaRadius_; // not used in some implementations
Ptr<cv::superres::DenseOpticalFlowExt> opticalFlow_;
private:
bool ocl_process(InputArrayOfArrays src, OutputArray dst, InputArrayOfArrays forwardMotions,
@ -539,6 +551,7 @@ namespace
btvKernelSize_ = 7;
blurKernelSize_ = 5;
blurSigma_ = 0.0;
temporalAreaRadius_ = 0;
opticalFlow_ = createOptFlow_Farneback();
curBlurKernelSize_ = -1;
@ -781,12 +794,9 @@ namespace
////////////////////////////////////////////////////////////////////
class BTVL1 :
public SuperResolution, private BTVL1_Base
class BTVL1 : public BTVL1_Base
{
public:
AlgorithmInfo* info() const;
BTVL1();
void collectGarbage();
@ -799,8 +809,6 @@ namespace
bool ocl_processImpl(Ptr<FrameSource>& frameSource, OutputArray output);
private:
int temporalAreaRadius_;
void readNextFrame(Ptr<FrameSource>& frameSource);
bool ocl_readNextFrame(Ptr<FrameSource>& frameSource);
@ -841,18 +849,6 @@ namespace
#endif
};
CV_INIT_ALGORITHM(BTVL1, "SuperResolution.BTVL1",
obj.info()->addParam(obj, "scale", obj.scale_, false, 0, 0, "Scale factor.");
obj.info()->addParam(obj, "iterations", obj.iterations_, false, 0, 0, "Iteration count.");
obj.info()->addParam(obj, "tau", obj.tau_, false, 0, 0, "Asymptotic value of steepest descent method.");
obj.info()->addParam(obj, "lambda", obj.lambda_, false, 0, 0, "Weight parameter to balance data term and smoothness term.");
obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Parameter of spacial distribution in Bilateral-TV.");
obj.info()->addParam(obj, "btvKernelSize", obj.btvKernelSize_, false, 0, 0, "Kernel size of Bilateral-TV filter.");
obj.info()->addParam(obj, "blurKernelSize", obj.blurKernelSize_, false, 0, 0, "Gaussian blur kernel size.");
obj.info()->addParam(obj, "blurSigma", obj.blurSigma_, false, 0, 0, "Gaussian blur sigma.");
obj.info()->addParam(obj, "temporalAreaRadius", obj.temporalAreaRadius_, false, 0, 0, "Radius of the temporal search area.");
obj.info()->addParam<DenseOpticalFlowExt>(obj, "opticalFlow", obj.opticalFlow_, false, 0, 0, "Dense optical flow algorithm."))
BTVL1::BTVL1()
{
temporalAreaRadius_ = 4;
@ -1101,7 +1097,7 @@ namespace
}
}
Ptr<SuperResolution> cv::superres::createSuperResolution_BTVL1()
Ptr<cv::superres::SuperResolution> cv::superres::createSuperResolution_BTVL1()
{
return makePtr<BTVL1>();
}
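Taken together, the typed interface lets client code configure BTVL1 without string lookups. A minimal sketch, using only setters that appear in the hunks above (the parameter values and the frameSource variable are illustrative assumptions):

    Ptr<cv::superres::SuperResolution> sr = cv::superres::createSuperResolution_BTVL1();
    sr->setScale(4);                // upscaling factor
    sr->setIterations(180);         // optimization iterations
    sr->setTemporalAreaRadius(4);   // frames around the current one
    sr->setOpticalFlow(cv::superres::createOptFlow_Farneback());
    sr->setInput(frameSource);      // any Ptr<FrameSource>, e.g. from createFrameSource_Video()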

View File

@ -207,7 +207,7 @@ namespace
funcs[src.channels()](src, dst, ksize);
}
class BTVL1_CUDA_Base
class BTVL1_CUDA_Base : public cv::superres::SuperResolution
{
public:
BTVL1_CUDA_Base();
@ -218,6 +218,17 @@ namespace
void collectGarbage();
CV_IMPL_PROPERTY(int, Scale, scale_)
CV_IMPL_PROPERTY(int, Iterations, iterations_)
CV_IMPL_PROPERTY(double, Tau, tau_)
CV_IMPL_PROPERTY(double, Lambda, lambda_)
CV_IMPL_PROPERTY(double, Alpha, alpha_)
CV_IMPL_PROPERTY(int, KernelSize, btvKernelSize_)
CV_IMPL_PROPERTY(int, BlurKernelSize, blurKernelSize_)
CV_IMPL_PROPERTY(double, BlurSigma, blurSigma_)
CV_IMPL_PROPERTY(int, TemporalAreaRadius, temporalAreaRadius_)
CV_IMPL_PROPERTY_S(Ptr<cv::superres::DenseOpticalFlowExt>, OpticalFlow, opticalFlow_)
protected:
int scale_;
int iterations_;
@ -227,7 +238,8 @@ namespace
int btvKernelSize_;
int blurKernelSize_;
double blurSigma_;
Ptr<DenseOpticalFlowExt> opticalFlow_;
int temporalAreaRadius_;
Ptr<cv::superres::DenseOpticalFlowExt> opticalFlow_;
private:
std::vector<Ptr<cuda::Filter> > filters_;
@ -272,6 +284,7 @@ namespace
#else
opticalFlow_ = createOptFlow_Farneback();
#endif
temporalAreaRadius_ = 0;
curBlurKernelSize_ = -1;
curBlurSigma_ = -1.0;
@ -401,11 +414,9 @@ namespace
////////////////////////////////////////////////////////////
class BTVL1_CUDA : public SuperResolution, private BTVL1_CUDA_Base
class BTVL1_CUDA : public BTVL1_CUDA_Base
{
public:
AlgorithmInfo* info() const;
BTVL1_CUDA();
void collectGarbage();
@ -415,8 +426,6 @@ namespace
void processImpl(Ptr<FrameSource>& frameSource, OutputArray output);
private:
int temporalAreaRadius_;
void readNextFrame(Ptr<FrameSource>& frameSource);
void processFrame(int idx);
@ -438,18 +447,6 @@ namespace
GpuMat finalOutput_;
};
CV_INIT_ALGORITHM(BTVL1_CUDA, "SuperResolution.BTVL1_CUDA",
obj.info()->addParam(obj, "scale", obj.scale_, false, 0, 0, "Scale factor.");
obj.info()->addParam(obj, "iterations", obj.iterations_, false, 0, 0, "Iteration count.");
obj.info()->addParam(obj, "tau", obj.tau_, false, 0, 0, "Asymptotic value of steepest descent method.");
obj.info()->addParam(obj, "lambda", obj.lambda_, false, 0, 0, "Weight parameter to balance data term and smoothness term.");
obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Parameter of spacial distribution in Bilateral-TV.");
obj.info()->addParam(obj, "btvKernelSize", obj.btvKernelSize_, false, 0, 0, "Kernel size of Bilateral-TV filter.");
obj.info()->addParam(obj, "blurKernelSize", obj.blurKernelSize_, false, 0, 0, "Gaussian blur kernel size.");
obj.info()->addParam(obj, "blurSigma", obj.blurSigma_, false, 0, 0, "Gaussian blur sigma.");
obj.info()->addParam(obj, "temporalAreaRadius", obj.temporalAreaRadius_, false, 0, 0, "Radius of the temporal search area.");
obj.info()->addParam<DenseOpticalFlowExt>(obj, "opticalFlow", obj.opticalFlow_, false, 0, 0, "Dense optical flow algorithm."));
BTVL1_CUDA::BTVL1_CUDA()
{
temporalAreaRadius_ = 4;

View File

@ -53,7 +53,7 @@ using namespace cv::superres::detail;
namespace
{
class CpuOpticalFlow : public DenseOpticalFlowExt
class CpuOpticalFlow : public virtual cv::superres::DenseOpticalFlowExt
{
public:
explicit CpuOpticalFlow(int work_type);
@ -173,12 +173,20 @@ namespace
namespace
{
class Farneback : public CpuOpticalFlow
class Farneback : public CpuOpticalFlow, public cv::superres::FarnebackOpticalFlow
{
public:
AlgorithmInfo* info() const;
Farneback();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
void collectGarbage();
CV_IMPL_PROPERTY(double, PyrScale, pyrScale_)
CV_IMPL_PROPERTY(int, LevelsNumber, numLevels_)
CV_IMPL_PROPERTY(int, WindowSize, winSize_)
CV_IMPL_PROPERTY(int, Iterations, numIters_)
CV_IMPL_PROPERTY(int, PolyN, polyN_)
CV_IMPL_PROPERTY(double, PolySigma, polySigma_)
CV_IMPL_PROPERTY(int, Flags, flags_)
protected:
void impl(InputArray input0, InputArray input1, OutputArray dst);
@ -193,15 +201,6 @@ namespace
int flags_;
};
CV_INIT_ALGORITHM(Farneback, "DenseOpticalFlowExt.Farneback",
obj.info()->addParam(obj, "pyrScale", obj.pyrScale_);
obj.info()->addParam(obj, "numLevels", obj.numLevels_);
obj.info()->addParam(obj, "winSize", obj.winSize_);
obj.info()->addParam(obj, "numIters", obj.numIters_);
obj.info()->addParam(obj, "polyN", obj.polyN_);
obj.info()->addParam(obj, "polySigma", obj.polySigma_);
obj.info()->addParam(obj, "flags", obj.flags_))
Farneback::Farneback() : CpuOpticalFlow(CV_8UC1)
{
pyrScale_ = 0.5;
@ -213,6 +212,16 @@ namespace
flags_ = 0;
}
void Farneback::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
CpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void Farneback::collectGarbage()
{
CpuOpticalFlow::collectGarbage();
}
void Farneback::impl(InputArray input0, InputArray input1, OutputArray dst)
{
calcOpticalFlowFarneback(input0, input1, (InputOutputArray)dst, pyrScale_,
@ -221,7 +230,7 @@ namespace
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback()
Ptr<cv::superres::FarnebackOpticalFlow> cv::superres::createOptFlow_Farneback()
{
return makePtr<Farneback>();
}
@ -319,65 +328,41 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Simple()
namespace
{
class DualTVL1 : public CpuOpticalFlow
class DualTVL1 : public CpuOpticalFlow, public virtual cv::superres::DualTVL1OpticalFlow
{
public:
AlgorithmInfo* info() const;
DualTVL1();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
void collectGarbage();
CV_WRAP_SAME_PROPERTY(double, Tau, (*alg_))
CV_WRAP_SAME_PROPERTY(double, Lambda, (*alg_))
CV_WRAP_SAME_PROPERTY(double, Theta, (*alg_))
CV_WRAP_SAME_PROPERTY(int, ScalesNumber, (*alg_))
CV_WRAP_SAME_PROPERTY(int, WarpingsNumber, (*alg_))
CV_WRAP_SAME_PROPERTY(double, Epsilon, (*alg_))
CV_WRAP_PROPERTY(int, Iterations, OuterIterations, (*alg_))
CV_WRAP_SAME_PROPERTY(bool, UseInitialFlow, (*alg_))
protected:
void impl(InputArray input0, InputArray input1, OutputArray dst);
private:
double tau_;
double lambda_;
double theta_;
int nscales_;
int warps_;
double epsilon_;
int iterations_;
bool useInitialFlow_;
Ptr<cv::DenseOpticalFlow> alg_;
Ptr<cv::DualTVL1OpticalFlow> alg_;
};
CV_INIT_ALGORITHM(DualTVL1, "DenseOpticalFlowExt.DualTVL1",
obj.info()->addParam(obj, "tau", obj.tau_);
obj.info()->addParam(obj, "lambda", obj.lambda_);
obj.info()->addParam(obj, "theta", obj.theta_);
obj.info()->addParam(obj, "nscales", obj.nscales_);
obj.info()->addParam(obj, "warps", obj.warps_);
obj.info()->addParam(obj, "epsilon", obj.epsilon_);
obj.info()->addParam(obj, "iterations", obj.iterations_);
obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow_))
DualTVL1::DualTVL1() : CpuOpticalFlow(CV_8UC1)
{
alg_ = cv::createOptFlow_DualTVL1();
tau_ = alg_->getDouble("tau");
lambda_ = alg_->getDouble("lambda");
theta_ = alg_->getDouble("theta");
nscales_ = alg_->getInt("nscales");
warps_ = alg_->getInt("warps");
epsilon_ = alg_->getDouble("epsilon");
iterations_ = alg_->getInt("iterations");
useInitialFlow_ = alg_->getBool("useInitialFlow");
}
void DualTVL1::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
CpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void DualTVL1::impl(InputArray input0, InputArray input1, OutputArray dst)
{
alg_->set("tau", tau_);
alg_->set("lambda", lambda_);
alg_->set("theta", theta_);
alg_->set("nscales", nscales_);
alg_->set("warps", warps_);
alg_->set("epsilon", epsilon_);
alg_->set("iterations", iterations_);
alg_->set("useInitialFlow", useInitialFlow_);
alg_->calc(input0, input1, (InputOutputArray)dst);
}
@ -388,7 +373,7 @@ namespace
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1()
Ptr<cv::superres::DualTVL1OpticalFlow> cv::superres::createOptFlow_DualTVL1()
{
return makePtr<DualTVL1>();
}
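Unlike CV_IMPL_PROPERTY, the DualTVL1 wrapper above no longer caches parameters in its own fields; CV_WRAP_SAME_PROPERTY and CV_WRAP_PROPERTY presumably forward straight to the wrapped algorithm, roughly like this (a sketch, not the actual core definition):

    // Hypothetical: forward get/set to a member object's accessors.
    #define CV_WRAP_PROPERTY(type, name, internal_name, internal_obj) \
        inline type get##name() const { return internal_obj.get##internal_name(); } \
        inline void set##name(type val) { internal_obj.set##internal_name(val); }

    // Same name on both sides:
    // CV_WRAP_SAME_PROPERTY(double, Tau, (*alg_)) would yield
    //     getTau()/setTau() calling alg_->getTau()/alg_->setTau().

This removes the copy-in/copy-out that the deleted alg_->set("tau", tau_) lines in impl() used to perform.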
@ -398,35 +383,35 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1()
#ifndef HAVE_OPENCV_CUDAOPTFLOW
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_CUDA()
Ptr<cv::superres::FarnebackOpticalFlow> cv::superres::createOptFlow_Farneback_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
return Ptr<cv::superres::FarnebackOpticalFlow>();
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_CUDA()
Ptr<cv::superres::DualTVL1OpticalFlow> cv::superres::createOptFlow_DualTVL1_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
return Ptr<cv::superres::DualTVL1OpticalFlow>();
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_CUDA()
Ptr<cv::superres::BroxOpticalFlow> cv::superres::createOptFlow_Brox_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
return Ptr<cv::superres::BroxOpticalFlow>();
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_CUDA()
Ptr<cv::superres::PyrLKOpticalFlow> cv::superres::createOptFlow_PyrLK_CUDA()
{
CV_Error(cv::Error::StsNotImplemented, "The called functionality is disabled for current build or platform");
return Ptr<DenseOpticalFlowExt>();
return Ptr<cv::superres::PyrLKOpticalFlow>();
}
#else // HAVE_OPENCV_CUDAOPTFLOW
namespace
{
class GpuOpticalFlow : public DenseOpticalFlowExt
class GpuOpticalFlow : public virtual cv::superres::DenseOpticalFlowExt
{
public:
explicit GpuOpticalFlow(int work_type);
@ -494,15 +479,20 @@ namespace
namespace
{
class Brox_CUDA : public GpuOpticalFlow
class Brox_CUDA : public GpuOpticalFlow, public virtual cv::superres::BroxOpticalFlow
{
public:
AlgorithmInfo* info() const;
Brox_CUDA();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
void collectGarbage();
CV_IMPL_PROPERTY(double, Alpha, alpha_)
CV_IMPL_PROPERTY(double, Gamma, gamma_)
CV_IMPL_PROPERTY(double, ScaleFactor, scaleFactor_)
CV_IMPL_PROPERTY(int, InnerIterations, innerIterations_)
CV_IMPL_PROPERTY(int, OuterIterations, outerIterations_)
CV_IMPL_PROPERTY(int, SolverIterations, solverIterations_)
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
@ -517,14 +507,6 @@ namespace
Ptr<cuda::BroxOpticalFlow> alg_;
};
CV_INIT_ALGORITHM(Brox_CUDA, "DenseOpticalFlowExt.Brox_CUDA",
obj.info()->addParam(obj, "alpha", obj.alpha_, false, 0, 0, "Flow smoothness");
obj.info()->addParam(obj, "gamma", obj.gamma_, false, 0, 0, "Gradient constancy importance");
obj.info()->addParam(obj, "scaleFactor", obj.scaleFactor_, false, 0, 0, "Pyramid scale factor");
obj.info()->addParam(obj, "innerIterations", obj.innerIterations_, false, 0, 0, "Number of lagged non-linearity iterations (inner loop)");
obj.info()->addParam(obj, "outerIterations", obj.outerIterations_, false, 0, 0, "Number of warping iterations (number of pyramid levels)");
obj.info()->addParam(obj, "solverIterations", obj.solverIterations_, false, 0, 0, "Number of linear system solver iterations"))
Brox_CUDA::Brox_CUDA() : GpuOpticalFlow(CV_32FC1)
{
alg_ = cuda::BroxOpticalFlow::create(0.197f, 50.0f, 0.8f, 10, 77, 10);
@ -537,6 +519,11 @@ namespace
solverIterations_ = alg_->getSolverIterations();
}
void Brox_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void Brox_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_->setFlowSmoothness(alpha_);
@ -563,7 +550,7 @@ namespace
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_CUDA()
Ptr<cv::superres::BroxOpticalFlow> cv::superres::createOptFlow_Brox_CUDA()
{
return makePtr<Brox_CUDA>();
}
@ -573,15 +560,17 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Brox_CUDA()
namespace
{
class PyrLK_CUDA : public GpuOpticalFlow
class PyrLK_CUDA : public GpuOpticalFlow, public cv::superres::PyrLKOpticalFlow
{
public:
AlgorithmInfo* info() const;
PyrLK_CUDA();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
void collectGarbage();
CV_IMPL_PROPERTY(int, WindowSize, winSize_)
CV_IMPL_PROPERTY(int, MaxLevel, maxLevel_)
CV_IMPL_PROPERTY(int, Iterations, iterations_)
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
@ -593,11 +582,6 @@ namespace
Ptr<cuda::DensePyrLKOpticalFlow> alg_;
};
CV_INIT_ALGORITHM(PyrLK_CUDA, "DenseOpticalFlowExt.PyrLK_CUDA",
obj.info()->addParam(obj, "winSize", obj.winSize_);
obj.info()->addParam(obj, "maxLevel", obj.maxLevel_);
obj.info()->addParam(obj, "iterations", obj.iterations_))
PyrLK_CUDA::PyrLK_CUDA() : GpuOpticalFlow(CV_8UC1)
{
alg_ = cuda::DensePyrLKOpticalFlow::create();
@ -607,6 +591,11 @@ namespace
iterations_ = alg_->getNumIters();
}
void PyrLK_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void PyrLK_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_->setWinSize(Size(winSize_, winSize_));
@ -630,7 +619,7 @@ namespace
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_CUDA()
Ptr<cv::superres::PyrLKOpticalFlow> cv::superres::createOptFlow_PyrLK_CUDA()
{
return makePtr<PyrLK_CUDA>();
}
@ -640,15 +629,21 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_PyrLK_CUDA()
namespace
{
class Farneback_CUDA : public GpuOpticalFlow
class Farneback_CUDA : public GpuOpticalFlow, public cv::superres::FarnebackOpticalFlow
{
public:
AlgorithmInfo* info() const;
Farneback_CUDA();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
void collectGarbage();
CV_IMPL_PROPERTY(double, PyrScale, pyrScale_)
CV_IMPL_PROPERTY(int, LevelsNumber, numLevels_)
CV_IMPL_PROPERTY(int, WindowSize, winSize_)
CV_IMPL_PROPERTY(int, Iterations, numIters_)
CV_IMPL_PROPERTY(int, PolyN, polyN_)
CV_IMPL_PROPERTY(double, PolySigma, polySigma_)
CV_IMPL_PROPERTY(int, Flags, flags_)
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
@ -664,15 +659,6 @@ namespace
Ptr<cuda::FarnebackOpticalFlow> alg_;
};
CV_INIT_ALGORITHM(Farneback_CUDA, "DenseOpticalFlowExt.Farneback_CUDA",
obj.info()->addParam(obj, "pyrScale", obj.pyrScale_);
obj.info()->addParam(obj, "numLevels", obj.numLevels_);
obj.info()->addParam(obj, "winSize", obj.winSize_);
obj.info()->addParam(obj, "numIters", obj.numIters_);
obj.info()->addParam(obj, "polyN", obj.polyN_);
obj.info()->addParam(obj, "polySigma", obj.polySigma_);
obj.info()->addParam(obj, "flags", obj.flags_))
Farneback_CUDA::Farneback_CUDA() : GpuOpticalFlow(CV_8UC1)
{
alg_ = cuda::FarnebackOpticalFlow::create();
@ -686,6 +672,11 @@ namespace
flags_ = alg_->getFlags();
}
void Farneback_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void Farneback_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_->setPyrScale(pyrScale_);
@ -713,7 +704,7 @@ namespace
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_CUDA()
Ptr<cv::superres::FarnebackOpticalFlow> cv::superres::createOptFlow_Farneback_CUDA()
{
return makePtr<Farneback_CUDA>();
}
@ -723,15 +714,22 @@ Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_Farneback_CUDA()
namespace
{
class DualTVL1_CUDA : public GpuOpticalFlow
class DualTVL1_CUDA : public GpuOpticalFlow, public cv::superres::DualTVL1OpticalFlow
{
public:
AlgorithmInfo* info() const;
DualTVL1_CUDA();
void calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2);
void collectGarbage();
CV_IMPL_PROPERTY(double, Tau, tau_)
CV_IMPL_PROPERTY(double, Lambda, lambda_)
CV_IMPL_PROPERTY(double, Theta, theta_)
CV_IMPL_PROPERTY(int, ScalesNumber, nscales_)
CV_IMPL_PROPERTY(int, WarpingsNumber, warps_)
CV_IMPL_PROPERTY(double, Epsilon, epsilon_)
CV_IMPL_PROPERTY(int, Iterations, iterations_)
CV_IMPL_PROPERTY(bool, UseInitialFlow, useInitialFlow_)
protected:
void impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2);
@ -748,16 +746,6 @@ namespace
Ptr<cuda::OpticalFlowDual_TVL1> alg_;
};
CV_INIT_ALGORITHM(DualTVL1_CUDA, "DenseOpticalFlowExt.DualTVL1_CUDA",
obj.info()->addParam(obj, "tau", obj.tau_);
obj.info()->addParam(obj, "lambda", obj.lambda_);
obj.info()->addParam(obj, "theta", obj.theta_);
obj.info()->addParam(obj, "nscales", obj.nscales_);
obj.info()->addParam(obj, "warps", obj.warps_);
obj.info()->addParam(obj, "epsilon", obj.epsilon_);
obj.info()->addParam(obj, "iterations", obj.iterations_);
obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow_))
DualTVL1_CUDA::DualTVL1_CUDA() : GpuOpticalFlow(CV_8UC1)
{
alg_ = cuda::OpticalFlowDual_TVL1::create();
@ -772,6 +760,11 @@ namespace
useInitialFlow_ = alg_->getUseInitialFlow();
}
void DualTVL1_CUDA::calc(InputArray frame0, InputArray frame1, OutputArray flow1, OutputArray flow2)
{
GpuOpticalFlow::calc(frame0, frame1, flow1, flow2);
}
void DualTVL1_CUDA::impl(const GpuMat& input0, const GpuMat& input1, GpuMat& dst1, GpuMat& dst2)
{
alg_->setTau(tau_);
@ -800,7 +793,7 @@ namespace
}
}
Ptr<DenseOpticalFlowExt> cv::superres::createOptFlow_DualTVL1_CUDA()
Ptr<cv::superres::DualTVL1OpticalFlow> cv::superres::createOptFlow_DualTVL1_CUDA()
{
return makePtr<DualTVL1_CUDA>();
}
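With the factories now returning concrete interface types, the CUDA-backed flows can be tuned directly. A minimal sketch using only accessors introduced above (the values are illustrative):

    Ptr<cv::superres::DualTVL1OpticalFlow> tvl1 = cv::superres::createOptFlow_DualTVL1_CUDA();
    tvl1->setScalesNumber(5);
    tvl1->setWarpingsNumber(5);
    tvl1->setUseInitialFlow(false);

    Ptr<cv::superres::FarnebackOpticalFlow> fb = cv::superres::createOptFlow_Farneback_CUDA();
    fb->setWindowSize(21);
    fb->setLevelsNumber(3);

Both still satisfy Ptr<DenseOpticalFlowExt>, so they can be handed to SuperResolution::setOpticalFlow() unchanged.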

View File

@ -45,11 +45,6 @@
using namespace cv;
using namespace cv::superres;
bool cv::superres::initModule_superres()
{
return !createSuperResolution_BTVL1().empty();
}
cv::superres::SuperResolution::SuperResolution()
{
frameSource_ = createFrameSource_Empty();

View File

@ -222,11 +222,11 @@ void SuperResolution::RunTest(cv::Ptr<cv::superres::SuperResolution> superRes)
ASSERT_FALSE( superRes.empty() );
const int btvKernelSize = superRes->getInt("btvKernelSize");
const int btvKernelSize = superRes->getKernelSize();
superRes->set("scale", scale);
superRes->set("iterations", iterations);
superRes->set("temporalAreaRadius", temporalAreaRadius);
superRes->setScale(scale);
superRes->setIterations(iterations);
superRes->setTemporalAreaRadius(temporalAreaRadius);
cv::Ptr<cv::superres::FrameSource> goldSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale));
cv::Ptr<cv::superres::FrameSource> lowResSource(new DegradeFrameSource(

View File

@ -380,6 +380,21 @@ public:
};
class CV_EXPORTS_W DenseOpticalFlow : public Algorithm
{
public:
/** @brief Calculates an optical flow.
@param I0 first 8-bit single-channel input image.
@param I1 second input image of the same size and the same type as I0.
@param flow computed flow image that has the same size as prev and type CV_32FC2.
*/
CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0;
/** @brief Releases all inner buffers.
*/
CV_WRAP virtual void collectGarbage() = 0;
};
/** @brief "Dual TV L1" Optical Flow Algorithm.
The class implements the "Dual TV L1" optical flow algorithm described in @cite Zach2007 and
@ -422,24 +437,38 @@ constructing the class instance:
C. Zach, T. Pock and H. Bischof, "A Duality Based Approach for Realtime TV-L1 Optical Flow".
Javier Sanchez, Enric Meinhardt-Llopis and Gabriele Facciolo. "TV-L1 Optical Flow Estimation".
*/
class CV_EXPORTS_W DenseOpticalFlow : public Algorithm
class CV_EXPORTS_W DualTVL1OpticalFlow : public DenseOpticalFlow
{
public:
/** @brief Calculates an optical flow.
@param I0 first 8-bit single-channel input image.
@param I1 second input image of the same size and the same type as prev.
@param flow computed flow image that has the same size as prev and type CV_32FC2.
*/
CV_WRAP virtual void calc( InputArray I0, InputArray I1, InputOutputArray flow ) = 0;
/** @brief Releases all inner buffers.
*/
CV_WRAP virtual void collectGarbage() = 0;
//! @brief Time step of the numerical scheme
CV_PURE_PROPERTY(double, Tau)
//! @brief Weight parameter for the data term, attachment parameter
CV_PURE_PROPERTY(double, Lambda)
//! @brief Weight parameter for (u - v)^2, tightness parameter
CV_PURE_PROPERTY(double, Theta)
//! @brief Coefficient for additional illumination variation term
CV_PURE_PROPERTY(double, Gamma)
//! @brief Number of scales used to create the pyramid of images
CV_PURE_PROPERTY(int, ScalesNumber)
//! @brief Number of warpings per scale
CV_PURE_PROPERTY(int, WarpingsNumber)
//! @brief Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time
CV_PURE_PROPERTY(double, Epsilon)
//! @brief Inner iterations (between outlier filtering) used in the numerical scheme
CV_PURE_PROPERTY(int, InnerIterations)
//! @brief Outer iterations (number of inner loops) used in the numerical scheme
CV_PURE_PROPERTY(int, OuterIterations)
//! @brief Use initial flow
CV_PURE_PROPERTY(bool, UseInitialFlow)
//! @brief Step between scales (<1)
CV_PURE_PROPERTY(double, ScaleStep)
//! @brief Median filter kernel size (1 = no filter) (3 or 5)
CV_PURE_PROPERTY(int, MedianFiltering)
};
/** @brief Creates instance of cv::DenseOpticalFlow
*/
CV_EXPORTS_W Ptr<DenseOpticalFlow> createOptFlow_DualTVL1();
CV_EXPORTS_W Ptr<DualTVL1OpticalFlow> createOptFlow_DualTVL1();
//! @} video_track
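CV_PURE_PROPERTY in the interface above presumably declares the pure-virtual accessor pair that CV_IMPL_PROPERTY later implements, along these lines (a sketch; the real macro lives in core and is not shown in this diff):

    // Hypothetical expansion of CV_PURE_PROPERTY(double, Tau):
    #define CV_PURE_PROPERTY(type, name) \
        virtual type get##name() const = 0; \
        virtual void set##name(type val) = 0;

Callers then program against DualTVL1OpticalFlow directly, as the updated OCL tests below do with setScaleStep()/setMedianFiltering().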

View File

@ -87,11 +87,11 @@ OCL_PERF_TEST_P(OpticalFlowDualTVL1Fixture, OpticalFlowDualTVL1,
declare.in(uFrame0, uFrame1, WARMUP_READ).out(uFlow, WARMUP_READ);
//create algorithm
cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
//set parameters
alg->set("scaleStep", scaleStep);
alg->setInt("medianFiltering", medianFiltering);
alg->setScaleStep(scaleStep);
alg->setMedianFiltering(medianFiltering);
if (useInitFlow)
{
@ -100,7 +100,7 @@ OCL_PERF_TEST_P(OpticalFlowDualTVL1Fixture, OpticalFlowDualTVL1,
}
//set flag to use initial flow
alg->setBool("useInitialFlow", useInitFlow);
alg->setUseInitialFlow(useInitFlow);
OCL_TEST_CYCLE()
alg->calc(uFrame0, uFrame1, uFlow);

View File

@ -160,8 +160,6 @@ public:
nNextLongUpdate = Scalar::all(0);
}
virtual AlgorithmInfo* info() const { return 0; }
virtual int getHistory() const { return history; }
virtual void setHistory(int _nframes) { history = _nframes; }

View File

@ -230,8 +230,6 @@ public:
}
}
virtual AlgorithmInfo* info() const { return 0; }
virtual int getHistory() const { return history; }
virtual void setHistory(int _nframes) { history = _nframes; }

View File

@ -86,7 +86,7 @@ using namespace cv;
namespace {
class OpticalFlowDual_TVL1 : public DenseOpticalFlow
class OpticalFlowDual_TVL1 : public DualTVL1OpticalFlow
{
public:
OpticalFlowDual_TVL1();
@ -94,7 +94,18 @@ public:
void calc(InputArray I0, InputArray I1, InputOutputArray flow);
void collectGarbage();
AlgorithmInfo* info() const;
CV_IMPL_PROPERTY(double, Tau, tau)
CV_IMPL_PROPERTY(double, Lambda, lambda)
CV_IMPL_PROPERTY(double, Theta, theta)
CV_IMPL_PROPERTY(double, Gamma, gamma)
CV_IMPL_PROPERTY(int, ScalesNumber, nscales)
CV_IMPL_PROPERTY(int, WarpingsNumber, warps)
CV_IMPL_PROPERTY(double, Epsilon, epsilon)
CV_IMPL_PROPERTY(int, InnerIterations, innerIterations)
CV_IMPL_PROPERTY(int, OuterIterations, outerIterations)
CV_IMPL_PROPERTY(bool, UseInitialFlow, useInitialFlow)
CV_IMPL_PROPERTY(double, ScaleStep, scaleStep)
CV_IMPL_PROPERTY(int, MedianFiltering, medianFiltering)
protected:
double tau;
@ -1416,35 +1427,9 @@ void OpticalFlowDual_TVL1::collectGarbage()
dum.norm_buf.release();
}
CV_INIT_ALGORITHM(OpticalFlowDual_TVL1, "DenseOpticalFlow.DualTVL1",
obj.info()->addParam(obj, "tau", obj.tau, false, 0, 0,
"Time step of the numerical scheme");
obj.info()->addParam(obj, "lambda", obj.lambda, false, 0, 0,
"Weight parameter for the data term, attachment parameter");
obj.info()->addParam(obj, "theta", obj.theta, false, 0, 0,
"Weight parameter for (u - v)^2, tightness parameter");
obj.info()->addParam(obj, "nscales", obj.nscales, false, 0, 0,
"Number of scales used to create the pyramid of images");
obj.info()->addParam(obj, "warps", obj.warps, false, 0, 0,
"Number of warpings per scale");
obj.info()->addParam(obj, "medianFiltering", obj.medianFiltering, false, 0, 0,
"Median filter kernel size (1 = no filter) (3 or 5)");
obj.info()->addParam(obj, "scaleStep", obj.scaleStep, false, 0, 0,
"Step between scales (<1)");
obj.info()->addParam(obj, "epsilon", obj.epsilon, false, 0, 0,
"Stopping criterion threshold used in the numerical scheme, which is a trade-off between precision and running time");
obj.info()->addParam(obj, "innerIterations", obj.innerIterations, false, 0, 0,
"inner iterations (between outlier filtering) used in the numerical scheme");
obj.info()->addParam(obj, "outerIterations", obj.outerIterations, false, 0, 0,
"outer iterations (number of inner loops) used in the numerical scheme");
obj.info()->addParam(obj, "gamma", obj.gamma, false, 0, 0,
"coefficient for additional illumination variation term");
obj.info()->addParam(obj, "useInitialFlow", obj.useInitialFlow))
} // namespace
Ptr<DenseOpticalFlow> cv::createOptFlow_DualTVL1()
Ptr<DualTVL1OpticalFlow> cv::createOptFlow_DualTVL1()
{
return makePtr<OpticalFlowDual_TVL1>();
}

View File

@ -82,11 +82,11 @@ OCL_TEST_P(OpticalFlowTVL1, Mat)
cv::Mat flow; cv::UMat uflow;
//create algorithm
cv::Ptr<cv::DenseOpticalFlow> alg = cv::createOptFlow_DualTVL1();
cv::Ptr<cv::DualTVL1OpticalFlow> alg = cv::createOptFlow_DualTVL1();
//set parameters
alg->set("scaleStep", scaleStep);
alg->setInt("medianFiltering", medianFiltering);
alg->setScaleStep(scaleStep);
alg->setMedianFiltering(medianFiltering);
//create initial flow as result of algorithm calculation
if (useInitFlow)
@ -96,7 +96,7 @@ OCL_TEST_P(OpticalFlowTVL1, Mat)
}
//set flag to use initial flow as it is ready to use
alg->setBool("useInitialFlow", useInitFlow);
alg->setUseInitialFlow(useInitFlow);
OCL_OFF(alg->calc(frame0, frame1, flow));
OCL_ON(alg->calc(frame0, frame1, uflow));

View File

@ -36,9 +36,11 @@ int main( int /*argc*/, char** /*argv*/ )
samples = samples.reshape(1, 0);
// cluster the data
Ptr<EM> em_model = EM::train( samples, noArray(), labels, noArray(),
EM::Params(N, EM::COV_MAT_SPHERICAL,
TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1)));
Ptr<EM> em_model = EM::create();
em_model->setClustersNumber(N);
em_model->setCovarianceMatrixType(EM::COV_MAT_SPHERICAL);
em_model->setTermCriteria(TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 300, 0.1));
em_model->trainEM( samples, noArray(), labels, noArray() );
// classify every image pixel
for( i = 0; i < img.rows; i++ )

View File

@ -178,8 +178,23 @@ build_rtrees_classifier( const string& data_filename,
{
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
// Params( int maxDepth, int minSampleCount,
// double regressionAccuracy, bool useSurrogates,
// int maxCategories, const Mat& priors,
// bool calcVarImportance, int nactiveVars,
// TermCriteria termCrit );
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
model = StatModel::train<RTrees>(tdata, RTrees::Params(10,10,0,false,15,Mat(),true,4,TC(100,0.01f)));
model = RTrees::create();
model->setMaxDepth(10);
model->setMinSampleCount(10);
model->setRegressionAccuracy(0);
model->setUseSurrogates(false);
model->setMaxCategories(15);
model->setPriors(Mat());
model->setCalculateVarImportance(true);
model->setActiveVarCount(4);
model->setTermCriteria(TC(100,0.01f));
model->train(tdata);
cout << endl;
}
@ -269,7 +284,14 @@ build_boost_classifier( const string& data_filename,
priors[1] = 26;
cout << "Training the classifier (may take a few minutes)...\n";
model = StatModel::train<Boost>(tdata, Boost::Params(Boost::GENTLE, 100, 0.95, 5, false, Mat(priors) ));
model = Boost::create();
model->setBoostType(Boost::GENTLE);
model->setWeakCount(100);
model->setWeightTrimRate(0.95);
model->setMaxDepth(5);
model->setUseSurrogates(false);
model->setPriors(Mat(priors));
model->train(tdata);
cout << endl;
}
@ -374,11 +396,11 @@ build_mlp_classifier( const string& data_filename,
Mat layer_sizes( 1, nlayers, CV_32S, layer_sz );
#if 1
int method = ANN_MLP::Params::BACKPROP;
int method = ANN_MLP::BACKPROP;
double method_param = 0.001;
int max_iter = 300;
#else
int method = ANN_MLP::Params::RPROP;
int method = ANN_MLP::RPROP;
double method_param = 0.1;
int max_iter = 1000;
#endif
@ -386,7 +408,12 @@ build_mlp_classifier( const string& data_filename,
Ptr<TrainData> tdata = TrainData::create(train_data, ROW_SAMPLE, train_responses);
cout << "Training the classifier (may take a few minutes)...\n";
model = StatModel::train<ANN_MLP>(tdata, ANN_MLP::Params(layer_sizes, ANN_MLP::SIGMOID_SYM, 0, 0, TC(max_iter,0), method, method_param));
model = ANN_MLP::create();
model->setLayerSizes(layer_sizes);
model->setActivationFunction(ANN_MLP::SIGMOID_SYM, 0, 0);
model->setTermCriteria(TC(max_iter,0));
model->setTrainMethod(method, method_param);
model->train(tdata);
cout << endl;
}
@ -403,7 +430,6 @@ build_knearest_classifier( const string& data_filename, int K )
if( !ok )
return ok;
Ptr<KNearest> model;
int nsamples_all = data.rows;
int ntrain_samples = (int)(nsamples_all*0.8);
@ -411,7 +437,10 @@ build_knearest_classifier( const string& data_filename, int K )
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
model = StatModel::train<KNearest>(tdata, KNearest::Params(K, true));
Ptr<KNearest> model = KNearest::create();
model->setDefaultK(K);
model->setIsClassifier(true);
model->train(tdata);
cout << endl;
test_and_save_classifier(model, data, responses, ntrain_samples, 0, string());
@ -435,7 +464,8 @@ build_nbayes_classifier( const string& data_filename )
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
model = StatModel::train<NormalBayesClassifier>(tdata, NormalBayesClassifier::Params());
model = NormalBayesClassifier::create();
model->train(tdata);
cout << endl;
test_and_save_classifier(model, data, responses, ntrain_samples, 0, string());
@ -471,13 +501,11 @@ build_svm_classifier( const string& data_filename,
// create classifier by using <data> and <responses>
cout << "Training the classifier ...\n";
Ptr<TrainData> tdata = prepare_train_data(data, responses, ntrain_samples);
SVM::Params params;
params.svmType = SVM::C_SVC;
params.kernelType = SVM::LINEAR;
params.C = 1;
model = StatModel::train<SVM>(tdata, params);
model = SVM::create();
model->setType(SVM::C_SVC);
model->setKernel(SVM::LINEAR);
model->setC(1);
model->train(tdata);
cout << endl;
}
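Every model in this sample now follows the same shape: the static StatModel::train<T>(tdata, T::Params(...)) helper is gone, replaced by create-configure-train. A minimal sketch of the shared pattern (SVM shown; tdata and sample are assumed to be a prepared Ptr<TrainData> and a CV_32F feature row):

    Ptr<SVM> model = SVM::create();           // 1. create with default parameters
    model->setKernel(SVM::LINEAR);            // 2. override what you need
    model->setC(1);
    model->train(tdata);                      // 3. train on a Ptr<TrainData>
    float response = model->predict(sample);  // prediction API is unchanged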

View File

@ -132,20 +132,16 @@ int main()
showImage(data_train, 28, "train data");
showImage(data_test, 28, "test data");
// simple case with batch gradient
LogisticRegression::Params params = LogisticRegression::Params(
0.001, 10, LogisticRegression::BATCH, LogisticRegression::REG_L2, 1, 1);
// simple case with mini-batch gradient
// LogisticRegression::Params params = LogisticRegression::Params(
// 0.001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1);
// mini-batch gradient with higher accuracy
// LogisticRegression::Params params = LogisticRegression::Params(
// 0.000001, 10, LogisticRegression::MINI_BATCH, LogisticRegression::REG_L2, 1, 1);
cout << "training...";
Ptr<StatModel> lr1 = LogisticRegression::create(params);
//! [init]
Ptr<LogisticRegression> lr1 = LogisticRegression::create();
lr1->setLearningRate(0.001);
lr1->setIterations(10);
lr1->setRegularization(LogisticRegression::REG_L2);
lr1->setTrainMethod(LogisticRegression::BATCH);
lr1->setMiniBatchSize(1);
//! [init]
lr1->train(data_train, ROW_SAMPLE, labels_train);
cout << "done!" << endl;

View File

@ -102,7 +102,7 @@ static void predict_and_paint(const Ptr<StatModel>& model, Mat& dst)
static void find_decision_boundary_NBC()
{
// learn classifier
Ptr<NormalBayesClassifier> normalBayesClassifier = StatModel::train<NormalBayesClassifier>(prepare_train_data(), NormalBayesClassifier::Params());
Ptr<NormalBayesClassifier> normalBayesClassifier = StatModel::train<NormalBayesClassifier>(prepare_train_data());
predict_and_paint(normalBayesClassifier, imgDst);
}
@ -112,15 +112,29 @@ static void find_decision_boundary_NBC()
#if _KNN_
static void find_decision_boundary_KNN( int K )
{
Ptr<KNearest> knn = StatModel::train<KNearest>(prepare_train_data(), KNearest::Params(K, true));
Ptr<KNearest> knn = KNearest::create();
knn->setDefaultK(K);
knn->setIsClassifier(true);
knn->train(prepare_train_data());
predict_and_paint(knn, imgDst);
}
#endif
#if _SVM_
static void find_decision_boundary_SVM( SVM::Params params )
static void find_decision_boundary_SVM( double C )
{
Ptr<SVM> svm = StatModel::train<SVM>(prepare_train_data(), params);
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::POLY); //SVM::LINEAR;
svm->setDegree(0.5);
svm->setGamma(1);
svm->setCoef0(1);
svm->setNu(0.5);
svm->setP(0);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01));
svm->setC(C);
svm->train(prepare_train_data());
predict_and_paint(svm, imgDst);
Mat sv = svm->getSupportVectors();
@ -135,16 +149,14 @@ static void find_decision_boundary_SVM( SVM::Params params )
#if _DT_
static void find_decision_boundary_DT()
{
DTrees::Params params;
params.maxDepth = 8;
params.minSampleCount = 2;
params.useSurrogates = false;
params.CVFolds = 0; // the number of cross-validation folds
params.use1SERule = false;
params.truncatePrunedTree = false;
Ptr<DTrees> dtree = StatModel::train<DTrees>(prepare_train_data(), params);
Ptr<DTrees> dtree = DTrees::create();
dtree->setMaxDepth(8);
dtree->setMinSampleCount(2);
dtree->setUseSurrogates(false);
dtree->setCVFolds(0); // the number of cross-validation folds
dtree->setUse1SERule(false);
dtree->setTruncatePrunedTree(false);
dtree->train(prepare_train_data());
predict_and_paint(dtree, imgDst);
}
#endif
@ -152,15 +164,14 @@ static void find_decision_boundary_DT()
#if _BT_
static void find_decision_boundary_BT()
{
Boost::Params params( Boost::DISCRETE, // boost_type
100, // weak_count
0.95, // weight_trim_rate
2, // max_depth
false, //use_surrogates
Mat() // priors
);
Ptr<Boost> boost = StatModel::train<Boost>(prepare_train_data(), params);
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::DISCRETE);
boost->setWeakCount(100);
boost->setWeightTrimRate(0.95);
boost->setMaxDepth(2);
boost->setUseSurrogates(false);
boost->setPriors(Mat());
boost->train(prepare_train_data());
predict_and_paint(boost, imgDst);
}
@ -185,18 +196,17 @@ static void find_decision_boundary_GBT()
#if _RF_
static void find_decision_boundary_RF()
{
RTrees::Params params( 4, // max_depth,
2, // min_sample_count,
0.f, // regression_accuracy,
false, // use_surrogates,
16, // max_categories,
Mat(), // priors,
false, // calc_var_importance,
1, // nactive_vars,
TermCriteria(TermCriteria::MAX_ITER, 5, 0) // max_num_of_trees_in_the_forest,
);
Ptr<RTrees> rtrees = StatModel::train<RTrees>(prepare_train_data(), params);
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setMaxDepth(4);
rtrees->setMinSampleCount(2);
rtrees->setRegressionAccuracy(0.f);
rtrees->setUseSurrogates(false);
rtrees->setMaxCategories(16);
rtrees->setPriors(Mat());
rtrees->setCalculateVarImportance(false);
rtrees->setActiveVarCount(1);
rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 5, 0));
rtrees->train(prepare_train_data());
predict_and_paint(rtrees, imgDst);
}
@ -205,9 +215,6 @@ static void find_decision_boundary_RF()
#if _ANN_
static void find_decision_boundary_ANN( const Mat& layer_sizes )
{
ANN_MLP::Params params(layer_sizes, ANN_MLP::SIGMOID_SYM, 1, 1, TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON),
ANN_MLP::Params::BACKPROP, 0.001);
Mat trainClasses = Mat::zeros( (int)trainedPoints.size(), (int)classColors.size(), CV_32FC1 );
for( int i = 0; i < trainClasses.rows; i++ )
{
@ -217,7 +224,12 @@ static void find_decision_boundary_ANN( const Mat& layer_sizes )
Mat samples = prepare_train_samples(trainedPoints);
Ptr<TrainData> tdata = TrainData::create(samples, ROW_SAMPLE, trainClasses);
Ptr<ANN_MLP> ann = StatModel::train<ANN_MLP>(tdata, params);
Ptr<ANN_MLP> ann = ANN_MLP::create();
ann->setLayerSizes(layer_sizes);
ann->setActivationFunction(ANN_MLP::SIGMOID_SYM, 1, 1);
ann->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 300, FLT_EPSILON));
ann->setTrainMethod(ANN_MLP::BACKPROP, 0.001);
ann->train(tdata);
predict_and_paint(ann, imgDst);
}
#endif
@ -247,8 +259,11 @@ static void find_decision_boundary_EM()
// learn models
if( !modelSamples.empty() )
{
em_models[i] = EM::train(modelSamples, noArray(), noArray(), noArray(),
EM::Params(componentCount, EM::COV_MAT_DIAGONAL));
Ptr<EM> em = EM::create();
em->setClustersNumber(componentCount);
em->setCovarianceMatrixType(EM::COV_MAT_DIAGONAL);
em->trainEM(modelSamples, noArray(), noArray(), noArray());
em_models[i] = em;
}
}
@ -332,33 +347,20 @@ int main()
imshow( "NormalBayesClassifier", imgDst );
#endif
#if _KNN_
int K = 3;
find_decision_boundary_KNN( K );
find_decision_boundary_KNN( 3 );
imshow( "kNN", imgDst );
K = 15;
find_decision_boundary_KNN( K );
find_decision_boundary_KNN( 15 );
imshow( "kNN2", imgDst );
#endif
#if _SVM_
//(1)-(2)separable and not sets
SVM::Params params;
params.svmType = SVM::C_SVC;
params.kernelType = SVM::POLY; //CvSVM::LINEAR;
params.degree = 0.5;
params.gamma = 1;
params.coef0 = 1;
params.C = 1;
params.nu = 0.5;
params.p = 0;
params.termCrit = TermCriteria(TermCriteria::MAX_ITER+TermCriteria::EPS, 1000, 0.01);
find_decision_boundary_SVM( params );
find_decision_boundary_SVM( 1 );
imshow( "classificationSVM1", imgDst );
params.C = 10;
find_decision_boundary_SVM( params );
find_decision_boundary_SVM( 10 );
imshow( "classificationSVM2", imgDst );
#endif

View File

@ -141,7 +141,7 @@ Mat get_hogdescriptor_visu(const Mat& color_origImg, vector<float>& descriptorVa
int cellSize = 8;
int gradientBinSize = 9;
float radRangeForOneBin = (float)(CV_PI/(float)gradientBinSize); // dividing 180° into 9 bins, how large (in rad) is one bin?
float radRangeForOneBin = (float)(CV_PI/(float)gradientBinSize); // dividing 180 into 9 bins, how large (in rad) is one bin?
// prepare data structure: 9 orientation/gradient strengths for each cell
int cells_in_x_dir = DIMX / cellSize;
@ -313,23 +313,23 @@ void compute_hog( const vector< Mat > & img_lst, vector< Mat > & gradient_lst, c
void train_svm( const vector< Mat > & gradient_lst, const vector< int > & labels )
{
/* Default values to train SVM */
SVM::Params params;
params.coef0 = 0.0;
params.degree = 3;
params.termCrit.epsilon = 1e-3;
params.gamma = 0;
params.kernelType = SVM::LINEAR;
params.nu = 0.5;
params.p = 0.1; // for EPSILON_SVR, epsilon in loss function?
params.C = 0.01; // From paper, soft classifier
params.svmType = SVM::EPS_SVR; // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task
Mat train_data;
convert_to_ml( gradient_lst, train_data );
clog << "Start training...";
Ptr<SVM> svm = StatModel::train<SVM>(train_data, ROW_SAMPLE, Mat(labels), params);
Ptr<SVM> svm = SVM::create();
/* Default values to train SVM */
svm->setCoef0(0.0);
svm->setDegree(3);
svm->setTermCriteria(TermCriteria( CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, 1e-3 ));
svm->setGamma(0);
svm->setKernel(SVM::LINEAR);
svm->setNu(0.5);
svm->setP(0.1); // for EPSILON_SVR, epsilon in loss function?
svm->setC(0.01); // From paper, soft classifier
svm->setType(SVM::EPS_SVR); // C_SVC; // EPSILON_SVR; // may be also NU_SVR; // do regression task
svm->train(train_data, ROW_SAMPLE, Mat(labels));
clog << "...[done]" << endl;
svm->save( "my_people_detector.yml" );

View File

@ -73,18 +73,42 @@ int main(int argc, char** argv)
data->setTrainTestSplitRatio(train_test_split_ratio);
printf("======DTREE=====\n");
Ptr<DTrees> dtree = DTrees::create(DTrees::Params( 10, 2, 0, false, 16, 0, false, false, Mat() ));
Ptr<DTrees> dtree = DTrees::create();
dtree->setMaxDepth(10);
dtree->setMinSampleCount(2);
dtree->setRegressionAccuracy(0);
dtree->setUseSurrogates(false);
dtree->setMaxCategories(16);
dtree->setCVFolds(0);
dtree->setUse1SERule(false);
dtree->setTruncatePrunedTree(false);
dtree->setPriors(Mat());
train_and_print_errs(dtree, data);
if( (int)data->getClassLabels().total() <= 2 ) // regression or 2-class classification problem
{
printf("======BOOST=====\n");
Ptr<Boost> boost = Boost::create(Boost::Params(Boost::GENTLE, 100, 0.95, 2, false, Mat()));
Ptr<Boost> boost = Boost::create();
boost->setBoostType(Boost::GENTLE);
boost->setWeakCount(100);
boost->setWeightTrimRate(0.95);
boost->setMaxDepth(2);
boost->setUseSurrogates(false);
boost->setPriors(Mat());
train_and_print_errs(boost, data);
}
printf("======RTREES=====\n");
Ptr<RTrees> rtrees = RTrees::create(RTrees::Params(10, 2, 0, false, 16, Mat(), false, 0, TermCriteria(TermCriteria::MAX_ITER, 100, 0)));
Ptr<RTrees> rtrees = RTrees::create();
rtrees->setMaxDepth(10);
rtrees->setMinSampleCount(2);
rtrees->setRegressionAccuracy(0);
rtrees->setUseSurrogates(false);
rtrees->setMaxCategories(16);
rtrees->setPriors(Mat());
rtrees->setCalculateVarImportance(false);
rtrees->setActiveVarCount(0);
rtrees->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 0));
train_and_print_errs(rtrees, data);
return 0;

View File

@ -138,7 +138,7 @@ int main(int argc, char **argv)
Stats stats, akaze_stats, orb_stats;
Ptr<AKAZE> akaze = AKAZE::create();
akaze->set("threshold", akaze_thresh);
akaze->setThreshold(akaze_thresh);
Ptr<ORB> orb = ORB::create();
orb->setMaxFeatures(stats.keypoints);
Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create("BruteForce-Hamming");
@ -163,7 +163,7 @@ int main(int argc, char **argv)
akaze_draw_stats = stats;
}
orb_tracker.getDetector()->set("nFeatures", stats.keypoints);
orb->setMaxFeatures(stats.keypoints);
orb_res = orb_tracker.process(frame, stats);
orb_stats += stats;
if(update_stats) {

View File

@ -14,23 +14,30 @@ int main(int, char**)
Mat image = Mat::zeros(height, width, CV_8UC3);
// Set up training data
//! [setup1]
int labels[4] = {1, -1, -1, -1};
Mat labelsMat(4, 1, CV_32SC1, labels);
float trainingData[4][2] = { {501, 10}, {255, 10}, {501, 255}, {10, 501} };
//! [setup1]
//! [setup2]
Mat trainingDataMat(4, 2, CV_32FC1, trainingData);
Mat labelsMat(4, 1, CV_32SC1, labels);
//! [setup2]
// Set up SVM's parameters
SVM::Params params;
params.svmType = SVM::C_SVC;
params.kernelType = SVM::LINEAR;
params.termCrit = TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6);
// Train the SVM
Ptr<SVM> svm = StatModel::train<SVM>(trainingDataMat, ROW_SAMPLE, labelsMat, params);
//! [init]
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, 100, 1e-6));
//! [init]
//! [train]
svm->train(trainingDataMat, ROW_SAMPLE, labelsMat);
//! [train]
Vec3b green(0,255,0), blue (255,0,0);
// Show the decision regions given by the SVM
//! [show]
Vec3b green(0,255,0), blue (255,0,0);
for (int i = 0; i < image.rows; ++i)
for (int j = 0; j < image.cols; ++j)
{
@ -42,16 +49,20 @@ int main(int, char**)
else if (response == -1)
image.at<Vec3b>(i,j) = blue;
}
//! [show]
// Show the training data
//! [show_data]
int thickness = -1;
int lineType = 8;
circle( image, Point(501, 10), 5, Scalar( 0, 0, 0), thickness, lineType );
circle( image, Point(255, 10), 5, Scalar(255, 255, 255), thickness, lineType );
circle( image, Point(501, 255), 5, Scalar(255, 255, 255), thickness, lineType );
circle( image, Point( 10, 501), 5, Scalar(255, 255, 255), thickness, lineType );
//! [show_data]
// Show support vectors
//! [show_vectors]
thickness = 2;
lineType = 8;
Mat sv = svm->getSupportVectors();
@ -61,6 +72,7 @@ int main(int, char**)
const float* v = sv.ptr<float>(i);
circle( image, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thickness, lineType);
}
//! [show_vectors]
imwrite("result.png", image); // save the image

View File

@ -39,6 +39,7 @@ int main()
// Set up the linearly separable part of the training data
int nLinearSamples = (int) (FRAC_LINEAR_SEP * NTRAINING_SAMPLES);
//! [setup1]
// Generate random points for the class 1
Mat trainClass = trainData.rowRange(0, nLinearSamples);
// The x coordinate of the points is in [0, 0.4)
@ -56,9 +57,10 @@ int main()
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
//! [setup1]
//------------------ Set up the non-linearly separable part of the training data ---------------
//! [setup2]
// Generate random points for the classes 1 and 2
trainClass = trainData.rowRange( nLinearSamples, 2*NTRAINING_SAMPLES-nLinearSamples);
// The x coordinate of the points is in [0.4, 0.6)
@ -67,24 +69,28 @@ int main()
// The y coordinate of the points is in [0, 1)
c = trainClass.colRange(1,2);
rng.fill(c, RNG::UNIFORM, Scalar(1), Scalar(HEIGHT));
//! [setup2]
//------------------------- Set up the labels for the classes ---------------------------------
labels.rowRange( 0, NTRAINING_SAMPLES).setTo(1); // Class 1
labels.rowRange(NTRAINING_SAMPLES, 2*NTRAINING_SAMPLES).setTo(2); // Class 2
//------------------------ 2. Set up the support vector machines parameters --------------------
SVM::Params params;
params.svmType = SVM::C_SVC;
params.C = 0.1;
params.kernelType = SVM::LINEAR;
params.termCrit = TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6);
//------------------------ 3. Train the svm ----------------------------------------------------
cout << "Starting training process" << endl;
Ptr<SVM> svm = StatModel::train<SVM>(trainData, ROW_SAMPLE, labels, params);
//! [init]
Ptr<SVM> svm = SVM::create();
svm->setType(SVM::C_SVC);
svm->setC(0.1);
svm->setKernel(SVM::LINEAR);
svm->setTermCriteria(TermCriteria(TermCriteria::MAX_ITER, (int)1e7, 1e-6));
//! [init]
//! [train]
svm->train(trainData, ROW_SAMPLE, labels);
//! [train]
cout << "Finished training process" << endl;
//------------------------ 4. Show the decision regions ----------------------------------------
//! [show]
Vec3b green(0,100,0), blue (100,0,0);
for (int i = 0; i < I.rows; ++i)
for (int j = 0; j < I.cols; ++j)
@ -95,8 +101,10 @@ int main()
if (response == 1) I.at<Vec3b>(j, i) = green;
else if (response == 2) I.at<Vec3b>(j, i) = blue;
}
//! [show]
//----------------------- 5. Show the training data --------------------------------------------
//! [show_data]
int thick = -1;
int lineType = 8;
float px, py;
@ -114,8 +122,10 @@ int main()
py = trainData.at<float>(i,1);
circle(I, Point( (int) px, (int) py ), 3, Scalar(255, 0, 0), thick, lineType);
}
//! [show_data]
//------------------------- 6. Show support vectors --------------------------------------------
//! [show_vectors]
thick = 2;
lineType = 8;
Mat sv = svm->getSupportVectors();
@ -125,6 +135,7 @@ int main()
const float* v = sv.ptr<float>(i);
circle( I, Point( (int) v[0], (int) v[1]), 6, Scalar(128, 128, 128), thick, lineType);
}
//! [show_vectors]
imwrite("result.png", I); // save the Image
imshow("SVM for Non-Linear Training Data", I); // show it to the user

View File

@ -26,32 +26,32 @@ using namespace cv::superres;
cout << tm.getTimeSec() << " sec" << endl; \
}
static Ptr<DenseOpticalFlowExt> createOptFlow(const string& name, bool useGpu)
static Ptr<cv::superres::DenseOpticalFlowExt> createOptFlow(const string& name, bool useGpu)
{
if (name == "farneback")
{
if (useGpu)
return createOptFlow_Farneback_CUDA();
return cv::superres::createOptFlow_Farneback_CUDA();
else
return createOptFlow_Farneback();
return cv::superres::createOptFlow_Farneback();
}
/*else if (name == "simple")
return createOptFlow_Simple();*/
else if (name == "tvl1")
{
if (useGpu)
return createOptFlow_DualTVL1_CUDA();
return cv::superres::createOptFlow_DualTVL1_CUDA();
else
return createOptFlow_DualTVL1();
return cv::superres::createOptFlow_DualTVL1();
}
else if (name == "brox")
return createOptFlow_Brox_CUDA();
return cv::superres::createOptFlow_Brox_CUDA();
else if (name == "pyrlk")
return createOptFlow_PyrLK_CUDA();
return cv::superres::createOptFlow_PyrLK_CUDA();
else
cerr << "Incorrect Optical Flow algorithm - " << name << endl;
return Ptr<DenseOpticalFlowExt>();
return Ptr<cv::superres::DenseOpticalFlowExt>();
}
int main(int argc, const char* argv[])
@ -92,15 +92,15 @@ int main(int argc, const char* argv[])
else
superRes = createSuperResolution_BTVL1();
Ptr<DenseOpticalFlowExt> of = createOptFlow(optFlow, useCuda);
Ptr<cv::superres::DenseOpticalFlowExt> of = createOptFlow(optFlow, useCuda);
if (of.empty())
return EXIT_FAILURE;
superRes->set("opticalFlow", of);
superRes->setOpticalFlow(of);
superRes->set("scale", scale);
superRes->set("iterations", iterations);
superRes->set("temporalAreaRadius", temporalAreaRadius);
superRes->setScale(scale);
superRes->setIterations(iterations);
superRes->setTemporalAreaRadius(temporalAreaRadius);
Ptr<FrameSource> frameSource;
if (useCuda)

View File

@ -62,19 +62,17 @@ int main(int argc, char* argv[])
cout << "FOUND " << keypoints2GPU.cols << " keypoints on second image" << endl;
// matching descriptors
BFMatcher_CUDA matcher(surf.defaultNorm());
GpuMat trainIdx, distance;
matcher.matchSingle(descriptors1GPU, descriptors2GPU, trainIdx, distance);
Ptr<cv::cuda::DescriptorMatcher> matcher = cv::cuda::DescriptorMatcher::createBFMatcher(surf.defaultNorm());
vector<DMatch> matches;
matcher->match(descriptors1GPU, descriptors2GPU, matches);
// downloading results
vector<KeyPoint> keypoints1, keypoints2;
vector<float> descriptors1, descriptors2;
vector<DMatch> matches;
surf.downloadKeypoints(keypoints1GPU, keypoints1);
surf.downloadKeypoints(keypoints2GPU, keypoints2);
surf.downloadDescriptors(descriptors1GPU, descriptors1);
surf.downloadDescriptors(descriptors2GPU, descriptors2);
BFMatcher_CUDA::matchDownload(trainIdx, distance, matches);
// drawing the results
Mat img_matches;